Commit 28d9a80

Merge pull request #3 from AN00P-G/patch-2
Update index.qmd
2 parents f6cf2cb + 1bf9a00 commit 28d9a80

File tree

• allhands/spring2025/weekeleven/teamone

1 file changed: +352 −4 lines changed
allhands/spring2025/weekeleven/teamone/index.qmd

### Benchmarking

There are two main benchmarking functions in our project.

#### Basic analysis

```python
def analyze_queue(queue_class, size=1000):
    """Analyze a queue implementation."""
    approach = next(
        (k for k, v in QUEUE_IMPLEMENTATIONS.items() if v == queue_class), None
    )
    if approach is None:
        console.print("[red]Unknown queue implementation[/red]")
        return

    console.print(f"\n{approach.value.upper()} Queue Implementation")

    try:
        queue = queue_class()
        operations = []

        # Test enqueue
        enqueue_time = time_operation(lambda: [queue.enqueue(i) for i in range(size)])
        operations.append(("enqueue", enqueue_time, size))

        # Test dequeue
        dequeue_count = size // 2
        dequeue_time = time_operation(
            lambda: [queue.dequeue() for _ in range(dequeue_count)]
        )
        operations.append(("dequeue", dequeue_time, dequeue_count))

        # Refill queue
        for i in range(dequeue_count):
            queue.enqueue(i)

        # Test peek
        peek_count = size // 3
        peek_time = time_operation(lambda: [queue.peek() for _ in range(peek_count)])
        operations.append(("peek", peek_time, peek_count))

        # Test concat
        other = queue_class()
        for i in range(size // 10):
            other.enqueue(i)
        concat_time = time_operation(lambda: queue + other)
        operations.append(("concat", concat_time, size // 10))

        # Test iconcat
        iconcat_time = time_operation(lambda: queue.__iadd__(other))
        operations.append(("iconcat", iconcat_time, size // 10))

        # Display results in table
        table = Table(
            title=f"{approach.value.upper()} Queue Performance Analysis",
            box=box.ROUNDED,
            show_header=True,
            header_style="bold magenta",
        )
        table.add_column("Operation", style="cyan")
        table.add_column("Time (ms)", justify="right")
        table.add_column("Elements", justify="right")
        table.add_column("Time/Element (ms)", justify="right")

        for operation, time_taken, elements in operations:
            time_per_element = time_taken / elements if elements > 0 else 0
            table.add_row(
                operation,
                f"{time_taken * 1000:.6f}",  # Convert to milliseconds
                f"{elements:,}",
                f"{time_per_element * 1000:.6f}",  # Convert to milliseconds
            )

        console.print(Panel(table))

    except Exception as e:
        console.print(f"[red]Error testing {approach.value}: {str(e)}[/red]")
        import traceback

        console.print(traceback.format_exc())
```

This function performs a basic performance analysis with the following operations (a sketch of the queue interface they assume follows the list):

- Enqueue: Adds `size` elements to the queue
- Dequeue: Removes `size/2` elements
- Peek: Looks at `size/3` elements without removing them
- Concat: Concatenates with another queue of `size/10` elements
- Iconcat: In-place concatenation with another queue of `size/10` elements
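
Both benchmarking functions assume that every queue class registered in `QUEUE_IMPLEMENTATIONS` exposes the same small interface: `enqueue`, `dequeue`, `peek`, `__add__`, and `__iadd__`. As an illustration only (the names and bodies below are assumptions, not the project's actual classes), the registry and one conforming implementation might look like this:

```python
# Illustrative sketch only: the project defines its own QueueApproach enum,
# queue classes, and QUEUE_IMPLEMENTATIONS mapping. These stand-ins just show
# the interface that analyze_queue() and doubling() exercise.
from enum import Enum


class QueueApproach(Enum):
    dll = "dll"
    sll = "sll"
    array = "array"


class ArrayQueue:
    """Array-backed queue exposing the operations that get benchmarked."""

    def __init__(self):
        self._items = []

    def enqueue(self, item):
        self._items.append(item)

    def dequeue(self):
        return self._items.pop(0)

    def peek(self):
        return self._items[0]

    def __add__(self, other):
        combined = ArrayQueue()
        combined._items = self._items + other._items
        return combined

    def __iadd__(self, other):
        self._items.extend(other._items)
        return self


# Maps each approach to the class that implements it.
QUEUE_IMPLEMENTATIONS = {QueueApproach.array: ArrayQueue}
```
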
#### Doubling experiment

```python
def doubling(
    initial_size: int = typer.Option(100, help="Initial size for doubling experiment"),
    max_size: int = typer.Option(1000, help="Maximum size for doubling experiment"),
    dll: bool = typer.Option(True, help="Test DLL implementation"),
    sll: bool = typer.Option(True, help="Test SLL implementation"),
    array: bool = typer.Option(True, help="Test Array implementation"),
):
    """Run doubling experiment on queue implementations."""
    # Create results directory if it doesn't exist
    results_dir = Path("results")
    results_dir.mkdir(exist_ok=True)

    sizes = []
    current_size = initial_size
    while current_size <= max_size:
        sizes.append(current_size)
        current_size *= 2

    # Dictionary to store all results for plotting
    all_results = {}

    for approach, queue_class in QUEUE_IMPLEMENTATIONS.items():
        if not (
            (approach == QueueApproach.dll and dll)
            or (approach == QueueApproach.sll and sll)
            or (approach == QueueApproach.array and array)
        ):
            continue

        try:
            console.print(f"\n{approach.value.upper()} Queue Implementation")
            results = {
                "enqueue": [],
                "dequeue": [],
                "peek": [],
                "concat": [],
                "iconcat": [],
            }

            for size in sizes:
                queue = queue_class()

                # Enqueue
                enqueue_time = time_operation(
                    lambda: [queue.enqueue(i) for i in range(size)]
                )
                results["enqueue"].append(enqueue_time)

                # Dequeue
                dequeue_time = time_operation(
                    lambda: [queue.dequeue() for _ in range(size // 2)]
                )
                results["dequeue"].append(dequeue_time)

                # Refill queue
                for i in range(size // 2):
                    queue.enqueue(i)

                # Peek
                peek_time = time_operation(
                    lambda: [queue.peek() for _ in range(size // 3)]
                )
                results["peek"].append(peek_time)

                # Concat
                other = queue_class()
                for i in range(size // 10):
                    other.enqueue(i)

                concat_time = time_operation(lambda: queue + other)
                results["concat"].append(concat_time)

                # Iconcat
                iconcat_time = time_operation(lambda: queue.__iadd__(other))
                results["iconcat"].append(iconcat_time)

            # Store results for plotting
            all_results[approach.value] = results

            # Display results in table
            table = Table(
                title=f"{approach.value.upper()} Queue Doubling Experiment Results",
                box=box.ROUNDED,
                show_header=True,
                header_style="bold magenta",
            )
            table.add_column("Size (n)", justify="right")
            for operation in results.keys():
                table.add_column(operation, justify="right")

            for i, size in enumerate(sizes):
                row = [f"{size:,}"]
                for operation in results.keys():
                    value = results[operation][i]
                    if np.isnan(value):  # Check for NaN
                        row.append("N/A")
                    else:
                        row.append(f"{value * 1000:.6f}")  # Convert to milliseconds
                table.add_row(*row)

            console.print(Panel(table))

        except Exception as e:
            console.print(f"[red]Error testing {approach.value}: {str(e)}[/red]")
            import traceback

            console.print(traceback.format_exc())

    # Generate and save plots
    plot_results(sizes, all_results, results_dir)
    console.print(f"[green]Plots saved to [bold]{results_dir}[/bold] directory[/green]")
```

This doubling experiment does the following:

- Starts with `initial_size` and doubles the size until reaching `max_size`
- For each size, measures the same operations as the basic analysis
- Generates plots to visualize the results
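
One way to read the resulting numbers, independent of the plots, is to compare the total time at consecutive (doubled) sizes. This is a small, hypothetical helper rather than part of the tool:

```python
# Hypothetical helper, not part of the project: interpret doubling results.
# times[i] is the total time for the whole batch of operations at sizes[i].
def doubling_ratios(times):
    """Return T(2n) / T(n) for consecutive doubled sizes."""
    return [t2 / t1 for t1, t2 in zip(times, times[1:]) if t1 > 0]


# With made-up timings (in seconds) for sizes 100, 200, 400, 800: a ratio near 2
# is consistent with O(n) total work for the batch (constant time per element),
# while a ratio near 4 would point to O(n^2).
print(doubling_ratios([0.0011, 0.0023, 0.0046, 0.0091]))
```
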
#### Key benchmarking features

##### Timing Mechanism

```python
def time_operation(func):
    """Time an operation using high-precision counter."""
    try:
        # Warm up
        func()

        # Actual timing
        start_time = perf_counter()
        func()
        elapsed = perf_counter() - start_time
        return elapsed
    except Exception as e:
        console.print(f"[red]Error during operation: {str(e)}[/red]")
        return float("nan")
```

- Uses `perf_counter()` for high-precision timing
- Includes a warm-up run to avoid cold-start penalties
- Returns elapsed time in seconds (a short usage sketch follows)
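
As a quick usage sketch (hypothetical, with a plain Python list standing in for a queue), the helper can time any zero-argument callable:

```python
# Hypothetical usage of time_operation(); a plain list stands in for a queue.
items = []
elapsed = time_operation(lambda: [items.append(i) for i in range(10_000)])
print(f"batch of appends took {elapsed * 1000:.3f} ms")  # elapsed is in seconds
```

Note that because the warm-up call also executes `func`, stateful operations run twice, so the timed run starts from whatever state the warm-up left behind (for example, a queue that already holds the warm-up's elements).
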
##### Result Visualization

```python
def plot_results(sizes, all_results, results_dir):
    """Generate and save plots for doubling experiment results."""
    operations = ["enqueue", "dequeue", "peek", "concat", "iconcat"]

    # Create log-log plots for each operation (keeping only these, removing regular operation plots)
    for operation in operations:
        # Skip regular plots for operations - only create log-log plots
        if len(sizes) > 2:  # Only create log plots if we have enough data points
            plt.figure(figsize=(10, 6))

            for impl, results in all_results.items():
                times = np.array(results[operation]) * 1000  # Convert to milliseconds
                if np.all(times > 0):  # Avoid log(0)
                    plt.loglog(
                        sizes, times, marker="o", label=f"{impl.upper()}", linewidth=2
                    )

            # Add reference lines for O(1), O(n), O(n²)
            x_range = np.array(sizes)
            # Add O(1) reference
            plt.loglog(
                x_range, np.ones_like(x_range) * times[0], "--", label="O(1)", alpha=0.5
            )
            # Add O(n) reference - scale to fit
            plt.loglog(
                x_range,
                x_range * (times[0] / x_range[0]),
                "--",
                label="O(n)",
                alpha=0.5,
            )
            # Add O(n²) reference - scale to fit
            plt.loglog(
                x_range,
                np.power(x_range, 2) * (times[0] / np.power(x_range[0], 2)),
                "--",
                label="O(n²)",
                alpha=0.5,
            )

            plt.title(
                f"Log-Log Plot for {operation.capitalize()} Operation", fontsize=16
            )
            plt.xlabel("Log Queue Size", fontsize=14)
            plt.ylabel("Log Time (ms)", fontsize=14)
            plt.grid(True, which="both", linestyle="--", alpha=0.5)
            plt.legend(fontsize=12)
            plt.tight_layout()

            # Save log-log plot
            log_plot_path = results_dir / f"{operation}_loglog_plot.png"
            plt.savefig(log_plot_path)
            plt.close()

    # Create regular performance plots for each implementation (keeping these, removing log-scale implementation plots)
    for impl, results in all_results.items():
        plt.figure(figsize=(10, 6))

        for operation in operations:
            times = np.array(results[operation]) * 1000  # Convert to milliseconds
            plt.plot(sizes, times, marker="o", label=operation, linewidth=2)

        plt.title(f"{impl.upper()} Queue Implementation Performance", fontsize=16)
        plt.xlabel("Queue Size (n)", fontsize=14)
        plt.ylabel("Time (ms)", fontsize=14)
        plt.grid(True, linestyle="--", alpha=0.7)
        plt.legend(fontsize=12)
        plt.tight_layout()

        # Save plot
        plot_path = results_dir / f"{impl}_performance.png"
        plt.savefig(plot_path)
        plt.close()
```

- Creates log-log plots for each operation to show algorithmic complexity
- Generates regular performance plots for each implementation
- Saves all plots to the `results` directory
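
On a log-log plot, the slope of a straight line approximates the exponent of the growth rate, so the same data can also be summarized numerically. A hypothetical companion snippet (not part of the tool) could estimate it with a least-squares fit:

```python
import numpy as np


def loglog_slope(sizes, times):
    """Fit log(time) = slope * log(size) + intercept and return the slope."""
    slope, _intercept = np.polyfit(np.log(sizes), np.log(times), 1)
    return slope


# Made-up timings that double along with the size give a slope of about 1,
# which is consistent with O(n) growth; ~0 suggests O(1), ~2 suggests O(n^2).
print(loglog_slope([100, 200, 400, 800], [0.4, 0.8, 1.6, 3.2]))
```
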
##### Error Handling

- Gracefully handles exceptions during benchmarking
- Reports errors with detailed tracebacks
- Continues testing the other implementations if one fails

##### Output Format

- Uses the Rich library for formatted console output
- Displays results in tables with:
  - Operation name
  - Time taken (in milliseconds)
  - Number of elements
  - Time per element
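
For completeness, the snippets above lean on a small amount of shared setup (the Rich console plus the plotting, timing, and CLI imports). The exact module layout in the project may differ; a plausible sketch is:

```python
# Plausible shared setup for the snippets above; the project's actual
# imports and module layout may differ.
from pathlib import Path
from time import perf_counter

import matplotlib.pyplot as plt
import numpy as np
import typer
from rich import box
from rich.console import Console
from rich.panel import Panel
from rich.table import Table

console = Console()
```
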
## Running and Using the Tool

The benchmarking supports three queue implementations:

- DLL (Doubly Linked List)
- SLL (Singly Linked List)
- Array-based Queue

### Setting Up

To run the benchmarking tool, ensure you have Poetry installed on your device. Navigate to the project directory and install the dependencies if you have not already:

`cd analyze && poetry install`

### Running the Experiments

The tool provides two main benchmarking experiments, which can also be listed by running:

`poetry run analyze --help`
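
The `analyze` entry point is a command-line application with one sub-command per experiment, which is what makes the `doubling` and `analyze` sub-commands (and their `--help` output) available. A sketch of how such Typer wiring could look (the registration details here are assumptions, not the project's actual code):

```python
# Hypothetical wiring sketch; the project's actual CLI registration may differ.
import typer

cli = typer.Typer(help="Benchmark queue implementations.")


@cli.command()
def analyze(size: int = typer.Option(1000, help="Number of elements to test")):
    """Run the basic per-operation analysis for every implementation."""
    for queue_class in QUEUE_IMPLEMENTATIONS.values():
        analyze_queue(queue_class, size=size)


# doubling() from earlier would be registered the same way, e.g.:
# cli.command()(doubling)

if __name__ == "__main__":
    cli()
```
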
#### Doubling Experiment

To run the doubling experiment, execute:

`poetry run analyze doubling`

This experiment measures how performance scales as the input size increases.

You can also run:

`poetry run analyze doubling --help`

for more details about the available options.

#### Implementation Performance Analysis

To analyze the performance of individual queue operations, run:

`poetry run analyze analyze`

This command will provide execution times for operations like `peek`, `dequeue`, and `enqueue` to compare their efficiency.

You can also run:

`poetry run analyze analyze --help`

for more details about the available options.

## Output Analysis