# Benchmark
## Raku
```raku
#!/usr/bin/env raku
# Pick jobs depending on the current time, priority and submit time.
sub schedule(@str_jobs) {
    my %result;
    my $time = 0;
    # Parse each CSV line into a hash: id/submit/process/prio => fields.
    my @jobs = @str_jobs.map: { %( <id submit process prio> Z=> .split(',') ) };
    my $bag  = @jobs.map({ .<prio> }).Bag;  # number of jobs per priority
    # Repeatedly pick the "smallest" remaining job: available ones first,
    # then highest priority, then earliest submission.
    while $_ = @jobs.min(:p, { $^a<submit> <=> $time, -$^a<prio>, $^a<submit> }).head {
        my $j = @jobs[.key]:delete;
        $time = $j<submit> if $time < $j<submit>; # skip idle time
        my $ends = $time + $j<process>;
        my $wait = $time > $j<submit> ?? $time - $j<submit> !! 0;
        %result{$j<prio>} += $wait;
        printf "%s (prio %d): Submits at %3d, starts at %3d, ends at %3d. Wait time = %3d.\n",
            |$j<id prio submit>, $time, $ends, $wait;
        $time = $ends;
    }
    do hyper for %result.keys.sort.reverse -> $prio {
        "Priority-{$prio}: {%result{$prio} div $bag{$prio}}"
    }
}
### Test
# format:
# JOB_ID,SUBMIT_TIMESTAMP,PROCESSING_TIME_MS,PRIORITY
my @data = (
    q:to/EOI/,
        jobA,0,10,3
        jobB,2,5,1
        jobC,3,8,3
        jobD,7,4,2
        EOI
    q:to/EOI/,
        jobB,2,5,1
        jobA,0,10,3
        jobC,3,8,3
        jobD,7,4,2
        jobE,20,4,3
        EOI
    q:to/EOI/,
        jobA,0,5,1
        jobB,100,10,5
        jobC,101,5,5
        EOI
);
for @data -> $data {
    say schedule($data.chomp.split("\n"));
}
```
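For readers who don't speak Raku: `min` with a one-argument block compares whatever keys the block returns, and a list of keys is compared element by element, so the `while` line above picks the job that is available soonest, then has the highest priority, then the earliest submission. If I read the Rakudo semantics right, a rough Python sketch of just that selection step looks like this (the `pick_next` name and dict layout are mine, not from either program):

```py
# Hypothetical sketch of the multi-key selection done by the Raku min(:p, { ... }):
# choose the index whose (availability, -priority, submit) tuple is smallest.
def pick_next(jobs, time):
    def key(i):
        submit = jobs[i]["submit"]
        avail = (submit > time) - (submit < time)  # -1/0/1, like Raku's <=>
        return (avail, -jobs[i]["prio"], submit)
    return min(range(len(jobs)), key=key)

# At time 0, the already-submitted job wins over the higher-priority future one:
print(pick_next([{"submit": 0, "prio": 1}, {"submit": 5, "prio": 9}], time=0))  # 0
```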
## Python (I am not the author of this one)
```py
#!/usr/bin/env python3
import heapq
from collections import defaultdict
def PriorityTaskScheduler(strArr):
    """
    Simulates a priority-based CPU scheduler to calculate the average wait time
    for each priority level.

    Time Complexity: O(N log N) - dominated by sorting the initial jobs and heap operations.
    Space Complexity: O(N) - to store the parsed jobs and the priority queue.
    """
    jobs = strArr
    if not jobs:
        return []

    # Parse "JOB_ID,SUBMIT,PROCESS,PRIORITY" lines; skip empty or malformed ones.
    parsed_jobs = []
    for job_string in jobs:
        try:
            _job_id, submit, process, prio = job_string.split(',')
            parsed_jobs.append([int(submit), int(process), int(prio)])
        except (ValueError, IndexError):
            continue
    parsed_jobs.sort()  # by submit time first

    current_time = 0
    job_index = 0
    num_jobs = len(parsed_jobs)
    priority_stats = defaultdict(lambda: [0, 0])  # priority -> [total_wait, count]
    wait_queue = []
    processed_count = 0

    while processed_count < num_jobs:
        # Move every job submitted by now into the priority queue.
        while job_index < num_jobs and parsed_jobs[job_index][0] <= current_time:
            submit_time, process_time, priority = parsed_jobs[job_index]
            heapq.heappush(wait_queue, (-priority, submit_time, process_time))
            job_index += 1
        if wait_queue:
            neg_priority, submit_time, process_time = heapq.heappop(wait_queue)
            priority = -neg_priority
            wait_time = current_time - submit_time
            priority_stats[priority][0] += wait_time
            priority_stats[priority][1] += 1
            current_time += process_time
            processed_count += 1
        elif job_index < num_jobs:
            # CPU is idle: jump straight to the next submission time.
            current_time = parsed_jobs[job_index][0]

    output = []
    sorted_priorities = sorted(priority_stats.keys(), reverse=True)
    for priority in sorted_priorities:
        total_wait, count = priority_stats[priority]
        avg_wait = total_wait // count  # integer average, matching the Raku `div`
        output.append(f"Priority-{priority}: {avg_wait}")
    return output


data = [
    "jobA,0,10,3\njobB,2,5,1\njobC,3,8,3\njobD,7,4,2\n",
    "jobB,2,5,1\njobA,0,10,3\njobC,3,8,3\njobD,7,4,2\njobE,20,4,3",
    "jobA,0,5,1\njobB,100,10,5\njobC,101,5,5",
]

for d in data:
    print(PriorityTaskScheduler(d.split("\n")))
```
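One detail worth calling out: `heapq` only provides a min-heap, so the scheduler pushes `(-priority, submit_time, ...)` to pop the highest priority first, with earlier submissions breaking ties. A minimal standalone illustration of that trick (not part of the benchmarked script):

```py
import heapq

heap = []
for prio, submit in [(3, 0), (1, 2), (3, 3)]:
    heapq.heappush(heap, (-prio, submit))  # negate: the min-heap acts as a max-heap

while heap:
    neg_prio, submit = heapq.heappop(heap)
    print(-neg_prio, submit)  # 3 0, then 3 3, then 1 2
```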
## Results
Measured with https://github.com/andrewrk/poop:
```
$ poop ./scheduler.raku ./scheduler.py
Benchmark 1 (23 runs): ./scheduler.raku
  measurement          mean ± σ            min … max           outliers        delta
  wall_time            220ms ± 27.7ms     203ms …  297ms         3 (13%)       0%
  peak_rss             189MB ± 1.62MB     185MB …  192MB         8 (35%)       0%
  cpu_cycles           1.20G ± 27.2M      1.13G …  1.23G         0 ( 0%)       0%
  instructions         1.85G ± 17.2M      1.81G …  1.86G         1 ( 4%)       0%
  cache_references      104M ± 2.26M      98.4M …   107M         0 ( 0%)       0%
  cache_misses         15.0M ±  544K      13.8M …  15.6M         5 (22%)       0%
  branch_misses        8.16M ± 87.1K      7.94M …  8.33M         0 ( 0%)       0%
Benchmark 2 (325 runs): ./scheduler.py
  measurement          mean ± σ            min … max           outliers        delta
  wall_time           15.3ms ± 1.07ms    13.4ms … 19.2ms         7 ( 2%)       ⚡- 93.0% ±  1.4%
  peak_rss            10.7MB ±  113KB    10.4MB … 11.0MB         9 ( 3%)       ⚡- 94.3% ±  0.1%
  cpu_cycles           42.4M ±  724K      41.3M …  47.6M        13 ( 4%)       ⚡- 96.5% ±  0.2%
  instructions         65.0M ± 45.2K      64.9M …  65.1M         3 ( 1%)       ⚡- 96.5% ±  0.1%
  cache_references     4.03M ± 30.5K      3.81M …  4.12M        12 ( 4%)       ⚡- 96.1% ±  0.2%
  cache_misses          711K ± 9.22K       667K …   767K         6 ( 2%)       ⚡- 95.3% ±  0.4%
  branch_misses         497K ± 3.03K       490K …   507K         3 ( 1%)       ⚡- 93.9% ±  0.1%
```
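Note that each run pays full interpreter startup, which on an input this small plausibly dominates Rakudo's 220ms (the ~189MB peak RSS points the same way). To isolate that fixed cost from the algorithm itself, the same tool could compare empty programs (a suggestion, not something I measured):
```
$ poop 'raku -e 1' 'python3 -c pass'
```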