cli.rb
# frozen_string_literal: true

require 'fileutils'

require_relative '../argument_parser'
require_relative '../cpu_config'
require_relative '../benchmark_runner'
require_relative '../benchmark_suite'
require_relative '../results_table_builder'
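
# Typical entry point (any launcher script is assumed, not shown in this file):
#
#   BenchmarkRunner::CLI.run(ARGV)
#
# CLI.run parses ARGV with ArgumentParser, benchmarks each configured Ruby
# executable, and writes JSON/CSV/text reports.
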
module BenchmarkRunner
  class CLI
    BOLD = "\e[1m"
    RESET = "\e[0m"

    attr_reader :args

    def self.run(argv = ARGV)
      args = ArgumentParser.parse(argv)
      new(args).run
    end

    def initialize(args)
      @args = args
    end

    def run
      CPUConfig.configure_for_benchmarking(turbo: args.turbo) unless args.no_sudo

      # Create the output directory
      FileUtils.mkdir_p(args.out_path)

      ruby_descriptions = {}

      suite = BenchmarkSuite.new(
        categories: args.categories,
        name_filters: args.name_filters,
        excludes: args.excludes,
        out_path: args.out_path,
        harness: args.harness,
        harness_explicit: args.harness_explicit,
        pre_init: args.with_pre_init,
        no_pinning: args.no_pinning,
        force_pinning: args.force_pinning
      )

      # Collect ruby version descriptions for all executables upfront
      args.executables.each do |name, executable|
        ruby_descriptions[name] = `#{executable.shelljoin} -v`.chomp
      end

      # Warn if two executables look identical (same ruby -v output and same flags)
      names = ruby_descriptions.keys
      names.each_with_index do |name_a, i|
        names[(i + 1)..].each do |name_b|
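          # Each args.executables value is an argv array ([ruby_path, *flags],
          # per the shelljoin call above), so [1..] extracts just the flags.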
          flags_a = args.executables[name_a][1..] || []
          flags_b = args.executables[name_b][1..] || []
          if ruby_descriptions[name_a] == ruby_descriptions[name_b] && flags_a == flags_b
            warn "#{BOLD}WARNING: '#{name_a}' and '#{name_b}' appear identical (same revision, same flags). This is likely a mistake.#{RESET}"
          end
        end
      end

      bench_start_time = Time.now.to_f
      bench_data = {}
      bench_failures = {}

      if args.interleave
        args.executables.each_key { |name| bench_data[name] = {} }
        entries = suite.benchmarks
        entries.each_with_index do |entry, idx|
          # Alternate executable order to cancel cache-warming bias
          exes = ruby_descriptions.keys
          exes = exes.reverse if idx.odd?
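          # e.g. with executables A and B: pass 1 runs A then B, pass 2 runs
          # B then A, so neither executable always pays the cold-cache cost.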
          exes.each do |name|
            puts("Running benchmark \"#{entry.name}\" [#{name}] (#{idx + 1}/#{entries.length})")
            result = suite.run_benchmark(entry, ruby: args.executables[name], ruby_description: ruby_descriptions[name])
            if result[:data]
              bench_data[name][entry.name] = result[:data]
            else
              bench_failures[name] ||= {}
              bench_failures[name][entry.name] = result[:failure]
            end
          end
        end
      else
        args.executables.each do |name, executable|
          bench_data[name], failures = suite.run(
            ruby: executable,
            ruby_description: ruby_descriptions[name]
          )
          bench_failures[name] = failures unless failures.empty?
        end
      end

      bench_end_time = Time.now.to_f
      bench_total_time = (bench_end_time - bench_start_time).to_i
      puts("Total time spent benchmarking: #{bench_total_time}s")
      unless bench_failures.empty?
        puts("Failed benchmarks: #{bench_failures.sum { |_name, fails| fails.size }}")
      end
      puts

      # Build results table
      builder = ResultsTableBuilder.new(
        executable_names: ruby_descriptions.keys,
        bench_data: bench_data,
        include_rss: args.rss,
        include_pvalue: args.pvalue,
        zjit_stats: args.zjit_stats
      )
      table, format = builder.build

      output_path = BenchmarkRunner.output_path(args.out_path, out_override: args.out_override)

      # Save the raw data as JSON
      out_json_path = BenchmarkRunner.write_json(output_path, ruby_descriptions, bench_data)

      # Save data as CSV so we can produce tables/graphs in a spreadsheet program
      # NOTE: we don't do any number formatting for the output file because
      # we don't want to lose any precision
      BenchmarkRunner.write_csv(output_path, ruby_descriptions, table)

      # Save the output in a text file that we can easily refer to
      output_str = BenchmarkRunner.build_output_text(
        ruby_descriptions, table, format, bench_failures,
        include_rss: args.rss, include_gc: builder.include_gc?, include_pvalue: args.pvalue
      )
      out_txt_path = output_path + ".txt"
      File.open(out_txt_path, "w") { |f| f.write(output_str) }

      # Print the table to the console, with numbers truncated
      puts(output_str)

      # Print JSON and PNG file names
      puts
      puts "Output:"
      puts out_json_path

      if args.graph
        puts BenchmarkRunner.render_graph(out_json_path)
      end

      unless bench_failures.empty?
        puts "\nFailed benchmarks:"
        bench_failures.each do |name, data|
          puts "  #{name}: #{data.keys.join(', ')}"
        end
        exit(1)
      end
    end
  end
end