Class: HybridPlatformsConductor::TestsRunner
- Inherits:
-
Object
- Object
- HybridPlatformsConductor::TestsRunner
- Includes:
- LoggerHelpers, ParallelThreads
- Defined in:
- lib/hybrid_platforms_conductor/tests_runner.rb
Overview
Class running tests
Constant Summary
Constants included from LoggerHelpers
LoggerHelpers::LEVELS_MODIFIERS, LoggerHelpers::LEVELS_TO_STDERR
Instance Attribute Summary collapse
-
#max_threads_connection_on_nodes ⇒ Object
Number of threads max to use for tests connecting to nodes [default: 64] Integer.
-
#max_threads_nodes ⇒ Object
Number of threads max to use for tests running at node level [default: 8] Integer.
-
#max_threads_platforms ⇒ Object
Number of threads max to use for tests running at platform level [default: 8] Integer.
-
#reports ⇒ Object
List of reports to use [default: []] Array<Symbol>.
-
#skip_run ⇒ Object
Do we skip running check-node? [default: false] Boolean.
-
#tests ⇒ Object
List of tests to execute [default: []] Array<Symbol>.
Instance Method Summary collapse
-
#initialize(logger: Logger.new(STDOUT), logger_stderr: Logger.new(STDERR), config: Config.new, cmd_runner: CmdRunner.new, platforms_handler: PlatformsHandler.new, nodes_handler: NodesHandler.new, actions_executor: ActionsExecutor.new, deployer: Deployer.new) ⇒ TestsRunner
constructor
Constructor.
-
#options_parse(options_parser) ⇒ Object
Complete an option parser with options meant to control this tests runner.
-
#run_tests(nodes_selectors) ⇒ Object
Run the tests for a defined list of nodes selectors.
Methods included from LoggerHelpers
#err, #init_loggers, #log_component=, #log_debug?, #log_level=, #out, #section, #set_loggers_format, #stderr_device, #stderr_device=, #stderr_displayed?, #stdout_device, #stdout_device=, #stdout_displayed?, #stdouts_to_s, #with_progress_bar
Methods included from ParallelThreads
Constructor Details
#initialize(logger: Logger.new(STDOUT), logger_stderr: Logger.new(STDERR), config: Config.new, cmd_runner: CmdRunner.new, platforms_handler: PlatformsHandler.new, nodes_handler: NodesHandler.new, actions_executor: ActionsExecutor.new, deployer: Deployer.new) ⇒ TestsRunner
Constructor
- Parameters
-
logger (Logger): Logger to be used [default: Logger.new(STDOUT)]
-
logger_stderr (Logger): Logger to be used for stderr [default: Logger.new(STDERR)]
-
config (Config): Config to be used. [default: Config.new]
-
cmd_runner (CmdRunner): CmdRunner to be used [default: CmdRunner.new]
-
platforms_handler (PlatformsHandler): Platforms handler to be used [default: PlatformsHandler.new]
-
nodes_handler (NodesHandler): Nodes handler to be used [default: NodesHandler.new]
-
actions_executor (ActionsExecutor): Actions Executor to be used for the tests [default: ActionsExecutor.new]
-
deployer (Deployer): Deployer to be used for the tests needing why-run deployments [default: Deployer.new]
52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 |
# File 'lib/hybrid_platforms_conductor/tests_runner.rb', line 52 def initialize( logger: Logger.new(STDOUT), logger_stderr: Logger.new(STDERR), config: Config.new, cmd_runner: CmdRunner.new, platforms_handler: PlatformsHandler.new, nodes_handler: NodesHandler.new, actions_executor: ActionsExecutor.new, deployer: Deployer.new ) init_loggers(logger, logger_stderr) @config = config @cmd_runner = cmd_runner @platforms_handler = platforms_handler @nodes_handler = nodes_handler @actions_executor = actions_executor @deployer = deployer @platforms_handler.inject_dependencies(nodes_handler: @nodes_handler, actions_executor: @actions_executor) Test.nodes_handler = nodes_handler @tests_plugins = Plugins.new(:test, logger: @logger, logger_stderr: @logger_stderr) # The list of tests reports plugins, with their associated class # Hash< Symbol, Class > @reports_plugins = Plugins.new(:test_report, logger: @logger, logger_stderr: @logger_stderr) # Register test classes from platforms @platforms_handler.known_platforms.each do |platform| if platform.respond_to?(:tests) platform.tests.each do |test_name, test_class| @tests_plugins[test_name] = test_class end end end # Do we skip running check-node? @skip_run = false # List of tests to be performed @tests = [] # List of reports to be used @reports = [] @max_threads_connection_on_nodes = 64 @max_threads_nodes = 8 @max_threads_platforms = 8 end |
Instance Attribute Details
#max_threads_connection_on_nodes ⇒ Object
Number of threads max to use for tests connecting to nodes [default: 64] Integer
31 32 33 |
# File 'lib/hybrid_platforms_conductor/tests_runner.rb', line 31 def max_threads_connection_on_nodes @max_threads_connection_on_nodes end |
#max_threads_nodes ⇒ Object
Number of threads max to use for tests running at node level [default: 8] Integer
35 36 37 |
# File 'lib/hybrid_platforms_conductor/tests_runner.rb', line 35 def max_threads_nodes @max_threads_nodes end |
#max_threads_platforms ⇒ Object
Number of threads max to use for tests running at platform level [default: 8] Integer
39 40 41 |
# File 'lib/hybrid_platforms_conductor/tests_runner.rb', line 39 def max_threads_platforms @max_threads_platforms end |
#reports ⇒ Object
List of reports to use [default: []] Array<Symbol>
23 24 25 |
# File 'lib/hybrid_platforms_conductor/tests_runner.rb', line 23 def reports @reports end |
#skip_run ⇒ Object
Do we skip running check-node? [default: false] Boolean
27 28 29 |
# File 'lib/hybrid_platforms_conductor/tests_runner.rb', line 27 def skip_run @skip_run end |
#tests ⇒ Object
List of tests to execute [default: []] Array<Symbol>
19 20 21 |
# File 'lib/hybrid_platforms_conductor/tests_runner.rb', line 19 def tests @tests end |
Instance Method Details
#options_parse(options_parser) ⇒ Object
Complete an option parser with options meant to control this tests runner
- Parameters
-
options_parser (OptionParser): The option parser to complete
98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 |
# File 'lib/hybrid_platforms_conductor/tests_runner.rb', line 98

# Complete an option parser with options meant to control this tests runner.
# NOTE(review): the extracted source had lost the receiver variable
# (`def () .separator ''`); restored as `options_parser` per the documented
# signature #options_parse(options_parser).
#
# Parameters::
# * *options_parser* (OptionParser): The option parser to complete
def options_parse(options_parser)
  options_parser.separator ''
  options_parser.separator 'Tests runner options:'
  options_parser.on('-i', '--tests-list FILE_NAME', 'Specify a tests file name. The file should contain a list of tests name (1 per line). Can be used several times.') do |file_name|
    # Ignore blank lines and comment lines (starting with #)
    @tests.concat(
      File.read(file_name).
        split("\n").
        reject { |line| line.strip.empty? || line =~ /^#.+/ }.
        map(&:to_sym)
    )
  end
  options_parser.on('-k', '--skip-run', 'Skip running the check-node commands for real, and just analyze existing run logs.') do
    @skip_run = true
  end
  options_parser.on('-r', '--report REPORT', "Specify a report name. Can be used several times. Can be all for all reports. Possible values: #{@reports_plugins.keys.sort.join(', ')} (defaults to stdout).") do |report|
    @reports << report.to_sym
  end
  options_parser.on('-t', '--test TEST', "Specify a test name. Can be used several times. Can be all for all tests. Possible values: #{@tests_plugins.keys.sort.join(', ')} (defaults to all).") do |test_name|
    @tests << test_name.to_sym
  end
  options_parser.on('--max-threads-connections NBR_THREADS', "Specify the max number of threads to parallelize tests connecting on nodes (defaults to #{@max_threads_connection_on_nodes}).") do |nbr_threads|
    # Integer() raises on malformed values instead of silently returning 0
    @max_threads_connection_on_nodes = Integer(nbr_threads)
  end
  options_parser.on('--max-threads-nodes NBR_THREADS', "Specify the max number of threads to parallelize tests at node level (defaults to #{@max_threads_nodes}).") do |nbr_threads|
    @max_threads_nodes = Integer(nbr_threads)
  end
  options_parser.on('--max-threads-platforms NBR_THREADS', "Specify the max number of threads to parallelize tests at platform level (defaults to #{@max_threads_platforms}).") do |nbr_threads|
    @max_threads_platforms = Integer(nbr_threads)
  end
end
#run_tests(nodes_selectors) ⇒ Object
Run the tests for a defined list of nodes selectors
- Parameters
-
nodes_selectors (Array<Object>): List of nodes selectors on which tests should be run
- Result
-
Integer: An exit code:
-
0: Successful.
-
1: Some tests have failed.
-
137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 |
# File 'lib/hybrid_platforms_conductor/tests_runner.rb', line 137

# Run the tests for a defined list of nodes selectors.
#
# Parameters::
# * *nodes_selectors* (Array<Object>): List of nodes selectors on which tests should be run
# Result::
# * Integer: An exit code:
#   * 0: Successful.
#   * 1: Some tests have failed.
def run_tests(nodes_selectors)
  # Compute the resolved list of tests to perform
  @tests << :all if @tests.empty?
  @tests = @tests_plugins.keys if @tests.include?(:all)
  @tests.uniq!
  @tests.sort!
  # Same resolution for reports: default to stdout, expand :all
  @reports = [:stdout] if @reports.empty?
  @reports = @reports_plugins.keys if @reports.include?(:all)
  @reports.uniq!
  @reports.sort!
  unknown_tests = @tests - @tests_plugins.keys
  raise "Unknown test names: #{unknown_tests.join(', ')}" unless unknown_tests.empty?
  @nodes = @nodes_handler.select_nodes(nodes_selectors).uniq.sort
  # Resolve the expected failures from the config.
  # Expected failures at node level
  # Hash< Symbol, Hash< String, String > >
  # Hash< test_name, Hash< node, reason > >
  @node_expected_failures = {}
  @config.expected_failures.each do |expected_failure_info|
    selected_nodes = @nodes_handler.select_from_nodes_selector_stack(expected_failure_info[:nodes_selectors_stack])
    expected_failure_info[:tests].each do |test_name|
      @node_expected_failures[test_name] = {} unless @node_expected_failures.key?(test_name)
      selected_nodes.each do |node|
        # A node can be covered by several expected-failure rules: concatenate reasons
        if @node_expected_failures[test_name].key?(node)
          @node_expected_failures[test_name][node] += " + #{expected_failure_info[:reason]}"
        else
          @node_expected_failures[test_name][node] = expected_failure_info[:reason]
        end
      end
    end
  end
  # Expected failures at platform level
  # Hash< Symbol, Hash< String, String > >
  # Hash< test_name, Hash< platform, reason > >
  @platform_expected_failures = {}
  @platforms_handler.known_platforms.each do |platform|
    platform_nodes = platform.known_nodes
    @node_expected_failures.each do |test_name, expected_failures_for_test|
      # A platform-level failure is expected only when ALL its nodes are expected to fail this test
      if (platform_nodes - expected_failures_for_test.keys).empty?
        # We have an expected failure for this test
        @platform_expected_failures[test_name] = {} unless @platform_expected_failures.key?(test_name)
        @platform_expected_failures[test_name][platform.name] = expected_failures_for_test.values.uniq.join(' + ')
      end
    end
  end
  # Keep a list of all tests that have run for the report
  # Array< Test >
  @tests_run = []
  run_tests_global
  run_tests_platform
  run_tests_for_nodes
  run_tests_connection_on_nodes
  run_tests_on_check_nodes
  @tested_platforms = @tests_run.map { |test| test.platform }.compact.uniq.sort
  # Check that tests that were expected to fail did not succeed.
  @tests_run.each do |test|
    if test.executed?
      expected_failure = test.expected_failure
      if expected_failure
        if test.errors.empty?
          # Should have failed
          error(
            "Test #{test} was marked to fail (#{expected_failure}) but it succeeded. Please remove it from the expected failures in case the issue has been resolved.",
            platform: test.platform,
            node: test.node,
            force_failure: true
          )
        else
          out "Expected failure for #{test} (#{expected_failure}):\n#{test.errors.map { |error| " - #{error}" }.join("\n")}".yellow
        end
      end
    end
  end
  # If all tests were executed, make sure that there are no expected failures that have not even been tested.
  if @tests_plugins.keys - @tests == []
    @node_expected_failures.each do |test_name, test_expected_failures|
      test_expected_failures.each do |node, expected_failure|
        # Check that a test has been run for this expected failure
        unless @tests_run.find do |test|
          test.name == test_name &&
            (
              (test.node.nil? && node == '') ||
              (!test.node.nil? && node == test.node)
            )
        end
          error("A test named #{test_name} for node #{node} was expected to fail (#{expected_failure}), but no test has been run. Please remove it from the expected failures if this expected failure is obsolete.")
        end
      end
    end
  end
  # Produce reports
  @reports.each do |report|
    begin
      @reports_plugins[report].new(@logger, @logger_stderr, @config, @nodes_handler, @nodes, @tested_platforms, @tests_run).report
    rescue
      # A failing report plugin should not abort the whole run nor the other reports
      log_error "Uncaught exception while producing report #{report}: #{$!}\n#{$!.backtrace.join("\n")}"
    end
  end
  out
  # Exit code reflects only unexpected errors: expected failures do not fail the run
  if @tests_run.all? { |test| test.errors.empty? || !test.expected_failure.nil? }
    out '===== No unexpected errors ====='.green.bold
    0
  else
    out '===== Some errors were found. Check output. ====='.red.bold
    1
  end
end