@@ -39,12 +39,19 @@ def compare_command_logic(args, project_name, project_version):
     tf_github_org = args.github_org
     tf_github_repo = args.github_repo
     tf_triggering_env = args.triggering_env
-    deployment_type = args.deployment_type
-    deployment_name = args.deployment_name
+    if args.baseline_deployment_name != "":
+        baseline_deployment_name = args.baseline_deployment_name
+    else:
+        baseline_deployment_name = args.deployment_name
+    if args.comparison_deployment_name != "":
+        comparison_deployment_name = args.comparison_deployment_name
+    else:
+        comparison_deployment_name = args.deployment_name
+
     logging.info(
-        "Using deployment_type={} and deployment_name={} for the analysis".format(
-            deployment_type,
-            deployment_name,
+        "Using baseline deployment_name={} and comparison deployment_name={} for the analysis".format(
+            baseline_deployment_name,
+            comparison_deployment_name,
         )
     )
     from_ts_ms = args.from_timestamp
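
This hunk replaces the single `deployment_type`/`deployment_name` pair with separate baseline and comparison deployment names that fall back to the shared `--deployment-name` value when unset. A minimal sketch of that fallback, assuming empty-string argparse defaults (the parser wiring here is an assumption, not part of the diff):

```python
# Sketch of the deployment-name fallback introduced above. Flag names mirror
# the diff; the argparse setup and default values are assumptions.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--deployment-name", default="oss-standalone")
parser.add_argument("--baseline-deployment-name", default="")
parser.add_argument("--comparison-deployment-name", default="")
args = parser.parse_args(["--baseline-deployment-name", "oss-cluster-3-primaries"])

# Empty string means "not provided", so fall back to the shared value.
baseline_deployment_name = args.baseline_deployment_name or args.deployment_name
comparison_deployment_name = args.comparison_deployment_name or args.deployment_name
print(baseline_deployment_name, comparison_deployment_name)
# -> oss-cluster-3-primaries oss-standalone
```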
@@ -115,32 +122,35 @@ def compare_command_logic(args, project_name, project_version):
         used_key = testcases_metric_context_path_setname
 
     tags_regex_string = re.compile(args.testname_regex)
+    if args.test != "":
+        test_names = args.test.split(",")
+        logging.info("Using test name {}".format(test_names))
+    else:
+        try:
+            test_names = rts.smembers(used_key)
+            test_names = list(test_names)
+            test_names.sort()
+            final_test_names = []
+            for test_name in test_names:
+                test_name = test_name.decode()
+                match_obj = re.search(tags_regex_string, test_name)
+                if match_obj is not None:
+                    final_test_names.append(test_name)
+            test_names = final_test_names
 
-    try:
-        test_names = rts.smembers(used_key)
-        test_names = list(test_names)
-        test_names.sort()
-        final_test_names = []
-        for test_name in test_names:
-            test_name = test_name.decode()
-            match_obj = re.search(tags_regex_string, test_name)
-            if match_obj is not None:
-                final_test_names.append(test_name)
-        test_names = final_test_names
+        except redis.exceptions.ResponseError as e:
+            logging.warning(
+                "Error while trying to fetch test cases set (key={}) {}. ".format(
+                    used_key, e.__str__()
+                )
+            )
+            pass
 
-    except redis.exceptions.ResponseError as e:
     logging.warning(
-        "Error while trying to fetch test cases set (key={}) {}. ".format(
-            used_key, e.__str__()
+        "Based on test-cases set (key={}) we have {} comparison points. ".format(
+            used_key, len(test_names)
         )
     )
-        pass
-
-    logging.warning(
-        "Based on test-cases set (key={}) we have {} comparison points. ".format(
-            used_key, len(test_names)
-        )
-    )
     profilers_artifacts_matrix = []
     detected_regressions = []
     total_improvements = 0
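
This hunk makes the comparison-point selection two-stage: an explicit `--test` CSV list short-circuits the lookup; otherwise the candidate names come from a Redis set and are filtered by `--testname-regex`, with `ResponseError` downgraded to a warning. A standalone sketch of that resolution, assuming a local Redis and an illustrative set key (both assumptions):

```python
# Sketch of the two-stage test-name resolution. The key name, connection
# details, and stand-in argument values are assumptions for illustration.
import re
import redis

explicit_tests = ""                    # stand-in for args.test
tags_regex_string = re.compile(".*")   # stand-in for args.testname_regex
used_key = "ci.benchmarks/tests"       # illustrative set key, not from the diff

rts = redis.Redis(host="localhost", port=6379)

if explicit_tests != "":
    test_names = explicit_tests.split(",")
else:
    test_names = []
    try:
        # SMEMBERS returns a set of bytes: decode, sort, then regex-filter.
        members = sorted(rts.smembers(used_key))
        test_names = [
            m.decode() for m in members if re.search(tags_regex_string, m.decode())
        ]
    except redis.exceptions.ResponseError as e:
        print("Error while fetching test cases set (key={}): {}".format(used_key, e))
```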
@@ -154,23 +164,32 @@ def compare_command_logic(args, project_name, project_version):
             "{}={}".format(by_str, baseline_str),
             "metric={}".format(metric_name),
             "{}={}".format(test_filter, test_name),
-            "deployment_type={}".format(deployment_type),
-            "deployment_name={}".format(deployment_name),
+            "deployment_name={}".format(baseline_deployment_name),
+            "triggering_env={}".format(tf_triggering_env),
         ]
         filters_comparison = [
             "{}={}".format(by_str, comparison_str),
             "metric={}".format(metric_name),
             "{}={}".format(test_filter, test_name),
-            "deployment_type={}".format(deployment_type),
-            "deployment_name={}".format(deployment_name),
+            "deployment_name={}".format(comparison_deployment_name),
+            "triggering_env={}".format(tf_triggering_env),
         ]
         baseline_timeseries = rts.ts().queryindex(filters_baseline)
         comparison_timeseries = rts.ts().queryindex(filters_comparison)
+
+        # avoiding target time-series
+        comparison_timeseries = [x for x in comparison_timeseries if "target" not in x]
+        baseline_timeseries = [x for x in baseline_timeseries if "target" not in x]
         progress.update()
+        if args.verbose:
+            logging.info("Baseline timeseries {}".format(len(baseline_timeseries)))
+            logging.info("Comparison timeseries {}".format(len(comparison_timeseries)))
         if len(baseline_timeseries) != 1:
             if args.verbose:
                 logging.warning(
-                    "Baseline timeseries {}".format(len(baseline_timeseries))
+                    "Skipping this test given the value of timeseries !=1. Baseline timeseries {}".format(
+                        len(baseline_timeseries)
+                    )
                 )
             continue
         else:
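
This hunk swaps `deployment_type` for the per-side deployment name, adds `triggering_env` to the label filters, drops any series whose key contains "target", and skips the test unless exactly one baseline series matches. A sketch of that lookup against RedisTimeSeries via redis-py, with illustrative label values (the values are assumptions; `TS.QUERYINDEX` and the filter syntax are real):

```python
# Sketch of the per-test time-series lookup after the change. Label values
# below are illustrative assumptions; in the command they come from args.
import redis

# decode_responses=True so returned keys are str and the "target" check works.
rts = redis.Redis(host="localhost", port=6379, decode_responses=True)

filters_baseline = [
    "branch=master",
    "metric=Tests.Overall.rps",
    "test_name=memtier_benchmark-1Mkeys",
    "deployment_name=oss-standalone",
    "triggering_env=circleci",
]

# TS.QUERYINDEX returns the keys of every series matching the label filters.
baseline_timeseries = rts.ts().queryindex(filters_baseline)
# Drop helper "target" series so only real measurement series remain.
baseline_timeseries = [x for x in baseline_timeseries if "target" not in x]
if len(baseline_timeseries) != 1:
    print("Skipping: expected exactly 1 series, got {}".format(len(baseline_timeseries)))
```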
@@ -326,8 +345,8 @@ def compare_command_logic(args, project_name, project_version):
             ),
             headers=[
                 "Test Case",
-                "Baseline (median obs. +- std.dev)",
-                "Comparison (median obs. +- std.dev)",
+                "Baseline {} (median obs. +- std.dev)".format(baseline_branch),
+                "Comparison {} (median obs. +- std.dev)".format(comparison_branch),
                 "% change ({})".format(metric_mode),
                 "Note",
             ],
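
The last hunk threads the resolved branch names into the report's header row. A sketch of how those headers render, assuming a pytablewriter-style `MarkdownTableWriter` as the table backend (the diff only shows the `headers` list, so the writer and the sample values are assumptions):

```python
# Sketch of the branch-labelled comparison table. Writer choice and all
# values are assumptions for illustration; only the headers mirror the diff.
from pytablewriter import MarkdownTableWriter

baseline_branch, comparison_branch, metric_mode = "master", "my-feature", "higher-better"
writer = MarkdownTableWriter(
    table_name="Comparison table",
    headers=[
        "Test Case",
        "Baseline {} (median obs. +- std.dev)".format(baseline_branch),
        "Comparison {} (median obs. +- std.dev)".format(comparison_branch),
        "% change ({})".format(metric_mode),
        "Note",
    ],
    value_matrix=[["test-1", "100000 +- 1.0%", "105000 +- 1.2%", "5.0%", ""]],
)
writer.write_table()
```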