13
13
14
14
class TestEvaluator (AbstractEvaluator ):
15
15
16
- def __init__ (self , Datamanager , output_dir ,
16
+ def __init__ (self , Datamanager , backend ,
17
17
configuration = None ,
18
18
with_predictions = False ,
19
19
all_scoring_functions = False ,
20
20
seed = 1 ):
21
21
super (TestEvaluator , self ).__init__ (
22
- Datamanager , output_dir , configuration ,
22
+ Datamanager , backend , configuration ,
23
23
with_predictions = with_predictions ,
24
24
all_scoring_functions = all_scoring_functions ,
25
25
seed = seed ,
@@ -71,16 +71,19 @@ def predict_and_loss(self, train=False):
71
71
72
72
# Closure for evaluating a single configuration on the test evaluator.
# Has a stupid name so nosetests doesn't regard it as a test.
def eval_t(queue, config, data, backend, seed, num_run, subsample,
           with_predictions, all_scoring_functions,
           output_y_test):
    """Fit a ``TestEvaluator`` for *config* and push the outcome onto *queue*.

    Builds the evaluator from the datamanager *data* and *backend*, runs
    ``fit_predict_and_loss``, finalizes via ``finish_up`` (with file output
    disabled), and enqueues ``(duration, result, seed, run_info, status)``.

    NOTE(review): ``num_run``, ``subsample`` and ``output_y_test`` are
    accepted for signature compatibility with the caller but are not used
    in this body — confirm against the dispatching code.
    """
    evaluator = TestEvaluator(
        Datamanager=data,
        configuration=config,
        backend=backend,
        seed=seed,
        with_predictions=with_predictions,
        all_scoring_functions=all_scoring_functions,
    )

    loss, opt_pred, valid_pred, test_pred = evaluator.fit_predict_and_loss()
    # file_output=False: results travel back through the queue only.
    duration, result, seed, run_info = evaluator.finish_up(
        loss, opt_pred, valid_pred, test_pred, file_output=False)

    queue.put((duration, result, seed, run_info, StatusType.SUCCESS))
88
+
89
+
0 commit comments