diff --git a/TrainerClass.py b/TrainerClass.py
index c65adbe..cb351bf 100644
--- a/TrainerClass.py
+++ b/TrainerClass.py
@@ -154,6 +154,11 @@ class eNoseTrainer:

     def train_and_score_conv1D_v1(self, X_train, X_test, y_train, y_test, epochs=30, num_samples=25):
+        ray.init(ignore_reinit_error=True)
+        X_train_ref = ray.put(X_train)
+        Y_train_ref = ray.put(y_train)
+        X_test_ref = ray.put(X_test)
+        Y_test_ref = ray.put(y_test)
         def build_model(config, input_shape, output_dim):
             model = keras.Sequential([
                 layers.Conv1D(filters=config['filters'], kernel_size=config['kernel_size'], activation='relu', input_shape=input_shape),
@@ -199,8 +204,11 @@ class eNoseTrainer:
         }
         scheduler = ASHAScheduler(metric='mse', mode='min', max_t=epochs, grace_period=5, reduction_factor=2)

-        analysis = tune.run(train_model, config=config_space, num_samples=num_samples, scheduler=scheduler)
-
+        # analysis = tune.run(train_model, config=config_space, num_samples=num_samples, scheduler=scheduler)
+        analysis = tune.run(
+            tune.with_parameters(train_model, X_train=ray.get(X_train_ref), Y_train=ray.get(Y_train_ref), X_test=ray.get(X_test_ref), Y_test=ray.get(Y_test_ref)),
+            config=config_space, num_samples=num_samples, scheduler=scheduler
+        )
         best_config = analysis.get_best_config(metric='mse', mode='min')
         best_model = build_model(best_config, X_train.shape[1:], Y_train.shape[1])
         best_model.fit(X_train, Y_train, epochs=epochs, batch_size=best_config['batch_size'], verbose=0)
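For reference, below is a minimal, self-contained sketch of the tune.with_parameters pattern this patch introduces, using synthetic arrays and a simplified Conv1D trainable in place of the project's real data and build_model; the data shapes, search-space values, and the body of train_model here are illustrative assumptions, not the project's actual code. Note that tune.with_parameters already stores its keyword arguments in the Ray object store, so the arrays can be handed to it directly; the explicit ray.put/ray.get round trip in the patch is redundant rather than harmful.

import numpy as np
import ray
from ray import tune
from ray.tune.schedulers import ASHAScheduler
from tensorflow import keras
from tensorflow.keras import layers

def train_model(config, X_train=None, Y_train=None, X_test=None, Y_test=None):
    # Trainable for the legacy tune.run function API; tune.with_parameters fills
    # the data keyword arguments from the Ray object store. The model below is a
    # simplified stand-in for the project's build_model.
    model = keras.Sequential([
        layers.Conv1D(filters=config["filters"], kernel_size=config["kernel_size"],
                      activation="relu", input_shape=X_train.shape[1:]),
        layers.Flatten(),
        layers.Dense(Y_train.shape[1]),
    ])
    model.compile(optimizer="adam", loss="mse")
    model.fit(X_train, Y_train, epochs=5, batch_size=config["batch_size"], verbose=0)
    mse = model.evaluate(X_test, Y_test, verbose=0)
    # Legacy reporting call matching the tune.run API used in the patch; newer
    # Ray releases report metrics as a dict instead.
    tune.report(mse=mse)

if __name__ == "__main__":
    ray.init(ignore_reinit_error=True)
    # Synthetic stand-ins for the real eNose arrays (assumed shapes).
    X_train, Y_train = np.random.rand(64, 32, 4), np.random.rand(64, 2)
    X_test, Y_test = np.random.rand(16, 32, 4), np.random.rand(16, 2)

    config_space = {
        "filters": tune.choice([16, 32]),
        "kernel_size": tune.choice([3, 5]),
        "batch_size": tune.choice([8, 16]),
    }
    scheduler = ASHAScheduler(metric="mse", mode="min", max_t=5,
                              grace_period=1, reduction_factor=2)
    analysis = tune.run(
        # Arrays are passed directly; with_parameters puts them in the object
        # store once and shares them across all trials.
        tune.with_parameters(train_model, X_train=X_train, Y_train=Y_train,
                             X_test=X_test, Y_test=Y_test),
        config=config_space, num_samples=4, scheduler=scheduler,
    )
    print(analysis.get_best_config(metric="mse", mode="min"))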