diff --git a/tpot2/base_evolver.py b/tpot2/base_evolver.py index 1a6da47a..401b2053 100644 --- a/tpot2/base_evolver.py +++ b/tpot2/base_evolver.py @@ -526,11 +526,12 @@ def step(self,): def one_generation_step(self, ): #your EA Algorithm goes here - n_survivors = max(1,int(self.cur_population_size*self.survival_percentage)) #always keep at least one individual - #Get survivors from current population - weighted_scores = self.population.get_column(self.population.population, column_names=self.objective_names) * self.objective_function_weights - new_population_index = np.ravel(self.survival_selector(weighted_scores, k=n_survivors)) #TODO make it clear that we are concatenating scores... - self.population.set_population(np.array(self.population.population)[new_population_index]) + if self.survival_selector is not None: + n_survivors = max(1,int(self.cur_population_size*self.survival_percentage)) #always keep at least one individual + #Get survivors from current population + weighted_scores = self.population.get_column(self.population.population, column_names=self.objective_names) * self.objective_function_weights + new_population_index = np.ravel(self.survival_selector(weighted_scores, k=n_survivors)) #TODO make it clear that we are concatenating scores... 
+ self.population.set_population(np.array(self.population.population)[new_population_index]) weighted_scores = self.population.get_column(self.population.population, column_names=self.objective_names) * self.objective_function_weights #number of crossover pairs and mutation only parent to generate diff --git a/tpot2/evolutionary_algorithms/__init__.py b/tpot2/evolutionary_algorithms/__init__.py index daad055a..139597f9 100644 --- a/tpot2/evolutionary_algorithms/__init__.py +++ b/tpot2/evolutionary_algorithms/__init__.py @@ -1,2 +1,2 @@ -from .simple_evolver import SimpleEvolver + diff --git a/tpot2/evolutionary_algorithms/simple_evolver.py b/tpot2/evolutionary_algorithms/simple_evolver.py deleted file mode 100644 index b69faa43..00000000 --- a/tpot2/evolutionary_algorithms/simple_evolver.py +++ /dev/null @@ -1,57 +0,0 @@ -import numpy as np -import tpot2 - - -class SimpleEvolver(tpot2.BaseEvolver): - - def one_generation_step(self): #EA Algorithm goes here - - - weighted_scores = self.population.get_column(self.population.population, column_names=self.objective_names) * self.objective_function_weights - - #number of crossover pairs and mutation only parent to generate - n_crossover = int(self.cur_population_size*self.crossover_probability) - n_crossover_then_mutate = int(self.cur_population_size*self.crossover_then_mutate_probability) - n_mutate_then_crossover = int(self.cur_population_size*self.mutate_then_crossover_probability) - n_total_crossover_pairs = n_crossover + n_crossover_then_mutate + n_mutate_then_crossover - n_mutate_parents = self.cur_population_size - n_total_crossover_pairs - - #get crossover pairs - if n_total_crossover_pairs > 0: - cx_parents_index = self.parent_selector(weighted_scores, k=n_total_crossover_pairs, n_parents=self.n_parents, ) #TODO make it clear that we are concatenating scores... 
- cx_var_ops = np.concatenate([ np.repeat("crossover",n_crossover), - np.repeat("mutate_then_crossover",n_mutate_then_crossover), - np.repeat("crossover_then_mutate",n_crossover_then_mutate), - ]) - else: - cx_parents_index = [] - cx_var_ops = [] - - #get mutation only parents - if n_mutate_parents > 0: - m_parents_index = self.parent_selector(weighted_scores, k=n_mutate_parents, n_parents=1, ) #TODO make it clear that we are concatenating scores... - m_var_ops = np.repeat("mutate",len(m_parents_index)) - else: - m_parents_index = [] - m_var_ops = [] - - cx_parents = np.array(self.population.population)[cx_parents_index] - m_parents = np.array(self.population.population)[m_parents_index] - parents = list(cx_parents) + list(m_parents) - - var_ops = np.concatenate([cx_var_ops, m_var_ops]) - offspring = self.population.create_offspring(parents, var_ops, n_jobs=self.n_jobs) - self.population.update_column(offspring, column_names="Generation", data=self.generation, ) - #print("done making offspring") - - #print("evaluating") - self.evaluate_population() - #print("done evaluating") - - #Get survivors from current population - n_survivors = max(1,int(self.cur_population_size*self.survival_percentage)) #always keep at least one individual - weighted_scores = self.population.get_column(self.population.population, column_names=self.objective_names) * self.objective_function_weights - new_population_index = np.ravel(self.survival_selector(weighted_scores, k=n_survivors)) #TODO make it clear that we are concatenating scores... 
- self.population.set_population(np.array(self.population.population)[new_population_index]) - - diff --git a/tpot2/tpot_estimator/estimator.py b/tpot2/tpot_estimator/estimator.py index 5db15055..1593b5bc 100644 --- a/tpot2/tpot_estimator/estimator.py +++ b/tpot2/tpot_estimator/estimator.py @@ -83,6 +83,7 @@ def __init__(self, scorers, verbose = 0, periodic_checkpoint_folder = None, callback: tpot2.CallBackInterface = None, + processes = True, ): ''' @@ -376,6 +377,10 @@ def __init__(self, scorers, callback : tpot2.CallBackInterface, default=None Callback object. Not implemented + processes : bool, default=True + If True, will use multiprocessing to parallelize the optimization process. If False, will use threading. + True seems to perform better. However, False is required for interactive debugging. + ''' # sklearn BaseEstimator must have a corresponding attribute for each parameter. @@ -443,6 +448,8 @@ def __init__(self, scorers, self.objective_function_names = objective_function_names + self.processes = processes + #Initialize other used params @@ -508,7 +515,7 @@ def fit(self, X, y): silence_logs = 50 cluster = LocalCluster(n_workers=self.n_jobs, #if no client is passed in and no global client exists, create our own threads_per_worker=1, - processes=True, + processes=self.processes, silence_logs=silence_logs, memory_limit=self.memory_limit) _client = Client(cluster) diff --git a/tpot2/tpot_estimator/templates/tpottemplates.py b/tpot2/tpot_estimator/templates/tpottemplates.py index 099e1c93..48845b77 100644 --- a/tpot2/tpot_estimator/templates/tpottemplates.py +++ b/tpot2/tpot_estimator/templates/tpottemplates.py @@ -66,6 +66,7 @@ def __init__( self, verbose=0, periodic_checkpoint_folder=None, callback: tpot2.CallBackInterface=None, + processes = True, ): super(TPOTRegressor,self).__init__( scorers=scorers, @@ -127,6 +128,7 @@ def __init__( self, verbose=verbose, periodic_checkpoint_folder=periodic_checkpoint_folder, callback=callback, + processes=processes, ) 
@@ -191,6 +193,7 @@ def __init__( self, verbose=0, periodic_checkpoint_folder=None, callback: tpot2.CallBackInterface=None, + processes = True, ): super(TPOTClassifier,self).__init__( scorers=scorers, @@ -252,6 +255,7 @@ def __init__( self, verbose=verbose, periodic_checkpoint_folder=periodic_checkpoint_folder, callback=callback, + processes=processes, )