Skip to content

Commit e617733

Browse files
authored
Adding verbose switch and fixing unclosed pool (#395)
Resolves #340 * Implementing the verbose argument * Implementing the verbose switch The verbose switch was added to optimize() method to enable or disable the logs and progress bar. * Implementing the verbose switch The verbose switch was added to optimize() method. Pool of Processes is closed at the end of calculations * Pool of processes is closed Pool of Processes is closed at the end of calculations. * Pool of Processes is closed Pool of Processes is closed at the end of calculations. * Fixed the pool.close() if n_process=None * Fixed the pool.close() if n_process=None * Fixed the pool.close() if n_process=None * Implementing the verbose switch pool.close() and verbose switch were added to optimize() method.
1 parent b3c60b0 commit e617733

File tree

4 files changed

+70
-21
lines changed

4 files changed

+70
-21
lines changed

pyswarms/discrete/binary.py

+19-6
Original file line numberDiff line numberDiff line change
@@ -137,7 +137,7 @@ def __init__(
137137
self.vh = VelocityHandler(strategy=vh_strategy)
138138
self.name = __name__
139139

140-
def optimize(self, objective_func, iters, n_processes=None, **kwargs):
140+
def optimize(self, objective_func, iters, n_processes=None, verbose=False, **kwargs):
141141
"""Optimize the swarm for a number of iterations
142142
143143
Performs the optimization to evaluate the objective
@@ -152,6 +152,8 @@ def optimize(self, objective_func, iters, n_processes=None, **kwargs):
152152
n_processes : int, optional
153153
number of processes to use for parallel particle evaluation
154154
Default is None with no parallelization.
155+
verbose : bool
156+
enable or disable the logs and progress bar (default: False, i.e. logs and progress bar are shown)
155157
kwargs : dict
156158
arguments for objective function
157159
@@ -161,10 +163,16 @@ def optimize(self, objective_func, iters, n_processes=None, **kwargs):
161163
the local best cost and the local best position among the
162164
swarm.
163165
"""
166+
# Apply verbosity
167+
if verbose:
168+
logginglevel = logging.NOTSET
169+
else:
170+
logginglevel = logging.INFO
171+
164172
self.rep.log("Obj. func. args: {}".format(kwargs), lvl=logging.DEBUG)
165173
self.rep.log(
166174
"Optimize for {} iters with {}".format(iters, self.options),
167-
lvl=logging.INFO,
175+
lvl=logginglevel,
168176
)
169177
# Populate memory of the handlers
170178
self.vh.memory = self.swarm.position
@@ -174,7 +182,7 @@ def optimize(self, objective_func, iters, n_processes=None, **kwargs):
174182

175183
self.swarm.pbest_cost = np.full(self.swarm_size[0], np.inf)
176184
ftol_history = [None] * self.ftol_iter
177-
for i in self.rep.pbar(iters, self.name):
185+
for i in range(iters) if verbose else self.rep.pbar(iters, self.name):
178186
# Compute cost for current position and personal best
179187
self.swarm.current_cost = compute_objective_function(
180188
self.swarm, objective_func, pool, **kwargs
@@ -187,8 +195,9 @@ def optimize(self, objective_func, iters, n_processes=None, **kwargs):
187195
self.swarm.best_pos, self.swarm.best_cost = self.top.compute_gbest(
188196
self.swarm, p=self.p, k=self.k
189197
)
190-
# Print to console
191-
self.rep.hook(best_cost=self.swarm.best_cost)
198+
if not verbose:
199+
# Print to console
200+
self.rep.hook(best_cost=self.swarm.best_cost)
192201
# Save to history
193202
hist = self.ToHistory(
194203
best_cost=self.swarm.best_cost,
@@ -219,8 +228,12 @@ def optimize(self, objective_func, iters, n_processes=None, **kwargs):
219228
"Optimization finished | best cost: {}, best pos: {}".format(
220229
final_best_cost, final_best_pos
221230
),
222-
lvl=logging.INFO,
231+
lvl=logginglevel,
223232
)
233+
# Close Pool of Processes
234+
if n_processes is not None:
235+
pool.close()
236+
224237
return (final_best_cost, final_best_pos)
225238

226239
def _compute_position(self, swarm):

pyswarms/single/general_optimizer.py

+17-5
Original file line numberDiff line numberDiff line change
@@ -187,7 +187,7 @@ def __init__(
187187
self.vh = VelocityHandler(strategy=vh_strategy)
188188
self.name = __name__
189189

190-
def optimize(self, objective_func, iters, n_processes=None, **kwargs):
190+
def optimize(self, objective_func, iters, n_processes=None, verbose=False, **kwargs):
191191
"""Optimize the swarm for a number of iterations
192192
193193
Performs the optimization to evaluate the objective
@@ -201,6 +201,8 @@ def optimize(self, objective_func, iters, n_processes=None, **kwargs):
201201
number of iterations
202202
n_processes : int
203203
number of processes to use for parallel particle evaluation (default: None = no parallelization)
204+
verbose : bool
205+
enable or disable the logs and progress bar (default: False, i.e. logs and progress bar are shown)
204206
kwargs : dict
205207
arguments for the objective function
206208
@@ -209,10 +211,16 @@ def optimize(self, objective_func, iters, n_processes=None, **kwargs):
209211
tuple
210212
the global best cost and the global best position.
211213
"""
214+
# Apply verbosity
215+
if verbose:
216+
logginglevel = logging.NOTSET
217+
else:
218+
logginglevel = logging.INFO
219+
212220
self.rep.log("Obj. func. args: {}".format(kwargs), lvl=logging.DEBUG)
213221
self.rep.log(
214222
"Optimize for {} iters with {}".format(iters, self.options),
215-
lvl=logging.INFO,
223+
lvl=logginglevel,
216224
)
217225

218226
# Populate memory of the handlers
@@ -224,7 +232,7 @@ def optimize(self, objective_func, iters, n_processes=None, **kwargs):
224232

225233
self.swarm.pbest_cost = np.full(self.swarm_size[0], np.inf)
226234
ftol_history = [None] * self.ftol_iter
227-
for i in self.rep.pbar(iters, self.name):
235+
for i in range(iters) if verbose else self.rep.pbar(iters, self.name):
228236
# Compute cost for current position and personal best
229237
# fmt: off
230238
self.swarm.current_cost = compute_objective_function(self.swarm, objective_func, pool=pool, **kwargs)
@@ -236,7 +244,8 @@ def optimize(self, objective_func, iters, n_processes=None, **kwargs):
236244
self.swarm, **self.options
237245
)
238246
# Print to console
239-
self.rep.hook(best_cost=self.swarm.best_cost)
247+
if not verbose:
248+
self.rep.hook(best_cost=self.swarm.best_cost)
240249
hist = self.ToHistory(
241250
best_cost=self.swarm.best_cost,
242251
mean_pbest_cost=np.mean(self.swarm.pbest_cost),
@@ -271,6 +280,9 @@ def optimize(self, objective_func, iters, n_processes=None, **kwargs):
271280
"Optimization finished | best cost: {}, best pos: {}".format(
272281
final_best_cost, final_best_pos
273282
),
274-
lvl=logging.INFO,
283+
lvl=logginglevel,
275284
)
285+
# Close Pool of Processes
286+
if n_processes is not None:
287+
pool.close()
276288
return (final_best_cost, final_best_pos)

pyswarms/single/global_best.py

+17-5
Original file line numberDiff line numberDiff line change
@@ -148,7 +148,7 @@ def __init__(
148148
self.vh = VelocityHandler(strategy=vh_strategy)
149149
self.name = __name__
150150

151-
def optimize(self, objective_func, iters, n_processes=None, **kwargs):
151+
def optimize(self, objective_func, iters, n_processes=None, verbose=False, **kwargs):
152152
"""Optimize the swarm for a number of iterations
153153
154154
Performs the optimization to evaluate the objective
@@ -162,6 +162,8 @@ def optimize(self, objective_func, iters, n_processes=None, **kwargs):
162162
number of iterations
163163
n_processes : int
164164
number of processes to use for parallel particle evaluation (default: None = no parallelization)
165+
verbose : bool
166+
enable or disable the logs and progress bar (default: False, i.e. logs and progress bar are shown)
165167
kwargs : dict
166168
arguments for the objective function
167169
@@ -170,11 +172,17 @@ def optimize(self, objective_func, iters, n_processes=None, **kwargs):
170172
tuple
171173
the global best cost and the global best position.
172174
"""
175+
176+
# Apply verbosity
177+
if verbose:
178+
logginglevel = logging.NOTSET
179+
else:
180+
logginglevel = logging.INFO
173181

174182
self.rep.log("Obj. func. args: {}".format(kwargs), lvl=logging.DEBUG)
175183
self.rep.log(
176184
"Optimize for {} iters with {}".format(iters, self.options),
177-
lvl=logging.INFO,
185+
lvl=logginglevel,
178186
)
179187
# Populate memory of the handlers
180188
self.bh.memory = self.swarm.position
@@ -185,7 +193,7 @@ def optimize(self, objective_func, iters, n_processes=None, **kwargs):
185193

186194
self.swarm.pbest_cost = np.full(self.swarm_size[0], np.inf)
187195
ftol_history = [None] * self.ftol_iter
188-
for i in self.rep.pbar(iters, self.name):
196+
for i in range(iters) if verbose else self.rep.pbar(iters, self.name):
189197
# Compute cost for current position and personal best
190198
# fmt: off
191199
self.swarm.current_cost = compute_objective_function(self.swarm, objective_func, pool=pool, **kwargs)
@@ -194,7 +202,8 @@ def optimize(self, objective_func, iters, n_processes=None, **kwargs):
194202
best_cost_yet_found = self.swarm.best_cost
195203
self.swarm.best_pos, self.swarm.best_cost = self.top.compute_gbest(self.swarm)
196204
# fmt: on
197-
self.rep.hook(best_cost=self.swarm.best_cost)
205+
if not verbose:
206+
self.rep.hook(best_cost=self.swarm.best_cost)
198207
# Save to history
199208
hist = self.ToHistory(
200209
best_cost=self.swarm.best_cost,
@@ -228,6 +237,9 @@ def optimize(self, objective_func, iters, n_processes=None, **kwargs):
228237
"Optimization finished | best cost: {}, best pos: {}".format(
229238
final_best_cost, final_best_pos
230239
),
231-
lvl=logging.INFO,
240+
lvl=logginglevel,
232241
)
242+
# Close Pool of Processes
243+
if n_processes is not None:
244+
pool.close()
233245
return (final_best_cost, final_best_pos)

pyswarms/single/local_best.py

+17-5
Original file line numberDiff line numberDiff line change
@@ -172,7 +172,7 @@ def __init__(
172172
self.vh = VelocityHandler(strategy=vh_strategy)
173173
self.name = __name__
174174

175-
def optimize(self, objective_func, iters, n_processes=None, **kwargs):
175+
def optimize(self, objective_func, iters, n_processes=None, verbose=False, **kwargs):
176176
"""Optimize the swarm for a number of iterations
177177
178178
Performs the optimization to evaluate the objective
@@ -186,6 +186,8 @@ def optimize(self, objective_func, iters, n_processes=None, **kwargs):
186186
number of iterations
187187
n_processes : int
188188
number of processes to use for parallel particle evaluation (default: None = no parallelization)
189+
verbose : bool
190+
enable or disable the logs and progress bar (default: False, i.e. logs and progress bar are shown)
189191
kwargs : dict
190192
arguments for the objective function
191193
@@ -195,10 +197,16 @@ def optimize(self, objective_func, iters, n_processes=None, **kwargs):
195197
the local best cost and the local best position among the
196198
swarm.
197199
"""
200+
# Apply verbosity
201+
if verbose:
202+
logginglevel = logging.NOTSET
203+
else:
204+
logginglevel = logging.INFO
205+
198206
self.rep.log("Obj. func. args: {}".format(kwargs), lvl=logging.DEBUG)
199207
self.rep.log(
200208
"Optimize for {} iters with {}".format(iters, self.options),
201-
lvl=logging.INFO,
209+
lvl=logginglevel,
202210
)
203211
# Populate memory of the handlers
204212
self.bh.memory = self.swarm.position
@@ -209,7 +217,7 @@ def optimize(self, objective_func, iters, n_processes=None, **kwargs):
209217

210218
self.swarm.pbest_cost = np.full(self.swarm_size[0], np.inf)
211219
ftol_history = [None] * self.ftol_iter
212-
for i in self.rep.pbar(iters, self.name):
220+
for i in range(iters) if verbose else self.rep.pbar(iters, self.name):
213221
# Compute cost for current position and personal best
214222
self.swarm.current_cost = compute_objective_function(
215223
self.swarm, objective_func, pool=pool, **kwargs
@@ -222,7 +230,8 @@ def optimize(self, objective_func, iters, n_processes=None, **kwargs):
222230
self.swarm.best_pos, self.swarm.best_cost = self.top.compute_gbest(
223231
self.swarm, p=self.p, k=self.k
224232
)
225-
self.rep.hook(best_cost=np.min(self.swarm.best_cost))
233+
if not verbose:
234+
self.rep.hook(best_cost=np.min(self.swarm.best_cost))
226235
# Save to history
227236
hist = self.ToHistory(
228237
best_cost=self.swarm.best_cost,
@@ -256,6 +265,9 @@ def optimize(self, objective_func, iters, n_processes=None, **kwargs):
256265
"Optimization finished | best cost: {}, best pos: {}".format(
257266
final_best_cost, final_best_pos
258267
),
259-
lvl=logging.INFO,
268+
lvl=logginglevel,
260269
)
270+
# Close Pool of Processes
271+
if n_processes is not None:
272+
pool.close()
261273
return (final_best_cost, final_best_pos)

0 commit comments

Comments
 (0)