
Commit 40a69a0

Author: Lindon Roberts (committed)
First debugging and test script for existing evaluations
1 parent 0b652bd commit 40a69a0

File tree

5 files changed: +68 -3 lines


dfols/__init__.py

Lines changed: 2 additions & 0 deletions
@@ -45,3 +45,5 @@
 from .solver import *
 __all__ = ['solve', 'OptimResults']

+from .evaluations_database import *
+__all__ += ['EvaluationDatabase']
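
With the re-export above, the new class is importable directly from the top-level package. A minimal sketch, using only methods that appear elsewhere in this commit (the dummy x and r(x) values are placeholders):

import numpy as np
import dfols

db = dfols.EvaluationDatabase()                                # now re-exported by dfols/__init__.py
db.append(np.zeros(3), np.ones(5), make_starting_eval=True)    # store an (x, r(x)) pair, mark it as the start
print(len(db), db.get_starting_eval_idx())                     # number of entries and the starting index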

dfols/controller.py

Lines changed: 1 addition & 1 deletion
@@ -427,8 +427,8 @@ def initialise_from_database(self, eval_database, number_of_samples, params):
                                                 dykstra_tol=params("dykstra.d_tol"))

         # Add suitable pre-existing evaluations
-        module_logger.debug("Adding %g pre-existing evaluations (aside from x0) to initial model" % len(perturbation_idx))
         for i, idx in enumerate(perturbation_idx):
+            module_logger.info("Adding pre-existing evaluation %g to initial model" % idx)
             x, rx = eval_database.get_eval(idx)
             self.model.change_point(i + 1, x - self.model.xbase, rx, -idx)  # use eval_num = -idx
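
The single aggregate DEBUG message is replaced by one INFO message per pre-existing evaluation added to the initial model, so the messages are visible under the logging setup used in the new example script. A minimal sketch (standard library logging only, nothing DFO-LS-specific assumed):

import logging
logging.basicConfig(level=logging.INFO, format='%(message)s')
# each selected database entry then produces a line of the form
#   Adding pre-existing evaluation 2 to initial model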

dfols/evaluations_database.py

Lines changed: 1 addition & 1 deletion
@@ -89,7 +89,7 @@ def select_starting_evals(self, delta, xl=None, xu=None, projections=[], tol=1e-
         n = len(xbase)
         module_logger.debug("Selecting starting evaluations from existing database")
         module_logger.debug("Have %g evaluations to choose from" % len(self))
-        module_logger.debug("Using base index %g, x0 =" % base_idx, xbase)
+        module_logger.debug("Using base index %g" % base_idx)

         # For linear interpolation, we will use the matrix
         # M = [[1, 0], [0, L]] where L has rows (xi-xbase)/delta
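
The removed call was malformed: the %-operator consumed base_idx, and xbase was then passed as an extra positional logging argument with no remaining placeholder, so the logging module's deferred formatting fails whenever debug logging is enabled. The commit simply drops xbase from the message; if the base point is still wanted in the log, a correct form (a sketch, not part of this commit) matching the surrounding style would be:

module_logger.debug("Using base index %g, x0 = %s" % (base_idx, str(xbase)))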

dfols/solver.py

Lines changed: 4 additions & 1 deletion
@@ -165,12 +165,15 @@ def solve_main(objfun, x0, argsf, xl, xu, projections, npt, rhobeg, rhoend, maxf
     # Evaluate at x0 (keep nf, nx correct and check for f < 1e-12)
     # The hard bit is determining what m = len(r0) should be, and allocating memory appropriately
     if r0_avg_old is None:
+        exit_info = None
         if x0_is_eval_database:
             # We have already got r(x0), so just extract this information
             nf = nf_so_far
             nx = nx_so_far
+            num_samples_run = 1
             r0_avg = x0.get_rx(x0.get_starting_eval_idx())
             m = len(r0_avg)
+            module_logger.info("Using pre-existing evaluation %g as starting point" % (x0.get_starting_eval_idx()))
         else:
             number_of_samples = max(nsamples(rhobeg, rhobeg, 0, nruns_so_far), 1)
             # Evaluate the first time...

@@ -188,7 +191,6 @@
             rvec_list[0, :] = r0
             obj_list[0] = obj0
             num_samples_run = 1
-            exit_info = None

             for i in range(1, number_of_samples):  # skip first eval - already did this
                 if nf >= maxfun:

@@ -972,6 +974,7 @@ def solve(objfun, x0, h=None, lh=None, prox_uh=None, argsf=(), argsh=(), argspro
         assert len(x0) > 0, "evaluation database x0 cannot be empty"
         assert 0 <= x0.get_starting_eval_idx() < len(x0), "evaluation database must have valid starting index set"
         x0_is_eval_database = True
+        n = len(x0.get_x(x0.get_starting_eval_idx()))
     else:
         x0 = np.array(x0).astype(float)
         n = len(x0)
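
The relocated exit_info = None and the new num_samples_run = 1 matter because the evaluation-database branch bypasses the sampling loop in which both were previously set, so solve_main could otherwise hit undefined names later on; the final hunk likewise sets n from the stored starting point instead of from a NumPy x0. A simplified sketch of the resulting control flow (illustrative only, not the actual solver code):

exit_info = None                                   # now initialised before the branch
if x0_is_eval_database:
    # reuse the stored residual vector; no new objective evaluations are made here
    r0_avg = x0.get_rx(x0.get_starting_eval_idx())
    num_samples_run = 1                            # previously only set on the sampling path
else:
    # evaluate objfun at x0 (possibly several times); this path already set
    # num_samples_run and, before this commit, exit_info
    ...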

examples/existing_evaluations.py

Lines changed: 60 additions & 0 deletions
@@ -0,0 +1,60 @@
+"""
+Demonstration of using database of existing evaluations to speed up DFO-LS
+
+Test problem is the 'Watson function': for details, see
+J. J. More, B. S. Garbow, K. E. Hillstrom. Testing Unconstrained Optimization Software.
+ACM Transactions on Mathematical Software, 7:1 (1981), pp. 17-41.
+"""
+import numpy as np
+import dfols
+
+# Define the objective function
+def watson(x):
+    n = len(x)
+    m = 31
+    fvec = np.zeros((m,), dtype=float)
+
+    for i in range(1, 30):  # i = 1, ..., 29
+        div = float(i) / 29.0
+        s1 = 0.0
+        dx = 1.0
+        for j in range(2, n + 1):  # j = 2, ..., n
+            s1 = s1 + (j - 1) * dx * x[j - 1]
+            dx = div * dx
+        s2 = 0.0
+        dx = 1.0
+        for j in range(1, n + 1):  # j = 1, ..., n
+            s2 = s2 + dx * x[j - 1]
+            dx = div * dx
+        fvec[i - 1] = s1 - s2 ** 2 - 1.0
+
+    fvec[29] = x[0]
+    fvec[30] = x[1] - x[0] ** 2 - 1.0
+
+    return fvec
+
+# Define the starting point
+n = 6
+x0 = 0.5 * np.ones((n,), dtype=float)
+
+# When n=6, we expect f(x0) ~ 16.4308 and f(xmin) ~ 0.00228767 at xmin ~ [-0.0157, 1.0124, 1.2604, -1.5137, 0.992996]
+
+# For optional extra output details
+import logging
+logging.basicConfig(level=logging.INFO, format='%(message)s')
+
+# Now build a database of evaluations
+eval_db = dfols.EvaluationDatabase()
+eval_db.append(x0, watson(x0), make_starting_eval=True)  # make x0 the starting point
+
+# Note: x0, x1 and x2 are collinear, so at least one of x1 and x2 shouldn't be included in the initial model
+x1 = np.ones((n,), dtype=float)
+x2 = np.zeros((n,), dtype=float)
+x3 = np.arange(n).astype(float)
+eval_db.append(x1, watson(x1))
+eval_db.append(x2, watson(x2))
+eval_db.append(x3, watson(x3))
+
+soln = dfols.solve(watson, eval_db)  # replace x0 with eval_db
+
+print(soln)
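
Running the example (python examples/existing_evaluations.py) with the INFO logging configured above is expected to show which stored points are reused: a "Using pre-existing evaluation ... as starting point" line from solver.py, then one "Adding pre-existing evaluation ... to initial model" line per selected point, with at least one of the collinear points x1/x2 skipped. The exact choice depends on select_starting_evals, so treat this as a sketch of the expected output rather than a guarantee.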
