提交 cf4a1a46 编写于 作者: S stevenj

add options to use either original Fortran-derived DIRECT code or new C...

add options to use either the original Fortran-derived DIRECT code or the new C version, use the 1e-4 magic epsilon parameter of Jones, dynamically set the max. number of function evaluations in the DIRECT code and increase the defaults, and add another performance hack to cdirect

darcs-hash:20070829061939-c8de0-1294a9cc044efa3cb7b13e65aae18b45e18d1237.gz
上级 09af4e58
......@@ -43,6 +43,8 @@ void nlopt_version(int *major, int *minor, int *bugfix)
static const char nlopt_algorithm_names[NLOPT_NUM_ALGORITHMS][128] = {
"DIRECT (global)",
"DIRECT-L (global)",
"Original DIRECT version (global)",
"Original DIRECT-L version (global)",
"Subplex (local)",
"StoGO (global)",
"StoGO with randomized search (global)",
......@@ -228,21 +230,22 @@ static nlopt_result nlopt_minimize_(
switch (algorithm) {
case NLOPT_GLOBAL_DIRECT:
case NLOPT_GLOBAL_DIRECT_L:
#if 0
return cdirect(n, f, f_data, lb, ub, x, fmin, &stop, 0,
return cdirect(n, f, f_data, lb, ub, x, fmin, &stop, 1e-4,
algorithm == NLOPT_GLOBAL_DIRECT ? 0 : 1);
#endif
case NLOPT_GLOBAL_ORIG_DIRECT:
case NLOPT_GLOBAL_ORIG_DIRECT_L:
{
int iret;
d.xtmp = (double *) malloc(sizeof(double) * n*2);
if (!d.xtmp) return NLOPT_OUT_OF_MEMORY;
memcpy(d.xtmp + n, x, sizeof(double) * n); d.x0 = d.xtmp + n;
iret = direct_optimize(f_direct, &d, n, lb, ub, x, fmin,
maxeval, 500, ftol_rel, ftol_abs,
maxeval, 500, 1e-4, 1e-4,
xtol_rel, xtol_rel,
DIRECT_UNKNOWN_FGLOBAL, -1.0,
NULL,
algorithm == NLOPT_GLOBAL_DIRECT
algorithm == NLOPT_GLOBAL_ORIG_DIRECT
? DIRECT_ORIGINAL
: DIRECT_GABLONSKY);
recenter_x(n, x, lb, ub, d.x0, x);
......
......@@ -12,13 +12,19 @@ typedef double (*nlopt_func)(int n, const double *x,
typedef enum {
/* non-gradient algorithms */
NLOPT_GLOBAL_DIRECT = 0,
NLOPT_GLOBAL_DIRECT_L,
NLOPT_GLOBAL_ORIG_DIRECT,
NLOPT_GLOBAL_ORIG_DIRECT_L,
NLOPT_LOCAL_SUBPLEX,
/* gradient-based algorithms */
NLOPT_GLOBAL_STOGO,
NLOPT_GLOBAL_STOGO_RANDOMIZED,
NLOPT_LOCAL_LBFGS,
NLOPT_NUM_ALGORITHMS /* not an algorithm, just the number of them */
......
......@@ -249,16 +249,36 @@ static int convex_hull(rb_tree *t, double **hull)
if (xmin == xmax) return nhull;
/* set nmax = min mode with x == xmax */
#if 0
while (nmax->k[0] == xmax)
nmax = rb_tree_pred(nmax); /* non-NULL since xmin != xmax */
nmax = rb_tree_succ(nmax);
#else
/* performance hack (see also below) */
{
double kshift[2];
kshift[0] = xmax * (1 - 1e-13);
kshift[1] = -HUGE_VAL;
nmax = rb_tree_find_gt(t, kshift); /* non-NULL since xmin != xmax */
}
#endif
ymaxmin = nmax->k[1];
minslope = (ymaxmin - yminmin) / (xmax - xmin);
/* set n = first node with x != xmin */
#if 0
while (n->k[0] == xmin)
n = rb_tree_succ(n); /* non-NULL since xmin != xmax */
#else
/* performance hack (see also below) */
{
double kshift[2];
kshift[0] = xmin * (1 + 1e-13);
kshift[1] = -HUGE_VAL;
n = rb_tree_find_gt(t, kshift); /* non-NULL since xmin != xmax */
}
#endif
for (; n != nmax; n = rb_tree_succ(n)) {
double *k = n->k;
......
......@@ -54,9 +54,9 @@
doublereal d__1;
/* constants (FIXME: change to variable?) */
const integer MAXFUNC = 90000;
const integer MAXDEEP = 600;
const integer MAXDIV = 3000;
integer MAXFUNC = *maxf <= 0 ? 100050 : *maxf + 50;
const integer MAXDEEP = 1000;
const integer MAXDIV = 5000;
/* Local variables */
integer increase;
......@@ -475,7 +475,7 @@
newtosample = 0;
i__2 = maxpos;
for (j = 1; j <= i__2; ++j) {
actdeep = s[j + 2999];
actdeep = s[j + MAXDIV-1];
/* +-----------------------------------------------------------------------+ */
/* | If the actual index is a point to sample, do it. | */
/* +-----------------------------------------------------------------------+ */
......@@ -486,7 +486,7 @@
actdeep_div__ = direct_dirgetmaxdeep_(&s[j - 1], length, &MAXFUNC,
n);
delta = thirds[actdeep_div__ + 1];
actdeep = s[j + 2999];
actdeep = s[j + MAXDIV-1];
/* +-----------------------------------------------------------------------+ */
/* | If the current depth of division is only one under the maximal allowed | */
/* | depth, stop the computation. | */
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册