# NOTE: Upstream repository was archived on 2022-02-23 (read-only: files can be
# viewed and cloned, but no pushes, issues, or pull requests are accepted).
# File: pathfinder/tests/test_scipy_optimize.py (165 lines, 4.7 KiB, Python)

import numpy
import scipy.optimize
import pytest
def circ_cost(radius, center=(0, 0)):
    """Return a cost function that is zero exactly on the given circle.

    The returned callable maps a 2-D point to
    ``radius**2 - |pt - center|**2``: positive inside the circle, zero on it,
    negative outside.
    """
    center = numpy.array(center)

    def cf(pt):
        offset = numpy.array(pt) - center
        return radius**2 - offset.dot(offset)

    return cf
def test_circ_cost():
    """The circle cost is zero precisely for points on the circle."""
    # (3, 4) lies on the radius-5 circle centred at the origin.
    assert circ_cost(5)([3, 4]) == 0
    # (0, 0) lies on the radius-13 circle centred at (12, 5).
    assert circ_cost(13, [12, 5])([0, 0]) == 0
def test_find_sols():
    """Minimise the summed squared costs of two intersecting circles.

    The circles (radius 5 at the origin, radius 13 at (8, -8)) intersect at
    (3, 4) among other points; the solver output is printed for inspection and
    the test passes as long as the call completes.
    """
    near_circle = circ_cost(5)
    far_circle = circ_cost(13, [8, -8])

    def costs(pt):
        # Residual vector: one entry per circle constraint.
        return numpy.array([near_circle(pt), far_circle(pt)])

    def jac(pt):
        # Analytic Jacobian of `costs` (currently unused by the solve below).
        x, y = pt
        return numpy.array([[-2 * x, -2 * y], [-2 * (x - 8), -2 * (y + 8)]])

    squared_total = lambda p: costs(p).dot(costs(p))
    print(scipy.optimize.minimize(squared_total, numpy.array([1, 2])))
def dipole_cost(vn, xn_raw):
    """Return the residual function for one sample value ``vn`` at point ``xn_raw``.

    The returned closure takes a 6-vector ``pt = (p, s)`` (first three entries
    ``p``, last three ``s``) and evaluates
    ``vn * |xn - s|**3 - p . (xn - s)``,
    which is zero when ``(p, s)`` is consistent with the sample — presumably a
    dipole-potential measurement; confirm against the caller.
    """
    xn = numpy.array(xn_raw)

    def dc(pt):
        moment = pt[0:3]
        position = pt[3:6]
        separation = xn - position
        return vn * numpy.linalg.norm(separation) ** 3 - moment.dot(separation)

    return dc
def test_actual_dipole_finding():
    """Recover the 6-vector pt = (p, s) from per-point residuals.

    ``c0`` pins ``|p|**2`` to 35; each ``cn`` is a ``dipole_cost`` residual for
    one sample point.  Several scipy.optimize solvers are run on the
    six-residual (exactly determined) and eight-residual (over-determined)
    systems and their results printed for comparison; the test passes as long
    as none of the calls raises.

    Fixes vs. the previous revision: removed a duplicated ``c6`` assignment,
    the unused ``vns``/``xns`` lists, unused ``as e`` exception bindings, and a
    redundant second ``least_squares`` solve that existed only to print
    ``x[0]``.
    """

    def c0(pt):
        # Magnitude constraint on the first three components: |p|**2 == 35.
        p = pt[0:3]
        return p.dot(p) - 35

    # Sample values — presumably measured potentials at the points used in the
    # dipole_cost calls below; confirm against the data source.
    v1 = -0.05547767706400186526225414
    v2 = -0.06018573388098888319642888
    v3 = -0.06364032191901859480476888
    v4 = -0.06488383879243851188402150
    v5 = -0.06297148063759813929659130
    v6 = -0.05735489606460216
    v7 = -0.07237320672886623

    # Index 0 of each list is a deliberate placeholder so that vns2[n] and
    # xns2[n] line up with residual cn for n >= 1.
    vns2 = [0, v1, v2, v3, v4, v5, v6, v7]
    xns2 = [numpy.array([0, 0, n]) for n in range(0, 7)]
    xns2.append(numpy.array([1, 1, 7]))

    c1 = dipole_cost(v1, [0, 0, 1])
    c2 = dipole_cost(v2, [0, 0, 2])
    c3 = dipole_cost(v3, [0, 0, 3])
    c4 = dipole_cost(v4, [0, 0, 4])
    c5 = dipole_cost(v5, [0, 0, 5])
    c6 = dipole_cost(v6, [0, 0, 6])
    c7 = dipole_cost(v7, [1, 1, 7])

    def costs(pt):
        # Six residuals for six unknowns: exactly determined system.
        return numpy.array([c0(pt), c1(pt), c2(pt), c3(pt), c4(pt), c5(pt)])

    def costs2(pt):
        # Eight residuals: over-determined system with the two extra samples.
        return numpy.array(
            [c0(pt), c1(pt), c2(pt), c3(pt), c4(pt), c5(pt), c6(pt), c7(pt)]
        )

    def jac_row(n):
        # Gradient of residual cn with respect to pt = (p, s); reads the
        # aligned vns2[n] / xns2[n] entries.
        def jr(pt):
            p = pt[0:3]
            s = pt[3:6]
            vn = vns2[n]
            xn = xns2[n]
            diff = xn - s
            return [
                -diff[0], -diff[1], -diff[2],
                p[0] - vn * 3 * numpy.linalg.norm(diff) * diff[0],
                p[1] - vn * 3 * numpy.linalg.norm(diff) * diff[1],
                p[2] - vn * 3 * numpy.linalg.norm(diff) * diff[2],
            ]

        return jr

    def jac(pt):
        # First row is the gradient of c0; the rest come from jac_row.
        return numpy.array([
            [2 * pt[0], 2 * pt[1], 2 * pt[2], 0, 0, 0],
            jac_row(1)(pt),
            jac_row(2)(pt),
            jac_row(3)(pt),
            jac_row(4)(pt),
            jac_row(5)(pt),
        ])

    def jac2(pt):
        return numpy.array([
            [2 * pt[0], 2 * pt[1], 2 * pt[2], 0, 0, 0],
            jac_row(1)(pt),
            jac_row(2)(pt),
            jac_row(3)(pt),
            jac_row(4)(pt),
            jac_row(5)(pt),
            jac_row(6)(pt),
            jac_row(7)(pt),
        ])

    def print_result(msg, result):
        # Not every scipy result object exposes nfev/njev, so probe gently.
        print(msg)
        print(f"\tResult: {result.x}")
        print(f"\tSuccess: {result.success}. {result.message}")
        try:
            print(f"\tFunc evals: {result.nfev}")
        except AttributeError:
            pass
        try:
            print(f"\tJacb evals: {result.njev}")
        except AttributeError:
            pass

    print("Minimising the squared costs")
    print(scipy.optimize.minimize(
        lambda x: costs(x).dot(costs(x)), numpy.array([1, 2, 3, 4, 5, 6])
    ))
    print_result("Using root",
                 scipy.optimize.root(costs, numpy.array([1, 2, 3, 4, 5, 6])))
    print_result("Using root with jacobian",
                 scipy.optimize.root(costs, numpy.array([1, 2, 3, 4, 5, 6]),
                                     jac=jac, tol=1e-12))
    print_result("Using least squares",
                 scipy.optimize.least_squares(costs, numpy.array([1, 2, 3, 4, 5, 6]),
                                              gtol=1e-12))
    print_result("Using least squares, with jacobian",
                 scipy.optimize.least_squares(costs, numpy.array([1, 2, 3, 4, 5, 6]),
                                              jac=jac, ftol=3e-16, gtol=3e-16, xtol=3e-16))
    print_result("Using least squares, with jacobian, lm",
                 scipy.optimize.least_squares(costs, numpy.array([1, 2, 3, 4, 5, 6]),
                                              jac=jac, ftol=3e-16, gtol=3e-16, xtol=3e-16,
                                              method="lm"))
    print_result("Using least squares extra dot",
                 scipy.optimize.least_squares(costs2, numpy.array([1, 2, 3, 4, 5, 6])))
    # Solve once and reuse the result instead of running the solver twice.
    extra_with_jac = scipy.optimize.least_squares(
        costs2, numpy.array([1, 2, 3, 4, 5, 6]), jac=jac2, ftol=1e-12
    )
    print_result("Using least squares extra dot, with jacobian", extra_with_jac)
    print(extra_with_jac.x[0])