在scipy.optimize.minimize中,是否可以给予一个具有2个输出的函数作为目标函数梯度和约束梯度?

68bkxrlz  于 12个月前  发布在  其他
关注(0)|答案(1)|浏览(100)

我有一个非常基本的优化问题,目标是一个6维Rosenbrock函数。我使用单纯形梯度(simplex gradient)作为目标函数和约束的梯度,但我想在单个函数中同时计算这两个梯度,以降低计算成本。这是因为目标函数梯度的计算过程中,已经包含了约束梯度计算所需的数据。尽管我无法在这里完整说明我真正的复杂问题,但如果你能帮我解决这个简化问题,我会把同样的逻辑应用到实际问题上。
这是我以前尝试过的,但它不起作用。我还试图将其转储到pickle文件中,然后尝试调用它,但在这种情况下,优化无法正常工作。

import numpy as np
from scipy.optimize import minimize, NonlinearConstraint

def rosenbrock_6d(x):
    # Six-dimensional Rosenbrock function: sum of five coupled terms.
    print("obj is calculating!")
    return sum(
        100 * (x[k + 1] - x[k] ** 2) ** 2 + (1 - x[k]) ** 2
        for k in range(5)
    )

def nonlinear_constraint(x):
    # Weighted sum of squares: coefficients 10..15 on the six coordinates.
    print("Nonlinear constraint is calculating!")
    weights = (10, 11, 12, 13, 14, 15)
    return sum(w * xi ** 2 for w, xi in zip(weights, x))

def StoSAG(x):
    # StoSAG-style stochastic gradient estimate: perturb x with Gaussian
    # samples, then regress the objective/constraint responses onto the
    # perturbations via a pseudo-inverse.  Returns (grad_f, grad_g).
    print("OBj StoSAG is calculating!")
    n_pert = 10
    CU = np.eye(6)
    # One draw of all perturbed points; columns are individual samples.
    samples = np.random.multivariate_normal(x, CU, n_pert).T  # (6, n_pert)
    obj_vals = np.zeros((n_pert, 1))
    con_vals = np.zeros((n_pert, 1))
    for k in range(n_pert):
        col = samples[:, k]
        obj_vals[k] = rosenbrock_6d(col)
        con_vals[k] = nonlinear_constraint(col)
    d_obj = obj_vals - rosenbrock_6d(x)
    deltas = samples - np.reshape(x, (6, 1))
    pinv = np.linalg.pinv(deltas.T)
    grad_f = np.matmul(pinv, d_obj).ravel()
    d_con = con_vals - nonlinear_constraint(x)
    grad_g = np.transpose(np.matmul(pinv, d_con))
    return grad_f, grad_g

# NOTE(review): the original passed ``jac=StoSAG()[1]`` and ``jac=StoSAG()[0]``,
# which *call* StoSAG immediately with no argument (a TypeError) instead of
# handing SciPy a callable.  SciPy expects ``jac`` to be a function of x, so
# wrap the two-output StoSAG in small lambdas that select the needed gradient.
# (Each lambda re-runs StoSAG; see the answer below for a cached variant.)
nlc = NonlinearConstraint(nonlinear_constraint, -np.inf, 2,
                          jac=lambda x: StoSAG(x)[1])

# Example usage:
# Define a sample 6-dimensional starting point
x_values = (1, 2, 3, 4, 5, 6)
result = minimize(rosenbrock_6d, x_values, method='SLSQP',
                  jac=lambda x: StoSAG(x)[0],
                  constraints=nlc,
                  options={'maxiter': 200, 'disp': True, 'iprint': 2, 'ftol': 1e-2})

# Print the result
print("Optimal solution: x =", result.x)
print("Optimal objective function value:", result.fun)

字符串

f3temu5u

f3temu5u1#

有一长串Numpy使用上的问题,我不会在这里逐一讨论,因为我认为它们超出了本问题的范围;总体建议是更多地使用矢量化操作。
不要使用pickle。你可以使用SQLite,但我认为在不了解你的用例的情况下,这是多余的。我认为唯一可行的方法是LRU缓存:

import numpy as np
from scipy.optimize import minimize, NonlinearConstraint
from functools import lru_cache, wraps

# https://stackoverflow.com/a/52332109/313768
def np_cache(function):
    """Memoize *function* whose single argument is a 1-D array.

    Arrays are unhashable, so the public wrapper converts its argument to
    a tuple; the inner cached helper converts it back before delegating.
    """
    @lru_cache()
    def _cached(key):
        return function(np.array(key))

    @wraps(function)
    def wrapper(array):
        return _cached(tuple(array))

    # Expose the cache-management API of the inner helper.
    wrapper.cache_info = _cached.cache_info
    wrapper.cache_clear = _cached.cache_clear

    return wrapper

def rosenbrock_6d(x: np.ndarray) -> float:
    """Six-dimensional Rosenbrock function (five coupled terms)."""
    return sum(
        100*(x[k + 1] - x[k]**2)**2 + (1 - x[k])**2
        for k in range(5)
    )

def nonlinear_constraint(x: np.ndarray) -> float:
    """Weighted sum of squares with coefficients 10 through 15."""
    weights = (10, 11, 12, 13, 14, 15)
    return sum(w*xi**2 for w, xi in zip(weights, x))

@np_cache
def StoSAG(x: np.ndarray) -> tuple[
    np.ndarray,  # objective gradient
    np.ndarray,  # constraint gradient
]:
    """Estimate objective and constraint gradients from one shared set of
    Gaussian perturbations (least-squares regression via pseudo-inverse).

    Cached via @np_cache, so the objective-jac and constraint-jac callbacks
    that SciPy invokes at the same iterate trigger only one evaluation.
    """
    print("OBj StoSAG is calculating!", flush=True)

    n_pert = 10
    cov = np.eye(6)
    # One draw of all perturbed points; columns are individual samples.
    samples = np.random.multivariate_normal(x, cov, n_pert).T  # (6, n_pert)

    obj_vals = np.zeros((n_pert, 1))
    con_vals = np.zeros((n_pert, 1))
    for k in range(n_pert):
        col = samples[:, k]
        obj_vals[k] = rosenbrock_6d(col)
        con_vals[k] = nonlinear_constraint(col)

    d_obj = obj_vals - rosenbrock_6d(x)
    deltas = samples - np.reshape(x, (6, 1))
    pinv = np.linalg.pinv(deltas.T)
    grad_f = np.matmul(pinv, d_obj).ravel()
    d_con = con_vals - nonlinear_constraint(x)
    grad_g = np.transpose(np.matmul(pinv, d_con))

    return grad_f, grad_g

def StoSAG_objective_grad(x: np.ndarray) -> np.ndarray:
    """Objective jac for SciPy: first output of the (cached) StoSAG call."""
    print('Objective gradient is calculating!', flush=True)
    grad_f, _ = StoSAG(x)
    return grad_f

def StoSAG_constraint_grad(x: np.ndarray) -> np.ndarray:
    """Constraint jac for SciPy: second output of the (cached) StoSAG call."""
    print('Constraint gradient is calculating!', flush=True)
    _, grad_g = StoSAG(x)
    return grad_g

def main() -> None:
    """Minimize the 6-D Rosenbrock under one nonlinear constraint, sharing
    StoSAG gradient work between objective and constraint via the cache."""
    constraint = NonlinearConstraint(
        fun=nonlinear_constraint, lb=-np.inf, ub=2,
        jac=StoSAG_constraint_grad,
    )

    # Example usage: an arbitrary 6-dimensional starting point.
    start = (1, 2, 3, 4, 5, 6)
    result = minimize(
        fun=rosenbrock_6d,
        x0=start,
        method='SLSQP',
        jac=StoSAG_objective_grad,
        constraints=constraint,
        options={
            'maxiter': 200,
            'ftol': 1e-2,
        },
    )

    print("Optimal solution: x =", result.x)
    print("Optimal objective function value:", result.fun)

# Run the optimization demo only when this file is executed as a script.
if __name__ == '__main__':
    main()

个字符

相关问题