I'm trying out Python's scipy.optimize to minimize a function using the SLSQP algorithm. The optimization works fine unconstrained and with one matrix constraint, but I get an error when I add a second matrix constraint.
import numpy as np
from scipy.optimize import minimize
n = 3
n_sim = 10000
risk_aversion = 5

# Simulated returns, their sample mean, and their sample covariance
Y = np.random.normal(loc=0, scale=1, size=(n_sim, n))
mu_Y = np.mean(Y, axis=0).reshape(1, n)
sigma_Y = np.cov(Y, rowvar=0, bias=1)

x0 = np.ones((n, 1)) / n

# Equality constraint data: weights sum to one (Aeq x = beq)
Aeq = np.ones((1, n))
beq = 1

# Inequality constraint data, used as A x >= b, i.e. 0.25 <= x1 + x2 <= 0.5
A = np.array([[1, 1, 0], [-1, -1, 0]])
b = np.array([[0.25], [-0.5]])
def func(x, mu, sigma, risk_aversion):
    # Negative mean-variance utility: -mu x + (risk_aversion / 2) * x' sigma x
    return -np.dot(mu, x) + 0.5 * risk_aversion * np.dot(np.dot(x.T, sigma), x)

def func_deriv(x, mu, sigma, risk_aversion):
    # Gradient of func with respect to x: -mu' + risk_aversion * sigma x
    return -mu.flatten() + risk_aversion * np.dot(sigma, x)
# Equality constraint (weights sum to one) and box bounds 0 <= x_i <= 1
c_ = {'type': 'eq', 'fun': lambda x: np.dot(Aeq, x) - beq, 'jac': lambda x: Aeq}
b_ = [(0, 1) for i in range(n)]
This version, with one constraint and bounds, works fine:
res = minimize(lambda x: func(x, mu_Y, sigma_Y, risk_aversion), x0,
               jac=lambda x: func_deriv(x, mu_Y, sigma_Y, risk_aversion),
               constraints=c_, bounds=b_, method='SLSQP', options={'disp': True})
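For reference, the single-constraint result can be sanity-checked as below; res.x, res.fun and res.success are standard fields of the OptimizeResult that minimize returns, and the exact numbers will of course vary with the random draw of Y.

print(res.success, res.fun)
print(res.x)                      # optimal weights, returned as a 1-D array of length n
print(np.dot(Aeq, res.x) - beq)   # ~0 if the equality constraint is satisfied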
However, the two-constraint version gives me an error:
d_ = (c_,
      {'type': 'ineq', 'fun': lambda x: np.dot(A, x) - b, 'jac': lambda x: A})
res2 = minimize(lambda x: func(x, mu_Y, sigma_Y, risk_aversion), x0,
                jac=lambda x: func_deriv(x, mu_Y, sigma_Y, risk_aversion),
                constraints=d_, bounds=b_, method='SLSQP', options={'disp': True})
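In case the shapes matter: as far as I understand, minimize flattens x0 and passes a 1-D array of length n to the objective, jacobian, and constraint functions. Evaluating the two constraint functions standalone at an arbitrary test point (x_test below is only for illustration, not anything the solver produces) gives:

x_test = np.ones(n) / n
print((np.dot(Aeq, x_test) - beq).shape)   # (1,)  -- a 1-D vector, one value per equality constraint
print((np.dot(A, x_test) - b).shape)       # (2, 2) -- b has shape (2, 1), so the subtraction broadcasts

I don't know whether that (2, 2) result is what triggers the error, but it is the main difference I can see between the constraint that works and the one that doesn't.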