scipy.optimize.minimize returns "ValueError: The truth value of a Series is ambiguous"

Asked: 2019-01-04 12:04:50

Tags: python pandas optimization scipy minimize

I am using scipy.optimize.minimize with the BFGS method to maximize the log-likelihood of an ARMA model. However, I get the following error:

The truth value of a Series is ambiguous. Use a.empty, a.bool(), a.item(), a.any() or a.all().

The function I am optimizing returns the correct output, i.e. the log-likelihood of the specified ARMA model, so I tried to look through the source code of the minimize package, but it is fairly involved and I could not work out what the problem is. I realize this is not a trivial question, but I hope someone with experience using the minimize package can give me some pointers as to what might be causing the error.
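
For reference, this is the generic exception pandas raises whenever a Series ends up in a context that expects a single True/False value. A minimal sketch, unrelated to the ARMA code, just to show where the message comes from:

import pandas as pd

s = pd.Series([0.5])       # even a one-element Series behaves this way
try:
    bool(s)                # any boolean context calls Series.__bool__
except ValueError as err:
    print(err)             # "The truth value of a Series is ambiguous. ..."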

The traceback given is the following:

    ValueError                                Traceback (most recent call last)

/......../.py in fit_ARMA(data, p, q)
    150     optim_args=(data, p, q)
    151
--> 152     fitted_params = minimize(minus_ll_ARMA, x0=init_params, args=optim_args, method='BFGS')
    153
    154     return fitted_params.x

/anaconda3/lib/python3.6/site-packages/scipy/optimize/_minimize.py in minimize(fun, x0, args, method, jac, hess, hessp, bounds, constraints, tol, callback, options)
    595         return _minimize_cg(fun, x0, args, jac, callback, **options)
    596     elif meth == 'bfgs':
--> 597         return _minimize_bfgs(fun, x0, args, jac, callback, **options)
    598     elif meth == 'newton-cg':
    599         return _minimize_newtoncg(fun, x0, args, jac, hess, hessp, callback,

/anaconda3/lib/python3.6/site-packages/scipy/optimize/optimize.py in _minimize_bfgs(fun, x0, args, jac, callback, gtol, norm, eps, maxiter, disp, return_all, **unknown_options)
    981             alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \
    982                      _line_search_wolfe12(f, myfprime, xk, pk, gfk,
--> 983                                           old_fval, old_old_fval, amin=1e-100, amax=1e100)
    984         except _LineSearchError:
    985             # Line search failed to find a better solution.

/anaconda3/lib/python3.6/site-packages/scipy/optimize/optimize.py in _line_search_wolfe12(f, fprime, xk, pk, gfk, old_fval, old_old_fval, **kwargs)
    801     ret = line_search_wolfe1(f, fprime, xk, pk, gfk,
    802                              old_fval, old_old_fval,
--> 803                              **kwargs)
    804
    805     if ret[0] is not None and extra_condition is not None:

/anaconda3/lib/python3.6/site-packages/scipy/optimize/linesearch.py in line_search_wolfe1(f, fprime, xk, pk, gfk, old_fval, old_old_fval, args, c1, c2, amax, amin, xtol)
     99     stp, fval, old_fval = scalar_search_wolfe1(
    100             phi, derphi, old_fval, old_old_fval, derphi0,
--> 101             c1=c1, c2=c2, amax=amax, amin=amin, xtol=xtol)
    102
    103     return stp, fc[0], gc[0], fval, old_fval, gval[0]

/anaconda3/lib/python3.6/site-packages/scipy/optimize/linesearch.py in scalar_search_wolfe1(phi, derphi, phi0, old_phi0, derphi0, c1, c2, amax, amin, xtol)
    153
    154     if old_phi0 is not None and derphi0 != 0:
--> 155         alpha1 = min(1.0, 1.01*2*(phi0 - old_phi0)/derphi0)
    156         if alpha1 < 0:
    157             alpha1 = 1.0

/anaconda3/lib/python3.6/site-packages/pandas/core/generic.py in __nonzero__(self)
   1574         raise ValueError("The truth value of a {0} is ambiguous. "
   1575                          "Use a.empty, a.bool(), a.item(), a.any() or a.all()."
-> 1576                          .format(self.__class__.__name__))
   1577
   1578     __bool__ = __nonzero__

ValueError: The truth value of a Series is ambiguous. Use a.empty, a.bool(), a.item(), a.any() or a.all().
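
The last scipy frame is the line alpha1 = min(1.0, 1.01*2*(phi0 - old_phi0)/derphi0) in linesearch.py. If the objective value phi0 reaching the line search were a pandas Series rather than a plain float, the built-in min() would try to turn a Series comparison into a single boolean and fail with exactly this message. A hypothetical reproduction of just that frame (names and values made up for illustration):

import pandas as pd

phi0 = pd.Series([1.2])            # pretend the objective returned a Series
old_phi0, derphi0 = 3.0, -0.5
try:
    min(1.0, 1.01 * 2 * (phi0 - old_phi0) / derphi0)   # min() needs a scalar comparison
except ValueError as err:
    print(err)                     # same ambiguous-truth-value error as in the traceback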

Below are the two functions used:

def ll_ARMA(params, data, p, q):
    '''Returns the log-likelihood of the ARMA model.
    Data index must be increasing (oldest observations on top)
    p = # coeff. AR
    q = # coeff. MA'''

    n = data.shape[0]

    if p > 0:
        lagged_data = data.shift(1).copy()
        for i in range(1, p):
            lagged_data = pd.concat([lagged_data, data.shift(i+1)], axis=1)

    else:
        lagged_data = pd.DataFrame(index=data.index, columns=[i+1 for i in range(p)])

    errors_index = pd.Index([i for i in range(q)]).append(data.index)
    errors = pd.DataFrame(np.zeros(shape=(n+q, 1)), index=errors_index)
    for i in range(q, n+q):
        a = data.iloc[i-q, :] - params[0] - np.sum(np.dot(lagged_data, params[1:p+1])) - \
            np.dot(params[p+1:], np.asarray([errors.iloc[i-j, :] for j in range(1, q+1)]))
        errors.iloc[i, :] = a.item()

    errors = errors[q:]  # gets rid of initial q values set at 0
    var = np.var(errors)
    log_likelihood = np.log(2*np.pi*var)*(-1/2) + (-1/2)*(errors**2)/var

    return np.sum(log_likelihood)

def fit_ARMA(data, p, q):
    '''Returns the fitted parameters of an ARMA model (including the constant).
    Order is -constant -p AR coeff -q MA coeff
    Data must be a 1-column DF.
    p is the parameter of the AR, q of the MA'''

    init_params = np.random.random(p+q+1)

    def minus_ll_ARMA(params, data, p, q):
        return -1*ll_ARMA(params, data, p, q)

    optim_args = (data, p, q)

    fitted_params = minimize(minus_ll_ARMA, x0=init_params, args=optim_args, method='BFGS')

    return fitted_params.x

Here is a working example that reproduces the error:

import pandas as pd # version 0.23.4
import numpy as np # version 1.15.4
from scipy.optimize import minimize   # version 1.1.0

data = pd.DataFrame(np.random.random(500)*0.4-0.2, index = [i for i in range(1,501)]) # randomly generates returns in a sensible range
parameters = fit_ARMA(data, 2, 3) # this returns the error
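
One way to narrow this down might be to check what the objective actually returns before handing it to minimize, since scipy's BFGS line search expects a plain scalar. A small diagnostic sketch, assuming ll_ARMA and data from the snippets above are defined in the same session (the value itself does not matter, only its type):

test_params = np.random.random(2 + 3 + 1)   # constant + p AR coefficients + q MA coefficients
value = ll_ARMA(test_params, data, 2, 3)
print(type(value))                          # a pandas type here, rather than a float,
                                            # would explain the ambiguous-truth-value error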

P.S. I have already looked at this question: scipy.optimize.minimize Jacobian function causes 'Value Error: The truth value of an array with more than one element is ambiguous', but I am not passing in a gradient, so the nature of the error there is different.

0 Answers:

There are no answers yet.