In [ ]:
# 必要なライブラリをインストール
!pip install japanize_matplotlib
In [3]:
# ライブラリを起動
from dateutil.relativedelta import relativedelta
import japanize_matplotlib
import math as math
import numpy as np
import pandas as pd
import scipy as sp
import random
import statsmodels.api as sm
from statsmodels.tsa.api import VAR
from sklearn.linear_model import LinearRegression
from matplotlib import pyplot as plt
import matplotlib.dates as mdates
from matplotlib.pylab import rcParams
from matplotlib.ticker import MultipleLocator
rcParams["figure.figsize"] = 15, 6
# Colabでファイルを読み込むために、Google Driveをマウント
from google.colab import drive
drive.mount('/content/drive')
Mounted at /content/drive

(5) 再帰的ベクトル自己回帰モデルと行列の導入¶

インフレ率、失業率、金利の3変量VARモデル¶

  • 変数:インフレ率、失業率、金利の3変数
  • 期間は1972年第1四半期から2023年第4四半期まで
  • ラグ次数はBICにより2四半期が選択
  • 信頼区間は90%（両側5%ずつを除いた区間）
In [4]:
# Load the data
dinput = pd.read_excel('/content/drive/My Drive/Data/data_VAR5.xlsx',
                       sheet_name='data_q', header=[0])
# Build the (T, 3) data matrix: inflation, unemployment, interest rate.
# The assignments are fully vectorized, so no row loop is needed (the
# original loop recomputed the identical columns len(dinput)-1 times).
data = np.zeros([len(dinput), 3])
data[:, 0] = dinput.cpi / dinput.cpi.shift(4) * 100 - 100  # YoY inflation (%)
data[:, 1] = dinput.u    # unemployment rate
data[:, 2] = dinput.RSR  # interest rate
data = data[4:]          # drop the first year lost to the 4-quarter lag
dlen = len(data)
dti = pd.date_range('1972-01-01', periods=dlen, freq='QS')

# Draw the chart
fig = plt.figure(figsize=(10, 6))
ax1 = fig.add_subplot()
ax1.plot(dti, data[:, 0], color='black', linewidth=1, linestyle='solid',
         label='インフレ率')
# Zero line: use actual timestamps so the span matches the datetime axis
# (the original passed date strings, relying on implicit conversion).
ax1.hlines([0], dti[0], dti[-1], color='black', linewidth=1)
ax1.set_xlim(dti[0], dti[-1])
ax1.grid(False)
ax1.tick_params(labelsize=14)
formatter = mdates.DateFormatter("%Y")
ax1.xaxis.set_major_formatter(formatter)
locator = mdates.YearLocator(base=4)
ax1.xaxis.set_major_locator(locator)
ax1.set_xlabel('年', fontsize=14, loc='center')
ax1.set_ylim(-12, 24)
ax1.yaxis.set_major_locator(MultipleLocator(4))
ax1.set_ylabel('%', fontsize=14)
h1, l1 = ax1.get_legend_handles_labels()
# Second axis for unemployment and the interest rate
ax2 = ax1.twinx()
ax2.plot(dti, data[:, 1], color='black', linewidth=1, linestyle='dotted',
         label='失業率(右軸)')
ax2.plot(dti, data[:, 2], color='black', linewidth=1, linestyle='dashed',
         label='金利(右軸)')
ax2.grid(False)
ax2.tick_params(labelsize=14)
ax2.set_ylim(-6, 12)
ax2.set_ylabel('%', fontsize=14)
h2, l2 = ax2.get_legend_handles_labels()
# Merge the legends of both axes into one box
ax1.legend(h1 + h2, l1 + l2, loc='upper right', frameon=False, fontsize=14)
Out[4]:
<matplotlib.legend.Legend at 0x7e5869b08450>
No description has been provided for this image
In [5]:
# Estimate the VAR model
CONFIDENCE_LEVEL = 0.9
# Two-sided 90% band multipliers from the standard normal
lbound, ubound = sp.stats.norm.interval(confidence=CONFIDENCE_LEVEL, loc=0,
                                        scale=1)
model = VAR(data)
PERIOD = 60
results = model.fit(ic='bic', maxlags=8)   # lag order selected by BIC
lag = results.k_ar
irf = results.irf(PERIOD)
impulse = irf.orth_irfs                    # orthogonalized (Cholesky) IRFs
lband = impulse + irf.stderr(orth=True) * lbound
uband = impulse + irf.stderr(orth=True) * ubound

# Draw the 3x3 grid of impulse responses
vnames = ['インフレ率', '失業率', '金利']
snames = ['供給ショック', '需要ショック', '金融政策ショック']
# Per-panel y-axis limits, indexed [response variable][shock]
# (the original set all nine limits on every pass of the double loop)
ylims = [[(-0.4, 1.4), (-0.6, 0.1), (-0.4, 0.5)],
         [(-0.06, 0.1), (-0.05, 0.25), (-0.05, 0.15)],
         [(-0.3, 0.5), (-0.6, 0.1), (-0.4, 1)]]
fig, ax = plt.subplots(3, 3, figsize=[14, 14])
for i in range(3):
    ax[i, 0].set_ylabel('インパルス応答', fontsize=14)
    for j in range(3):
        ax[i, j].plot(impulse[:, i, j], linewidth=2, color='black')
        ax[i, j].plot(lband[:, i, j], linewidth=1, linestyle='dashed',
                      color='black')
        ax[i, j].plot(uband[:, i, j], linewidth=1, linestyle='dashed',
                      color='black')
        ax[i, j].hlines([0], 0, PERIOD, color='black', linestyle='solid')
        ax[i, j].set_xlim(0, PERIOD)
        ax[i, j].set_xlabel('四半期', fontsize=14)
        ax[i, j].set_title('%s→%s' % (snames[j], vnames[i]), fontsize=14)
        ax[i, j].set_ylim(*ylims[i][j])
plt.subplots_adjust(wspace=0.4, hspace=0.3)
plt.show()
No description has been provided for this image
In [6]:
# Variance-covariance matrix of the VAR residuals
print('誤差項の分散共分散行列\n', results.sigma_u)

# Size of the structural shocks via the Cholesky decomposition:
# sigma_u = L @ L.T with L lower-triangular.
L = np.linalg.cholesky(results.sigma_u)
Linv = np.linalg.inv(L)
# Diagonal of L = one-standard-deviation size of each orthogonalized shock
print('ショックの1標準偏差 (c11とc22とc33) \n', np.diag(L))
print('コレスキー分解を行った行列C \n', L)
print('行列Cの逆行列A0 \n', Linv)
# Contemporaneous structural coefficients of the third (interest-rate)
# equation, recovered from row 3 of A0 scaled by the shock size c33.
print('b_31_0 \n', -1* Linv[2,0] * L[2,2])
print('b_32_0 \n', -1* Linv[2,1] * L[2,2])
誤差項の分散共分散行列
 [[ 0.54212987 -0.00824515  0.07147856]
 [-0.00824515  0.0142253  -0.00423919]
 [ 0.07147856 -0.00423919  0.17104551]]
ショックの1標準偏差 (c11とc22とc33) 
 [0.73629469 0.11874299 0.40114407]
コレスキー分解を行った行列C 
 [[ 0.73629469  0.          0.        ]
 [-0.01119816  0.11874299  0.        ]
 [ 0.09707873 -0.0265455   0.40114407]]
行列Cの逆行列A0 
 [[ 1.35815186  0.          0.        ]
 [ 0.1280817   8.42154961  0.        ]
 [-0.32020334  0.55729157  2.49286996]]
b_31_0 
 0.128447669198633
b_32_0 
 -0.2235542073369449

(練習問題2) インフレ率、失業率、金利の3変数の局所予測法¶

  • 変数:インフレ率、失業率、金利の3変数
  • 期間は1972年第1四半期から2023年第4四半期まで
  • ラグ次数は16四半期とする
  • 信頼区間は90%（両側5%ずつを除いた区間）
In [7]:
# Load the data (same construction as in the VAR cell above)
dinput = pd.read_excel('/content/drive/My Drive/Data/data_VAR5.xlsx',
                       sheet_name='data_q', header=[0])
# Vectorized column construction — the original row loop repeated these
# identical assignments len(dinput)-1 times.
data = np.zeros([len(dinput), 3])
data[:, 0] = dinput.cpi / dinput.cpi.shift(4) * 100 - 100  # YoY inflation (%)
data[:, 1] = dinput.u    # unemployment rate
data[:, 2] = dinput.RSR  # interest rate
data = data[4:]          # drop the first year lost to the 4-quarter lag
dlen = len(data)
dti = pd.date_range('1972-01-01', periods=dlen, freq='QS')

# Local-projection (LP) estimation
L = 16 + 1           # 16 quarterly lags + the contemporaneous term
MAX_H = 21           # impulse-response horizon (quarters)
CONFIDENCE_LEVEL = 0.90
SHOCK = 2            # column index of the shock variable (interest rate)
nvar = data.shape[1]
coef_result = np.zeros([MAX_H, nvar])
std_result = np.zeros([MAX_H, nvar])
lbound, ubound = sp.stats.norm.interval(confidence=CONFIDENCE_LEVEL, loc=0,
                                        scale=1)
ssize = len(data)

# Impact (h = 0) responses are fixed by the recursive ordering: the
# interest-rate shock moves only the interest rate itself on impact.
# std_result stays 0 there, so the bands collapse to the point at h = 0.
coef_result[0, :] = [0.0, 0.0, 1.0]


def _lp_irf(dep, h):
    """One LP regression: response of variable `dep` at horizon `h` to the
    interest-rate shock, controlling for the contemporaneous values and
    L-1 lags of all three variables.

    Returns (coefficient, HAC standard error)."""
    sample_y = pd.DataFrame(data[L + h - 1 : ssize, dep])
    sample_x = pd.DataFrame(data[L - 1 : L - 1 + len(sample_y)])
    for l in range(1, L):
        lagged = pd.DataFrame(data[L - 1 - l : L - 1 - l + len(sample_y)])
        sample_x = pd.concat([sample_x, lagged], axis=1)
    sample_x = sm.add_constant(sample_x)
    # Newey-West (Bartlett kernel) standard errors; bandwidth grows with
    # the horizon because the h-step-ahead errors are serially correlated.
    result = sm.OLS(sample_y, sample_x).fit(
        cov_type='HAC', cov_kwds={'maxlags': h + 1, 'kernel': 'bartlett'})
    # iloc[SHOCK + 1]: column 0 is the constant, the contemporaneous
    # regressors follow, so the interest rate sits at position SHOCK + 1.
    return result.params.iloc[SHOCK + 1], result.bse.iloc[SHOCK + 1]


# The original cell copy-pasted the identical regression code once per
# dependent variable; a single helper covers all three cases.
for dep in range(nvar):
    for h in range(1, MAX_H):
        coef_result[h, dep], std_result[h, dep] = _lp_irf(dep, h)

# VAR(2) impulse responses for comparison (computed but not plotted,
# as in the original cell)
models = VAR(data)
results = models.fit(2)
irf = results.irf(MAX_H)
impulse = irf.orth_irfs

# Plot the LP impulse responses with 90% confidence bands
vnames = ['インフレ率', '失業率', '金利']
snames = ['金融政策ショック']
ylims = [(-1, 1), (-0.4, 0.8), (-1, 2)]   # per-panel y-axis limits
fig, ax = plt.subplots(3, 1, figsize=[6, 12])
for i in range(3):
    ax[i].plot(coef_result[:, i], linewidth=2, color='black',
               label='LPのインパルス応答関数')
    ax[i].plot(coef_result[:, i] + std_result[:, i] * lbound,
               linewidth=1, linestyle='dashed', color='black',
               label='信頼区間の上限下限')
    ax[i].plot(coef_result[:, i] + std_result[:, i] * ubound,
               linewidth=1, linestyle='dashed', color='black')
    ax[i].hlines([0], 0, MAX_H, color='black', linestyle='solid')
    ax[i].set_xlim(0, MAX_H - 1)
    ax[i].xaxis.set_major_locator(MultipleLocator(4))
    ax[i].set_xlabel('四半期', fontsize=14)
    ax[i].set_ylabel('インパルス応答', fontsize=14)
    ax[i].set_title('%s→%s' % (snames[0], vnames[i]), fontsize=14)
    ax[i].set_ylim(*ylims[i])
ax[0].legend(loc='lower left', frameon=False, fontsize=11)
plt.subplots_adjust(wspace=0.4, hspace=0.3)
plt.show()
No description has been provided for this image