from sem_model import SEMData, SEMModel
import numpy as np
from functools import partial
from abc import ABC, abstractmethod
from itertools import product
import scipy.stats as st


class SEMOptABC(ABC):

    def __init__(self, mod: SEMModel, data: SEMData):
        """
        Initialisation of the optimiser.
        :param mod: SEM model providing matrices, parameter values and bounds
        :param data: SEM data providing profiles and their covariance matrix
        """
        # TODO Check that the model and the data are in agreement
        self.get_matrices = mod.get_matrices
        self.params = np.array(mod.param_val)
        self.initial_params = self.params.copy()
        self.param_pos = mod.param_pos
        self.param_bounds = mod.get_bounds()
        self.m_profiles = data.m_profiles
        self.m_cov = data.m_cov  # Covariance matrix

        n_prof = self.m_profiles.shape[0]
        n_stoch = round(n_prof * 0.9)
        self.n_cov_set = 100
        self.m_cov_set = \
            [np.cov(self.m_profiles[np.random.choice(n_prof,
                                                     n_stoch,
                                                     replace=False)],
                    rowvar=False,
                    bias=True)
             for _ in range(self.n_cov_set)]

        # for optimisation
        self.min_loss = 0
        self.min_params = self.params

        self.__prepare_diff_matrices(mod)
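
    # The property below returns a fresh stochastic covariance estimate on
    # each access: it draws a random 99% subsample of the profiles and takes
    # its (biased) sample covariance, so repeated gradient evaluations see
    # slightly different matrices.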

    @property
    def m_cov_stoch(self):
        # return self.m_cov_set[np.random.choice(self.n_cov_set, 1)[0]]
        n_prof = self.m_profiles.shape[0]
        n_stoch = round(n_prof * 0.99)
        return np.cov(self.m_profiles[np.random.choice(n_prof,
                                                       n_stoch,
                                                       replace=False)],
                      rowvar=False,
                      bias=True)
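
    # __prepare_diff_matrices caches, for every free parameter, the derivative
    # of its parent matrix: a single-entry matrix with 1 at the parameter's
    # position, symmetrised for the covariance matrices Psi and Theta.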

    def __prepare_diff_matrices(self, model: SEMModel):
        """Builds the derivative of each model matrix w.r.t. each parameter."""
        self.dParamsMatrices = list()
        ms = self.get_matrices()
        for k in range(model.n_param):
            mxType, i, j = model.param_pos[k]
            dMt = np.zeros_like(ms[mxType])
            dMt[i, j] = 1
            if mxType in {'Psi', 'Theta'}:
                dMt[j, i] = 1
            self.dParamsMatrices.append((mxType, dMt))

    @abstractmethod
    def loss_functions(self) -> dict:
        raise ValueError("Loss functions are not specified")

    def reset_params(self):
        self.params = self.initial_params.copy()

    @abstractmethod
    def optimize(self, opt_method='SLSQP', bounds=None, alpha=0):
        raise ValueError("Optimizer is not specified")

    def get_loss_function(self, name):
        loss_dict = self.loss_functions()
        if name in loss_dict:
            return loss_dict[name]
        else:
            raise Exception("SEMopy Backend doesn't support loss function {}.".format(name))
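
    # The model-implied covariance computed below is
    #     Sigma = Lambda (I - Beta)^{-1} Psi (I - Beta)^{-T} Lambda^T + Theta,
    # with C = (I - Beta)^{-1} and M = Lambda C in the code.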

    def calculate_sigma(self, params=None):
        """
        Sigma matrix calculated from the model.
        """
        if params is None:
            params = self.params
        ms = self.get_matrices(params)
        Beta = ms['Beta']
        Lambda = ms['Lambda']
        Psi = ms['Psi']
        Theta = ms['Theta']
        C = np.linalg.pinv(np.identity(Beta.shape[0]) - Beta)
        M = Lambda @ C
        return M @ Psi @ M.T + Theta
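
    # The gradient below differentiates Sigma w.r.t. each parameter by pushing
    # the cached single-entry matrices through the expression above, e.g.
    # dSigma/dTheta_ij is the symmetrised single-entry matrix itself and
    # dSigma/dPsi_ij = M dPsi M^T.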

    def calculate_sigma_gradient(self, params=None):
        if params is None:
            params = self.params
        ms = self.get_matrices(params)
        Beta = ms['Beta']
        Lambda = ms['Lambda']
        Psi = ms['Psi']
        C = np.linalg.pinv(np.identity(Beta.shape[0]) - Beta)
        M = Lambda @ C
        M_T = M.T
        K = C @ Psi
        KM_T = K @ M_T
        grad = list()
        for mxType, mx in self.dParamsMatrices:
            if mxType == 'Theta':
                grad.append(mx)
            elif mxType == 'Lambda':
                t = mx @ KM_T
                grad.append(t + t.T)
            elif mxType == 'Beta':
                t = mx @ K
                grad.append(M @ (t + t.T) @ M_T)
            elif mxType == 'Psi':
                grad.append(M @ mx @ M_T)
            else:
                grad.append(np.zeros_like(ms['Theta']))
        return grad
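
    # The Hessian of Sigma is assembled as an (n_params, n_params, m, m)
    # array: entry [i, j] holds the second derivative of Sigma w.r.t.
    # parameters i and j, built case by case from the matrix types of the
    # two parameters.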

    def calculate_sigma_hessian(self, params=None):
        if params is None:
            params = self.params
        ms = self.get_matrices(params)
        Beta = ms['Beta']
        Lambda = ms['Lambda']
        Psi = ms['Psi']
        zeroMatrix = np.zeros_like(ms['Theta'])
        n, m = len(params), zeroMatrix.shape[0]
        hessian = np.zeros((n, n, m, m))
        C = np.linalg.pinv(np.identity(Beta.shape[0]) - Beta)
        M = Lambda @ C
        M_T = M.T
        CPsi = C @ Psi
        CPsi_T = CPsi.T
        T = CPsi @ C.T
        for i, j in product(range(n), range(n)):
            aType, iMx = self.dParamsMatrices[i]
            bType, jMx = self.dParamsMatrices[j]
            if aType == 'Beta':
                if bType == 'Beta':
                    K = iMx @ CPsi
                    kSum = K + K.T
                    BiC = iMx @ C
                    BiC_T = BiC.T
                    BkC = jMx @ C
                    BkC_T = BkC.T
                    h = M @ (BkC @ kSum + kSum @ BkC_T + BiC @ BkC @ Psi +
                             CPsi_T @ BkC_T @ BiC_T) @ M_T
                    hessian[i, j] = h
                elif bType == 'Lambda':
                    K = iMx @ CPsi
                    kSum = K + K.T
                    t = jMx @ C
                    hessian[i, j] = M @ kSum @ t.T + t @ kSum @ M_T
                elif bType == 'Psi':
                    K_hat = iMx @ C @ jMx
                    hessian[i, j] = M @ (K_hat + K_hat.T) @ M_T
                elif bType == 'Theta':
                    hessian[i, j] = zeroMatrix
            elif aType == 'Lambda':
                if bType == 'Beta':
                    K_hat = jMx @ CPsi
                    kSum = K_hat + K_hat.T
                    Mi = iMx @ C
                    hessian[i, j] = M @ kSum @ Mi.T + Mi @ kSum @ M_T
                elif bType == 'Lambda':
                    hessian[i, j] = iMx @ T @ jMx.T + jMx @ T @ iMx.T
                elif bType == 'Psi':
                    Mi = iMx @ C
                    hessian[i, j] = Mi @ jMx @ M_T + M @ jMx @ Mi.T
                elif bType == 'Theta':
                    hessian[i, j] = zeroMatrix
            elif aType == 'Psi':
                if bType == 'Beta':
                    K = jMx @ CPsi
                    kSum = K + K.T
                    hessian[i, j] = M @ kSum @ M_T
                elif bType == 'Lambda':
                    Mj = jMx @ C
                    hessian[i, j] = Mj @ iMx @ M_T + M @ iMx @ Mj.T
                else:
                    hessian[i, j] = zeroMatrix
            else:
                hessian[i, j] = zeroMatrix
        return hessian

    def ml_wishart(self, params):
        """
        F_wish = tr[S * Sigma^(-1)] + log(det(Sigma)) - log(det(S)) - (# of variables)
        We need to minimise abs(F_wish), as it is the log of a ratio
        and the ratio tends to 1.
        :param params:
        :return:
        """
        Sigma = self.calculate_sigma(params)
        Cov = self.m_cov
        det_sigma = np.linalg.det(Sigma)
        det_cov = np.linalg.det(Cov)
        if det_sigma < 0:
            return 1000000
        log_det_ratio = np.log(det_sigma) - np.log(det_cov)
        inv_Sigma = np.linalg.pinv(Sigma)
        loss = np.trace(Cov @ inv_Sigma) + log_det_ratio - Cov.shape[0]
        return abs(loss)
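
    # The analytic gradient below computes
    #     dF/dtheta_k = tr[Sigma^{-1} dSigma_k - S Sigma^{-1} dSigma_k Sigma^{-1}],
    # with S taken from the stochastic covariance property m_cov_stoch rather
    # than the full-sample covariance (the commented-out alternative).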

    def ml_wishart_gradient(self, params):
        Sigma = self.calculate_sigma(params)
        Sigma_grad = self.calculate_sigma_gradient(params)
        # Cov = self.m_cov
        Cov = self.m_cov_stoch
        inv_Sigma = np.linalg.pinv(Sigma)
        cs = Cov @ inv_Sigma
        return np.array([np.trace(inv_Sigma @ g - cs @ g @ inv_Sigma)
                         for g in Sigma_grad])

    @staticmethod
    def ml_norm_log_likelihood(m_matrix, m_profiles):
        det_sigma = np.linalg.det(m_matrix)
        log_det_sigma = np.log(det_sigma)
        m_inv_sigma = np.linalg.inv(m_matrix)
        k = m_matrix.shape[0]
        acc_log_exp = 0
        for y in m_profiles:
            acc_log_exp -= 1/2 * (log_det_sigma +
                                  y @ m_inv_sigma @ y +
                                  k * np.log(2 * np.pi))
        return acc_log_exp

    @staticmethod
    def ml_norm_log_likelihood_new(m_matrix, m_profiles):
        """
        Same log-likelihood as ml_norm_log_likelihood, computed via
        scipy.stats.multivariate_normal with a zero mean vector.
        :param m_matrix: covariance matrix
        :param m_profiles: observed profiles
        :return: accumulated log-likelihood
        """
        acc_log_exp = 0
        for y in m_profiles:
            acc_log_exp += st.multivariate_normal.logpdf(x=y,
                                                         mean=y * 0,
                                                         cov=m_matrix)
        return acc_log_exp

    def regu_l1(self, params):
        return np.sum(np.abs(params)) / len(params)

    def regu_l1_gradient(self, params):
        return np.sign(params) / len(params)

    def regu_l2(self, params):
        return np.linalg.norm(params) ** 2

    def regu_l2_gradient(self, params):
        return 2 * params
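
    # Each constraint below returns 0 when the corresponding matrix passes its
    # check (non-negative Theta diagonal, positive eigenvalues of Psi and
    # Sigma) and a negative value otherwise, presumably so the concrete
    # optimiser can pass it to a constrained method such as SLSQP.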

    def constraint_theta(self, params):
        ms = self.get_matrices(params)
        # return np.linalg.det(ms['Theta']) - 1e-6
        # return sum(np.linalg.eig(ms['Theta'])[0] > 0) - ms['Theta'].shape[0]
        return sum(ms['Theta'].diagonal() >= 0) - ms['Theta'].shape[0]

    def constraint_psi(self, params):
        ms = self.get_matrices(params)
        # return np.linalg.det(ms['Psi']) - 1e-6
        return sum(np.linalg.eig(ms['Psi'])[0] > 0) - ms['Psi'].shape[0]

    def constraint_sigma(self, params):
        m_sigma = self.calculate_sigma(params)
        # return np.linalg.det(m_sigma) - 1e-6
        return sum(np.linalg.eig(m_sigma)[0] > 0) - m_sigma.shape[0]

    def constraint_all(self, params):
        return self.constraint_psi(params) + \
               self.constraint_sigma(params) + \
               self.constraint_theta(params)

    #
    # def gradient(self):
    #     def grad_coord(x):
    #         return (self.loss_func(self.params + x * eps) - self.loss_func(self.params)) / eps
    #     eps = 1e-6
    #     g = np.array([grad_coord(x) for x in np.identity(len(self.params))])
    #     return g
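

# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original class: a minimal concrete
# subclass showing how the abstract interface above could be filled in.
# The subclass name SEMOptSketch, the loss-function key 'MLW' and the use of
# scipy.optimize.minimize are assumptions made for illustration only; the
# actual semopy/semobay optimisers may wire these pieces up differently.
# ---------------------------------------------------------------------------
from scipy.optimize import minimize


class SEMOptSketch(SEMOptABC):

    def loss_functions(self) -> dict:
        # Map loss names to callables, in the form get_loss_function expects.
        return {'MLW': self.ml_wishart}

    def optimize(self, opt_method='SLSQP', bounds=None, alpha=0):
        # Minimise the Wishart loss with its analytic (stochastic) gradient.
        loss = self.get_loss_function('MLW')
        res = minimize(loss, self.params,
                       jac=self.ml_wishart_gradient,
                       method=opt_method,
                       bounds=bounds if bounds is not None else self.param_bounds)
        self.params = res.x
        self.min_loss = res.fun
        self.min_params = self.params.copy()
        return res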