20 orig_over_appr = X.array() / (X_hat + eps_mat).array();
28 matrixd appr_squared = X_hat.array().pow(2);
29 orig_over_appr_squared = X.array() / (appr_squared + eps_mat).array();
35 auto x =
static_cast<size_t>(S.dimension(0));
36 auto y =
static_cast<size_t>(S.dimension(1));
37 auto z =
static_cast<size_t>(S.dimension(2));
39 #pragma omp parallel for schedule(static) 40 for (
size_t i = 0; i < x; ++i) {
41 for (
size_t j = 0; j < y; ++j) {
42 for (
size_t k = 0; k < z; ++k) {
43 S(i, j, k) = orig_over_appr(i, j) * nu(i, k) * mu(k, j);
52 auto x =
static_cast<size_t>(S.dimension(0));
53 auto z =
static_cast<size_t>(S.dimension(2));
57 Eigen::Map<matrix_t<T>> S_ipk(S_ipk_tensor.data(), x, z);
58 alpha_eph = S_ipk.array().colwise() + alpha.transpose().array();
64 auto y =
static_cast<size_t>(S.dimension(1));
65 auto z =
static_cast<size_t>(S.dimension(2));
68 Eigen::Map<matrix_t<T>> S_pjk(S_pjk_tensor.data(), y, z);
69 beta_eph = S_pjk.array().rowwise() + beta.array();
75 auto x =
static_cast<size_t>(grad_plus.dimension(0));
76 auto y =
static_cast<size_t>(grad_plus.dimension(1));
77 auto z =
static_cast<size_t>(grad_plus.dimension(2));
79 #pragma omp parallel for 80 for (
size_t i = 0; i < x; ++i) {
81 for (
size_t j = 0; j < y; ++j) {
82 for (
size_t k = 0; k < z; ++k) {
92 auto x =
static_cast<size_t>(grad_minus.rows());
93 auto z =
static_cast<size_t>(grad_minus.cols());
95 vector_t<T> alpha_eph_sum = alpha_eph.colwise().sum();
96 #pragma omp parallel for 97 for (
size_t i = 0; i < x; ++i) {
98 for (
size_t k = 0; k < z; ++k) {
105 template <
typename T>
110 auto x =
static_cast<size_t>(grad_plus.dimension(0));
111 auto y =
static_cast<size_t>(grad_plus.dimension(1));
112 auto z =
static_cast<size_t>(grad_plus.dimension(2));
115 #pragma omp parallel for 116 for (
size_t i = 0; i < x; ++i) {
117 for (
size_t j = 0; j < y; ++j) {
118 for (
size_t k = 0; k < z; ++k) {
120 orig_over_appr(i, j) * mu(k, j) * grad_plus(i, j, k);
124 #pragma omp parallel for 125 for (
size_t i = 0; i < x; ++i) {
126 for (
size_t j = 0; j < y; ++j) {
127 for (
size_t k = 0; k < z; ++k) {
128 for (
size_t c = 0; c < z; ++c) {
129 nom(i, k) += orig_over_appr_squared(i, j) * mu(k, j) *
130 nu(i, c) * mu(c, j) * grad_minus(i, c);
136 #pragma omp parallel for 137 for (
size_t i = 0; i < x; ++i) {
138 for (
size_t j = 0; j < y; ++j) {
139 for (
size_t k = 0; k < z; ++k) {
141 orig_over_appr(i, j) * mu(k, j) * grad_minus(i, k);
145 #pragma omp parallel for 146 for (
size_t i = 0; i < x; ++i) {
147 for (
size_t j = 0; j < y; ++j) {
148 for (
size_t k = 0; k < z; ++k) {
149 for (
size_t c = 0; c < z; ++c) {
150 denom(i, k) += orig_over_appr_squared(i, j) * mu(k, j) *
151 nu(i, c) * mu(c, j) * grad_plus(i, j, c);
157 #pragma omp parallel for schedule(static) 158 for (
size_t i = 0; i < x; ++i) {
159 for (
size_t k = 0; k < z; ++k) {
160 nu(i, k) *= nom(i, k) / (denom(i, k) + eps);
167 nu = nu.array().colwise() / nu_rowsum_plus_eps.transpose().array();
170 template <
typename T>
176 auto x =
static_cast<size_t>(grad_plus.dimension(0));
177 auto y =
static_cast<size_t>(grad_plus.dimension(1));
178 auto z =
static_cast<size_t>(grad_plus.dimension(2));
181 #pragma omp parallel for 182 for (
size_t j = 0; j < y; ++j) {
183 for (
size_t k = 0; k < z; ++k) {
184 for (
size_t i = 0; i < x; ++i) {
186 orig_over_appr(i, j) * nu(i, k) * grad_plus(i, j, k);
190 #pragma omp parallel for 191 for (
size_t j = 0; j < y; ++j) {
192 for (
size_t k = 0; k < z; ++k) {
193 for (
size_t i = 0; i < x; ++i) {
194 for (
size_t c = 0; c < z; ++c) {
195 nom(k, j) += orig_over_appr_squared(i, j) * nu(i, k) *
196 nu(i, c) * mu(c, j) * grad_minus(i, c);
202 #pragma omp parallel for 203 for (
size_t j = 0; j < y; ++j) {
204 for (
size_t k = 0; k < z; ++k) {
205 for (
size_t i = 0; i < x; ++i) {
207 orig_over_appr(i, j) * nu(i, k) * grad_minus(i, k);
211 #pragma omp parallel for 212 for (
size_t j = 0; j < y; ++j) {
213 for (
size_t k = 0; k < z; ++k) {
214 for (
size_t i = 0; i < x; ++i) {
215 for (
size_t c = 0; c < z; ++c) {
216 denom(k, j) += orig_over_appr_squared(i, j) * nu(i, k) *
217 nu(i, c) * mu(c, j) * grad_plus(i, j, c);
223 #pragma omp parallel for schedule(static) 224 for (
size_t k = 0; k < z; ++k) {
225 for (
size_t j = 0; j < y; ++j) {
226 mu(k, j) *= nom(k, j) / (denom(k, j) + eps);
233 mu = mu.array().rowwise() / mu_colsum_plus_eps.array();
285 template <
typename T,
typename Scalar>
289 size_t max_iter = 1000,
double eps = 1e-50) {
292 auto x =
static_cast<size_t>(X.rows());
293 auto y =
static_cast<size_t>(X.cols());
301 matrix_t<T> nu(x, z);
302 matrix_t<T> mu(y, z);
311 for (
size_t i = 0; i < x; ++i) {
312 gsl_ran_dirichlet(rnd_gen.
get(), z, dirichlet_params.
data(),
315 for (
size_t j = 0; j < y; ++j) {
316 gsl_ran_dirichlet(rnd_gen.
get(), z, dirichlet_params.
data(),
320 mu.transposeInPlace();
324 matrix_t<T> grad_minus(x, z);
327 matrix_t<T> X_hat, orig_over_appr, orig_over_appr_squared, alpha_eph,
330 for (
size_t eph = 0; eph < max_iter; ++eph) {
336 X, X_hat, eps_mat, orig_over_appr_squared);
345 mu, grad_plus, grad_minus, eps, nu);
352 X, X_hat, eps_mat, orig_over_appr_squared);
361 nu, grad_plus, grad_minus, eps, mu);
void update_grad_minus(const matrix_t< T > &alpha_eph, matrix_t< T > &grad_minus)
Definition: bld_appr.hpp:91
Structure to hold the parameters for the Allocation Model.
Definition: alloc_model_params.hpp:25
static void update_mu(const matrix_t< T > &orig_over_appr, const matrix_t< T > &orig_over_appr_squared, const matrix_t< T > &nu, const tensor_t< T, 3 > &grad_plus, const matrix_t< T > &grad_minus, double eps, matrix_t< T > &mu)
Definition: bld_appr.hpp:171
Eigen::Tensor< Scalar, N, Eigen::RowMajor > tensor_t
Tensor type used in the computations.
Definition: defs.hpp:52
Eigen::Matrix< Scalar, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor > matrix_t
Matrix type used in the computations.
Definition: defs.hpp:41
void update_beta_eph(const vector_t< T > &beta, const tensor_t< T, 3 > &S, matrix_t< T > &beta_eph)
Definition: bld_appr.hpp:62
void update_grad_plus(const matrix_t< T > &beta_eph, const tensor_t< T, 3 > &S, tensor_t< T, 3 > &grad_plus)
Definition: bld_appr.hpp:73
void update_orig_over_appr(const matrix_t< T > &X, const matrix_t< T > &X_hat, const matrix_t< T > &eps_mat, matrix_t< T > &orig_over_appr)
Definition: bld_appr.hpp:17
matrix_t< double > matrixd
Matrix type specialization using double as Scalar value.
Definition: defs.hpp:46
void update_S(const matrix_t< T > &orig_over_appr, const matrix_t< T > &nu, const matrix_t< T > &mu, tensor_t< T, 3 > &S)
Definition: bld_appr.hpp:33
Eigen::array< size_t, N > shape
Shape of vectors, matrices, tensors, etc.
Definition: defs.hpp:66
std::vector< Scalar > alpha
Parameter vector of the Dirichlet prior for the corresponding factor matrix (the matrix symbol and size annotation were lost during extraction).
Definition: alloc_model_params.hpp:37
Real psi_appr(Real x)
Calculate the digamma function approximately using the first 8 terms of the asymptotic expansion of the digamma function.
Definition: util.hpp:440
void check_bld_params(const matrix_t< T > &X, size_t z, const alloc_model::Params< Scalar > &model_params)
Do parameter checks on BLD computing function parameters and throw an assertion error if the parameters are invalid.
Definition: util_details.hpp:20
std::tuple< tensor_t< T, 3 >, matrix_t< T >, matrix_t< T > > bld_appr(const matrix_t< T > &X, size_t z, const alloc_model::Params< Scalar > &model_params, size_t max_iter=1000, double eps=1e-50)
Compute tensor S, the solution of the BLD problem, and the optimization matrices nu and mu from matrix X using the approximate multiplicative update algorithm.
Definition: bld_appr.hpp:287
void update_nu(const matrix_t< T > &orig_over_appr, const matrix_t< T > &orig_over_appr_squared, const matrix_t< T > &mu, const tensor_t< T, 3 > &grad_plus, const matrix_t< T > &grad_minus, double eps, matrix_t< T > &nu)
Definition: bld_appr.hpp:106
void update_orig_over_appr_squared(const matrix_t< T > &X, const matrix_t< T > &X_hat, const matrix_t< T > &eps_mat, matrix_t< T > &orig_over_appr_squared)
Definition: bld_appr.hpp:24
void update_alpha_eph(const vector_t< T > &alpha, const tensor_t< T, 3 > &S, matrix_t< T > &alpha_eph)
Definition: bld_appr.hpp:50
std::vector< Scalar > beta
Parameter vector of the Dirichlet prior for the corresponding factor matrix (the matrix symbol and size annotation were lost during extraction).
Definition: alloc_model_params.hpp:42
Eigen::Matrix< Scalar, 1, Eigen::Dynamic, Eigen::RowMajor > vector_t
Vector type used in the computations.
Definition: defs.hpp:27
Main namespace for bnmf-algs library.
Definition: alloc_model_funcs.hpp:12
void update_X_hat(const matrix_t< T > &nu, const matrix_t< T > &mu, matrix_t< T > &X_hat)
Definition: bld_appr.hpp:11