bnmf-algs
bld_mult_funcs.hpp
Go to the documentation of this file.
1 #pragma once
2 
#include "defs.hpp"

#include <algorithm>
#include <thread>
#include <utility>

#ifdef USE_OPENMP
#include <omp.h>
#endif
9 
10 namespace bnmf_algs {
11 namespace details {
15 namespace bld_mult {
16 
36 template <typename T> tensor_t<T, 3> init_S(const matrix_t<T>& X, size_t z) {
37  const auto x = static_cast<size_t>(X.rows());
38  const auto y = static_cast<size_t>(X.cols());
39 
40 #ifdef USE_OPENMP
41  const int num_threads = std::thread::hardware_concurrency();
42 #endif
43 
44  tensor_t<T, 3> S(x, y, z);
45  vectord dirichlet_params = vectord::Constant(z, 1);
46  #pragma omp parallel
47  {
48 #ifdef USE_OPENMP
49  int thread_id = omp_get_thread_num();
50  bool last_thread = thread_id + 1 == num_threads;
51  long num_rows = x / num_threads;
52  long beg = thread_id * num_rows;
53  long end = !last_thread ? beg + num_rows : x;
54 #else
55  long beg = 0;
56  long end = x;
57 #endif
58 
59  util::gsl_rng_wrapper rand_gen(gsl_rng_alloc(gsl_rng_taus),
60  gsl_rng_free);
61  vectord dirichlet_variates(z);
62  for (long i = beg; i < end; ++i) {
63  for (size_t j = 0; j < y; ++j) {
64  gsl_ran_dirichlet(rand_gen.get(), z, dirichlet_params.data(),
65  dirichlet_variates.data());
66  for (size_t k = 0; k < z; ++k) {
67  S(i, j, k) = X(i, j) * dirichlet_variates(k);
68  }
69  }
70  }
71  }
72 
73  return S;
74 }
75 
86 template <typename T>
87 matrix_t<T> X_reciprocal(const matrix_t<T>& X, double eps) {
88  const auto x = static_cast<size_t>(X.rows());
89  const auto y = static_cast<size_t>(X.cols());
90 
92  #pragma omp parallel for schedule(static)
93  for (size_t i = 0; i < x; ++i) {
94  for (size_t j = 0; j < y; ++j) {
95  X_reciprocal(i, j) = 1.0 / (X(i, j) + eps);
96  }
97  }
98 
99  return X_reciprocal;
100 }
101 
110 template <typename Scalar>
113  vector_t<Scalar> alpha(params.alpha.size());
114  vector_t<Scalar> beta(params.beta.size());
115  std::copy(params.alpha.begin(), params.alpha.end(), alpha.data());
116  std::copy(params.beta.begin(), params.beta.end(), beta.data());
117 
118  return std::make_pair(alpha, beta);
119 }
120 
129 template <typename T>
130 void update_alpha_eph(const tensor_t<T, 2>& S_ipk, const vector_t<T>& alpha,
131  matrix_t<T>& alpha_eph) {
132  const auto x = static_cast<size_t>(S_ipk.dimension(0));
133  const auto z = static_cast<size_t>(S_ipk.dimension(1));
134 
135  #pragma omp parallel for schedule(static)
136  for (size_t i = 0; i < x; ++i) {
137  for (size_t k = 0; k < z; ++k) {
138  alpha_eph(i, k) = S_ipk(i, k) + alpha(i);
139  }
140  }
141 }
142 
151 template <typename T>
152 void update_beta_eph(const tensor_t<T, 2>& S_pjk, const vector_t<T>& beta,
153  matrix_t<T>& beta_eph) {
154  const auto y = static_cast<size_t>(S_pjk.dimension(0));
155  const auto z = static_cast<size_t>(S_pjk.dimension(1));
156 
157  #pragma omp parallel for schedule(static)
158  for (size_t j = 0; j < y; ++j) {
159  for (size_t k = 0; k < z; ++k) {
160  beta_eph(j, k) = S_pjk(j, k) + beta(k);
161  }
162  }
163 }
164 
176 template <typename T, typename PsiFunction>
177 void update_grad_plus(const tensor_t<T, 3>& S, const matrix_t<T>& beta_eph,
178  PsiFunction psi_fn, tensor_t<T, 3>& grad_plus) {
179  const auto x = static_cast<size_t>(S.dimension(0));
180  const auto y = static_cast<size_t>(S.dimension(1));
181  const auto z = static_cast<size_t>(S.dimension(2));
182 
183  #pragma omp parallel for schedule(static)
184  for (size_t i = 0; i < x; ++i) {
185  for (size_t j = 0; j < y; ++j) {
186  for (size_t k = 0; k < z; ++k) {
187  grad_plus(i, j, k) =
188  psi_fn(beta_eph(j, k)) - psi_fn(S(i, j, k) + 1);
189  }
190  }
191  }
192 }
193 
204 template <typename T, typename PsiFunction>
205 void update_grad_minus(const matrix_t<T>& alpha_eph, PsiFunction psi_fn,
206  matrix_t<T>& grad_minus) {
207 
208  const auto x = static_cast<size_t>(grad_minus.rows());
209  const auto z = static_cast<size_t>(grad_minus.cols());
210 
211  // update alpha_eph_sum
212  vector_t<T> psi_alpha_eph_sum(z);
213  #pragma omp parallel for schedule(static)
214  for (size_t k = 0; k < z; ++k) {
215  T sum = T();
216  for (size_t i = 0; i < x; ++i) {
217  sum += alpha_eph(i, k);
218  }
219  psi_alpha_eph_sum(k) = psi_fn(sum);
220  }
221 
222  // update grad_minus
223  #pragma omp parallel for schedule(static)
224  for (size_t i = 0; i < x; ++i) {
225  for (size_t k = 0; k < z; ++k) {
226  grad_minus(i, k) = psi_alpha_eph_sum(k) - psi_fn(alpha_eph(i, k));
227  }
228  }
229 }
230 
241 template <typename T>
243  const matrix_t<T>& grad_minus, const tensor_t<T, 3>& S,
244  matrix_t<T>& nom_mult) {
245  const auto x = static_cast<size_t>(S.dimension(0));
246  const auto y = static_cast<size_t>(S.dimension(1));
247  const auto z = static_cast<size_t>(S.dimension(2));
248 
249  #pragma omp parallel for schedule(static)
250  for (size_t i = 0; i < x; ++i) {
251  for (size_t j = 0; j < y; ++j) {
252  T nom_sum = T();
253  for (size_t k = 0; k < z; ++k) {
254  nom_sum += (S(i, j, k) * X_reciprocal(i, j) * grad_minus(i, k));
255  }
256  nom_mult(i, j) = nom_sum;
257  }
258  }
259 }
260 
271 template <typename T>
273  const tensor_t<T, 3>& grad_plus, const tensor_t<T, 3>& S,
274  matrix_t<T>& denom_mult) {
275  const auto x = static_cast<size_t>(S.dimension(0));
276  const auto y = static_cast<size_t>(S.dimension(1));
277  const auto z = static_cast<size_t>(S.dimension(2));
278 
279  #pragma omp parallel for schedule(static)
280  for (size_t i = 0; i < x; ++i) {
281  for (size_t j = 0; j < y; ++j) {
282  T denom_sum = T();
283  for (size_t k = 0; k < z; ++k) {
284  denom_sum +=
285  (S(i, j, k) * X_reciprocal(i, j) * grad_plus(i, j, k));
286  }
287  denom_mult(i, j) = denom_sum;
288  }
289  }
290 }
291 
306 template <typename T>
307 void update_S(const matrix_t<T>& X, const matrix_t<T>& nom,
308  const matrix_t<T>& denom, const matrix_t<T>& grad_minus,
309  const tensor_t<T, 3>& grad_plus, const tensor_t<T, 2>& S_ijp,
310  tensor_t<T, 3>& S, double eps) {
311  const auto x = static_cast<size_t>(S.dimension(0));
312  const auto y = static_cast<size_t>(S.dimension(1));
313  const auto z = static_cast<size_t>(S.dimension(2));
314 
315  // update S
316  #pragma omp parallel for schedule(static)
317  for (size_t i = 0; i < x; ++i) {
318  for (size_t j = 0; j < y; ++j) {
319  T s_ij = S_ijp(i, j);
320  T x_over_s = X(i, j) / (s_ij + eps);
321  for (size_t k = 0; k < z; ++k) {
322  S(i, j, k) *= (grad_plus(i, j, k) + nom(i, j)) * x_over_s /
323  (grad_minus(i, k) + denom(i, j) + eps);
324  }
325  }
326  }
327 }
328 
329 } // namespace bld_mult
330 } // namespace details
331 } // namespace bnmf_algs
Structure to hold the parameters for the Allocation Model.
Definition: alloc_model_params.hpp:25
T copy(T...args)
void update_denom_mult(const matrix_t< T > &X_reciprocal, const tensor_t< T, 3 > &grad_plus, const tensor_t< T, 3 > &S, matrix_t< T > &denom_mult)
Update denom_mult matrix used in bld_mult.
Definition: bld_mult_funcs.hpp:272
void update_grad_minus(const matrix_t< T > &alpha_eph, PsiFunction psi_fn, matrix_t< T > &grad_minus)
Update grad_minus tensor used in bld_mult.
Definition: bld_mult_funcs.hpp:205
void update_grad_plus(const tensor_t< T, 3 > &S, const matrix_t< T > &beta_eph, PsiFunction psi_fn, tensor_t< T, 3 > &grad_plus)
Update grad_plus tensor used in bld_mult.
Definition: bld_mult_funcs.hpp:177
void update_alpha_eph(const tensor_t< T, 2 > &S_ipk, const vector_t< T > &alpha, matrix_t< T > &alpha_eph)
Update alpha_eph matrix used in bld_mult.
Definition: bld_mult_funcs.hpp:130
matrix_t< T > X_reciprocal(const matrix_t< T > &X, double eps)
Compute the element-wise reciprocal of the input matrix X.
Definition: bld_mult_funcs.hpp:87
Eigen::Tensor< Scalar, N, Eigen::RowMajor > tensor_t
Tensor type used in the computations.
Definition: defs.hpp:52
Eigen::Matrix< Scalar, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor > matrix_t
Matrix type used in the computations.
Definition: defs.hpp:41
T end(T...args)
tensor_t< T, 3 > bld_mult(const matrix_t< T > &X, const size_t z, const alloc_model::Params< Scalar > &model_params, size_t max_iter=1000, bool use_psi_appr=false, double eps=1e-50)
Compute tensor S, the solution of the BLD problem, from matrix X using multiplicative update rules...
Definition: bld_mult.hpp:83
T hardware_concurrency(T...args)
tensor_t< T, 3 > init_S(const matrix_t< T > &X, size_t z)
Initialize S tensor using a Dirichlet sample of size z with all concentration parameters set to 1...
Definition: bld_mult_funcs.hpp:36
void update_S(const matrix_t< T > &X, const matrix_t< T > &nom, const matrix_t< T > &denom, const matrix_t< T > &grad_minus, const tensor_t< T, 3 > &grad_plus, const tensor_t< T, 2 > &S_ijp, tensor_t< T, 3 > &S, double eps)
Update S tensor (output of bld_mult algorithm).
Definition: bld_mult_funcs.hpp:307
std::vector< Scalar > alpha
Parameter vector of Dirichlet prior for matrix of size .
Definition: alloc_model_params.hpp:37
void update_beta_eph(const tensor_t< T, 2 > &S_pjk, const vector_t< T > &beta, matrix_t< T > &beta_eph)
Update beta_eph matrix used in bld_mult.
Definition: bld_mult_funcs.hpp:152
T make_pair(T...args)
std::pair< vector_t< Scalar >, vector_t< Scalar > > init_alpha_beta(const alloc_model::Params< Scalar > &params)
Initialize alpha and beta vectors used in bld_mult.
Definition: bld_mult_funcs.hpp:112
T get(T...args)
void update_nom_mult(const matrix_t< T > &X_reciprocal, const matrix_t< T > &grad_minus, const tensor_t< T, 3 > &S, matrix_t< T > &nom_mult)
Update nom_mult matrix used in bld_mult.
Definition: bld_mult_funcs.hpp:242
T size(T...args)
T begin(T...args)
vector_t< double > vectord
Vector type specialization using double as Scalar value.
Definition: defs.hpp:32
std::vector< Scalar > beta
Parameter vector of Dirichlet prior for matrix of size .
Definition: alloc_model_params.hpp:42
Eigen::Matrix< Scalar, 1, Eigen::Dynamic, Eigen::RowMajor > vector_t
Vector type used in the computations.
Definition: defs.hpp:27
Main namespace for bnmf-algs library.
Definition: alloc_model_funcs.hpp:12