48 template <
typename T,
typename Real>
50 Real beta,
size_t max_iter = 1000) {
51 const auto x =
static_cast<size_t>(X.rows());
52 const auto y =
static_cast<size_t>(X.cols());
55 BNMF_ASSERT((X.array() >= 0).all(),
"X matrix must be nonnegative");
56 BNMF_ASSERT(r > 0,
"r must be positive");
69 const Real p = 2 - beta;
71 const auto p_int =
static_cast<int>(p);
72 const bool p_is_int = std::abs(p_int - p) <= eps;
74 matrix_t<T> X_hat, nom_W(x, z), denom_W(x, z), nom_H(z, y), denom_H(z, y);
75 while (max_iter-- > 0) {
80 if (p_is_int && p == 0) {
82 nom_W = X * H.transpose();
83 denom_W = X_hat * H.transpose();
84 }
else if (p_is_int && p == 1) {
86 nom_W = (X.array() / X_hat.array()).matrix() * H.transpose();
87 denom_W = matrixd::Ones(x, y) * H.transpose();
88 }
else if (p_is_int && p == 2) {
91 (X.array() / X_hat.array().square()).matrix() * H.transpose();
92 denom_W = (X_hat.array().inverse()).matrix() * H.transpose();
95 nom_W = (X.array() / X_hat.array().pow(p)).matrix() * H.transpose();
96 denom_W = (X_hat.array().pow(1 - p)).matrix() * H.transpose();
98 W = W.array() * nom_W.array() / denom_W.array();
104 if (p_is_int && p == 0) {
106 nom_H = W.transpose() * X;
107 denom_H = W.transpose() * X_hat;
108 }
else if (p_is_int && p == 1) {
110 nom_H = W.transpose() * (X.array() / X_hat.array()).matrix();
111 denom_H = W.transpose() * matrixd::Ones(x, y);
112 }
else if (p_is_int && p == 2) {
115 W.transpose() * (X.array() / X_hat.array().square()).matrix();
116 denom_H = W.transpose() * (X_hat.array().inverse()).matrix();
119 nom_H = W.transpose() * (X.array() / X_hat.array().pow(p)).matrix();
120 denom_H = W.transpose() * (X_hat.array().pow(1 - p)).matrix();
122 H = H.array() * nom_H.array() / denom_H.array();
157 template <
typename Real>
164 return x / (y + eps) -
std::log(x / (y + eps)) - 1;
171 logpart = x *
std::log(x / (y + eps));
173 return logpart - x + y;
180 return nom / (beta * (beta - 1));
200 template <
typename Tensor>
202 typename Tensor::value_type beta,
203 double eps = 1e-50) {
204 typename Tensor::value_type result = 0;
205 #pragma omp parallel for schedule(static) reduction(+:result) 206 for (
long i = 0; i < X.rows(); ++i) {
207 for (
long j = 0; j < X.cols(); ++j) {
234 template <
typename InputIterator1,
typename InputIterator2>
236 InputIterator2 second_begin,
237 decltype(*first_begin) beta,
double eps = 1e-50) {
auto beta_divergence_seq(InputIterator1 first_begin, InputIterator1 first_end, InputIterator2 second_begin, decltype(*first_begin) beta, double eps=1e-50)
Compute the β-divergence between corresponding elements of two sequences.
Definition: nmf.hpp:235
std::pair< matrix_t< T >, matrix_t< T > > nmf(const matrix_t< T > &X, size_t r, Real beta, size_t max_iter=1000)
Compute nonnegative matrix factorization of a matrix.
Definition: nmf.hpp:49
Eigen::Matrix< Scalar, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor > matrix_t
Matrix type used in the computations.
Definition: defs.hpp:41
Real beta_divergence(Real x, Real y, Real beta, double eps=1e-50)
Compute the β-divergence between two scalar values.
Definition: nmf.hpp:158
Main namespace for bnmf-algs library.
Definition: alloc_model_funcs.hpp:12
T inner_product(T...args)