bnmf-algs
util.hpp
#pragma once

#include "defs.hpp"
#include "util/generator.hpp"

#include <cmath>
#include <cstddef>
#include <limits>
#include <tuple>
#include <utility>
#include <vector>

#ifdef USE_OPENMP
#include <thread>
#endif

namespace bnmf_algs {
namespace details {

// forward declaration needed to use this function in left_partition
template <typename IntIterator>
void right_partition(IntIterator begin, IntIterator end, IntIterator all_begin,
                     IntIterator all_end,
                     std::vector<std::pair<size_t, size_t>>& change_indices);

/**
 * Partition the number stored at the end of the given sequence towards the
 * beginning of the sequence by repeatedly decrementing one entry and
 * incrementing another, recording each step in change_indices as a
 * (decremented index, incremented index) pair.
 */
template <typename IntIterator>
void left_partition(IntIterator begin, IntIterator end, IntIterator all_begin,
                    IntIterator all_end,
                    std::vector<std::pair<size_t, size_t>>& change_indices) {

    const auto k = end - begin;
    const auto n = *(end - 1);

    // base cases
    if (k == 1) {
        return;
    } else if (k == 2) {
        // when k = 2, a simple loop is enough
        auto& rightmost = *(end - 1);
        auto& leftmost = *(end - 2);

        const size_t rightmost_index = (end - 1) - all_begin;
        const size_t leftmost_index = (end - 2) - all_begin;

        while (rightmost > 0) {
            --rightmost;
            ++leftmost;
            change_indices.emplace_back(rightmost_index, leftmost_index);
        }
    } else {
        // compute partitions recursively
        auto& rightmost = *(end - 1);
        auto& leftmost = *begin;
        auto& leftmost_next = *(begin + 1);

        const size_t rightmost_index = (end - 1) - all_begin;
        const size_t leftmost_index = begin - all_begin;
        const size_t leftmost_next_index = (begin + 1) - all_begin;

        while (true) {
            left_partition(begin + 1, end, all_begin, all_end, change_indices);
            --leftmost_next;
            ++leftmost;
            change_indices.emplace_back(leftmost_next_index, leftmost_index);

            if (leftmost == n) {
                break;
            }

            right_partition(begin + 1, end, all_begin, all_end, change_indices);
            --rightmost;
            ++leftmost;
            change_indices.emplace_back(rightmost_index, leftmost_index);

            if (leftmost == n) {
                break;
            }
        }
    }
}

/**
 * Partition the number stored at the beginning of the given sequence towards
 * the end of the sequence by repeatedly decrementing one entry and
 * incrementing another, recording each step in change_indices as a
 * (decremented index, incremented index) pair.
 */
template <typename IntIterator>
void right_partition(IntIterator begin, IntIterator end, IntIterator all_begin,
                     IntIterator all_end,
                     std::vector<std::pair<size_t, size_t>>& change_indices) {

    const auto k = end - begin;
    const auto n = *begin;

    // base cases
    if (k == 1) {
        return;
    } else if (k == 2) {
        auto& leftmost = *begin;
        auto& rightmost = *(begin + 1);

        const size_t leftmost_index = begin - all_begin;
        const size_t rightmost_index = (begin + 1) - all_begin;

        while (leftmost > 0) {
            --leftmost;
            ++rightmost;
            change_indices.emplace_back(leftmost_index, rightmost_index);
        }
    } else {
        // compute new partitions recursively
        auto& leftmost = *begin;
        auto& rightmost = *(end - 1);
        auto& rightmost_prev = *(end - 2);

        const size_t leftmost_index = begin - all_begin;
        const size_t rightmost_index = (end - 1) - all_begin;
        const size_t rightmost_prev_index = (end - 2) - all_begin;

        while (true) {
            right_partition(begin, end - 1, all_begin, all_end, change_indices);
            --rightmost_prev;
            ++rightmost;
            change_indices.emplace_back(rightmost_prev_index, rightmost_index);

            if (rightmost == n) {
                break;
            }

            left_partition(begin, end - 1, all_begin, all_end, change_indices);
            --leftmost;
            ++rightmost;
            change_indices.emplace_back(leftmost_index, rightmost_index);

            if (rightmost == n) {
                break;
            }
        }
    }
}
} // namespace details

namespace util {

/**
 * Expand the elements of the given tuple into a call to f using the given
 * index sequence.
 */
template <typename Function, typename Tuple, size_t... I>
auto call(Function f, Tuple t, std::index_sequence<I...> seq) {
    return f(std::get<I>(t)...);
}

/**
 * Forward the elements of a tuple to a function and return the result.
 */
template <typename Function, typename Tuple> auto call(Function f, Tuple t) {
    static constexpr auto size = std::tuple_size<Tuple>::value;
    return call(f, t, std::make_index_sequence<size>{});
}

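// Usage sketch (illustrative; the add3 function below is an assumption, not
// part of the library): util::call unpacks a tuple into an ordinary function
// call, assuming <tuple> is included.
//
//     int add3(int a, int b, int c) { return a + b + c; }
//
//     auto args = std::make_tuple(1, 2, 3);
//     int sum = bnmf_algs::util::call(add3, args);   // sum == 6
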
/**
 * Compute the sparseness of the given 3D tensor.
 */
template <typename T> double sparseness(const tensor_t<T, 3>& S) {
    // TODO: implement a method to unpack std::array
    auto x = static_cast<size_t>(S.dimension(0));
    auto y = static_cast<size_t>(S.dimension(1));
    auto z = static_cast<size_t>(S.dimension(2));

    T sum = 0, squared_sum = 0;
    #pragma omp parallel for schedule(static) reduction(+:sum,squared_sum)
    for (size_t i = 0; i < x; ++i) {
        for (size_t j = 0; j < y; ++j) {
            for (size_t k = 0; k < z; ++k) {
                sum += S(i, j, k);
                squared_sum += S(i, j, k) * S(i, j, k);
            }
        }
    }

    if (squared_sum < std::numeric_limits<double>::epsilon()) {
        // all-zero tensor: sparseness is undefined; return the largest
        // representable value
        return std::numeric_limits<double>::max();
    }

    double frob_norm = std::sqrt(squared_sum);
    double axis_mult = std::sqrt(x * y * z);

    return (axis_mult - sum / frob_norm) / (axis_mult - 1);
}

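// Usage sketch (illustrative, not part of the library): sparseness is roughly
// 0 for a tensor whose entries are all equal and roughly 1 when all of the
// mass sits on a single entry.
//
//     bnmf_algs::tensor_t<double, 3> S(2, 2, 2);
//     S.setConstant(1);
//     double dense_score = bnmf_algs::util::sparseness(S);   // ~0
//     S.setZero();
//     S(0, 0, 0) = 5;
//     double sparse_score = bnmf_algs::util::sparseness(S);  // ~1
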
/**
 * Type of the norm to be used by bnmf_algs::util::normalize and
 * bnmf_algs::util::normalized.
 */
enum class NormType {
    L1,  ///< Normalize using the 1-norm (sum of absolute values along the axis)
    L2,  ///< Normalize using the sum of squares along the axis
    Inf  ///< Normalize using the infinity norm (maximum absolute value along the axis)
};

/**
 * Normalize a tensor in-place along the given axis.
 */
template <typename Scalar, int N>
void normalize(tensor_t<Scalar, N>& input, size_t axis,
               NormType type = NormType::L1, double eps = 1e-50) {
    static_assert(N >= 1, "Tensor must be at least 1 dimensional");
    BNMF_ASSERT(axis < N, "Normalization axis must be less than N");

    // calculate output dimensions
    auto input_dims = input.dimensions();
    Eigen::array<long, N - 1> output_dims;
    for (size_t i = 0, j = 0; i < N; ++i) {
        if (i != axis) {
            output_dims[j] = input_dims[i];
            ++j;
        }
    }

    // initialize thread device if using openmp
#ifdef USE_OPENMP
    Eigen::ThreadPool tp(std::thread::hardware_concurrency());
    Eigen::ThreadPoolDevice thread_dev(&tp,
                                       std::thread::hardware_concurrency());
#endif

    // calculate norms
    tensor_t<Scalar, N - 1> norms(output_dims);
    shape<1> reduction_dim{axis};
    switch (type) {
    case NormType::L1:
#ifdef USE_OPENMP
        norms.device(thread_dev) = input.abs().sum(reduction_dim);
#else
        norms = input.abs().sum(reduction_dim);
#endif
        break;
    case NormType::L2:
#ifdef USE_OPENMP
        norms.device(thread_dev) = input.square().sum(reduction_dim);
#else
        norms = input.square().sum(reduction_dim);
#endif
        break;
    case NormType::Inf:
#ifdef USE_OPENMP
        norms.device(thread_dev) = input.abs().maximum(reduction_dim);
#else
        norms = input.abs().maximum(reduction_dim);
#endif
        break;
    }

    // add epsilon to norm values to prevent division by zero
    Scalar* norms_begin = norms.data();
    #pragma omp parallel for schedule(static)
    for (long i = 0; i < norms.size(); ++i) {
        norms_begin[i] += eps;
    }

    // divide along each slice
    int axis_dims = input.dimensions()[axis];
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < axis_dims; ++i) {
        input.chip(i, axis) /= norms;
    }
}

/**
 * Return a normalized copy of a tensor along the given axis.
 */
template <typename Scalar, int N>
tensor_t<Scalar, N> normalized(const tensor_t<Scalar, N>& input, size_t axis,
                               NormType type = NormType::L1,
                               double eps = 1e-50) {
    tensor_t<Scalar, N> out = input;
    normalize(out, axis, type, eps);
    return out;
}

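// Usage sketch (illustrative, not part of the library): divide every entry of
// a 2D tensor by the L1 norm of its slice along axis 0, once on a copy and
// once in-place.
//
//     bnmf_algs::tensor_t<double, 2> X(3, 4);
//     X.setRandom();
//     auto Y = bnmf_algs::util::normalized(X, 0, bnmf_algs::util::NormType::L1);
//     bnmf_algs::util::normalize(X, 0);   // X now holds the same values as Y
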
/**
 * Calculate the digamma function approximately using the first 8 terms of the
 * asymptotic expansion of digamma.
 */
template <typename Real> Real psi_appr(Real x) {
    Real extra = 0;

    // write each case separately to minimize the number of divisions as much
    // as possible
    if (x < 1) {
        const Real a = x + 1;
        const Real b = x + 2;
        const Real c = x + 3;
        const Real d = x + 4;
        const Real e = x + 5;
        const Real ab = a * b;
        const Real cd = c * d;
        const Real ex = e * x;

        extra = ((a + b) * cd * ex + ab * d * ex + ab * c * ex + ab * cd * x +
                 ab * cd * e) /
                (ab * cd * ex);
        x += 6;
    } else if (x < 2) {
        const Real a = x + 1;
        const Real b = x + 2;
        const Real c = x + 3;
        const Real d = x + 4;
        const Real ab = a * b;
        const Real cd = c * d;
        const Real dx = d * x;

        extra =
            ((a + b) * c * dx + ab * dx + ab * c * x + ab * cd) / (ab * cd * x);
        x += 5;
    } else if (x < 3) {
        const Real a = x + 1;
        const Real b = x + 2;
        const Real c = x + 3;
        const Real ab = a * b;
        const Real cx = c * x;

        extra = ((a + b) * cx + (c + x) * ab) / (ab * cx);
        x += 4;
    } else if (x < 4) {
        const Real a = x + 1;
        const Real b = x + 2;
        const Real ab = a * b;

        extra = ((a + b) * x + ab) / (ab * x);
        x += 3;
    } else if (x < 5) {
        const Real a = x + 1;

        extra = (a + x) / (a * x);
        x += 2;
    } else if (x < 6) {
        extra = 1 / x;
        x += 1;
    }

    Real x2 = x * x;
    Real x4 = x2 * x2;
    Real x6 = x4 * x2;
    Real x8 = x6 * x2;
    Real x10 = x8 * x2;
    Real x12 = x10 * x2;
    Real x13 = x12 * x;
    Real x14 = x13 * x;

    // write the result of the formula simplified using symbolic calculation
    // to minimize the number of divisions
    Real res =
        std::log(x) + (-360360 * x13 - 60060 * x12 + 6006 * x10 - 2860 * x8 +
                       3003 * x6 - 5460 * x4 + 15202 * x2 - 60060) /
                          (720720 * x14);

    return res - extra;
}

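// Usage sketch (illustrative, not part of the library): the approximation
// should agree closely with known digamma values; psi(1) is the negated
// Euler-Mascheroni constant.
//
//     double val = bnmf_algs::util::psi_appr(1.0);   // approximately -0.577216
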
/**
 * Compute the sequence of indices to be incremented and decremented to
 * generate all k length partitions of number n.
 */
template <typename Integer>
std::vector<std::pair<size_t, size_t>> partition_change_indices(Integer n,
                                                                Integer k) {
    BNMF_ASSERT(k > 0, "k must be greater than 0 in util::all_partitions");
    BNMF_ASSERT(n > 0, "n must be greater than 0 in util::all_partitions");

    // initial partition
    std::vector<Integer> vec(k, 0);
    vec[0] = n;

    // partition towards right
    std::vector<std::pair<size_t, size_t>> result;
    details::right_partition(vec.begin(), vec.end(), vec.begin(), vec.end(),
                             result);

    return result;
}

/**
 * Compute all possible k length partitions of given number n.
 */
template <typename Integer>
std::vector<std::vector<Integer>> all_partitions(Integer n, Integer k) {
    // get indices to increment/decrement
    auto part_change_vec = partition_change_indices(n, k);

    // compute partitions iteratively by applying increment/decrement
    // operations
    std::vector<std::vector<Integer>> result;
    std::vector<Integer> partition(k, 0);
    partition[0] = n;
    result.push_back(partition);
    size_t decr_idx, incr_idx;
    for (const auto& change_idx : part_change_vec) {
        std::tie(decr_idx, incr_idx) = change_idx;
        --partition[decr_idx];
        ++partition[incr_idx];
        result.push_back(partition);
    }

    return result;
}

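// Usage sketch (illustrative, not part of the library): enumerate every way of
// writing 2 as an ordered sum of 3 nonnegative integers. The count is
// C(n + k - 1, k - 1); tracing the change indices by hand suggests the order
// shown below.
//
//     auto parts = bnmf_algs::util::all_partitions(2, 3);
//     // 6 partitions, expected in the order
//     // {2,0,0}, {1,1,0}, {0,2,0}, {0,1,1}, {1,0,1}, {0,0,2}
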
} // namespace util
} // namespace bnmf_algs