// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "absl/random/discrete_distribution.h"

#include <cassert>
#include <cmath>
#include <cstddef>
#include <numeric>
#include <utility>
#include <vector>

namespace absl {
inline namespace lts_2019_08_08 {
namespace random_internal {

// Initializes the distribution table for Walker's Aliasing algorithm, described
// in Knuth, Vol 2. as well as in https://en.wikipedia.org/wiki/Alias_method
std::vector<std::pair<double, size_t>> InitDiscreteDistribution(
    std::vector<double>* probabilities) {
  // The empty case should already have been handled by the constructor.
  assert(probabilities);
  assert(!probabilities->empty());

  // Step 1. Normalize the input probabilities to 1.0.
  double sum = std::accumulate(std::begin(*probabilities),
                               std::end(*probabilities), 0.0);
  if (std::fabs(sum - 1.0) > 1e-6) {
    // Scale `probabilities` only when the sum is too far from 1.0. Scaling
    // unconditionally will alter the probabilities slightly.
    for (double& item : *probabilities) {
      item = item / sum;
    }
  }
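
  // For example (values chosen only for illustration): an input of
  // {1.0, 2.0, 7.0} sums to 10.0, so the loop above rescales it to
  // {0.1, 0.2, 0.7}; an input that already sums to 1.0 is left untouched.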

  // Step 2. At this point `probabilities` is set to the conditional
  // probabilities of each element, which sum to 1.0 to within reasonable
  // error. These values are used to construct the proportional probability
  // tables for the selection phases of Walker's Aliasing algorithm.
  //
  // To construct the table, pick an element which is under-full (i.e., an
  // element for which `(*probabilities)[i] < 1.0/n`), and pair it with an
  // element which is over-full (i.e., an element for which
  // `(*probabilities)[i] > 1.0/n`). The smaller value can always be retired.
  // The larger may still be greater than 1.0/n, or may now be less than
  // 1.0/n, and is put back onto the appropriate collection.
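  //
  // Worked example (values chosen only for illustration): with normalized
  // input {0.1, 0.2, 0.7} and n = 3, the scaled weights are {0.3, 0.6, 2.1},
  // so the work lists below start as `under` = {0, 1} and `over` = {2}.
  // Pairing 1 with 2 sets q[1] = {0.6, 2} and leaves 2.1 - (1.0 - 0.6) = 1.7
  // on element 2; pairing 0 with 2 sets q[0] = {0.3, 2} and leaves
  // 1.7 - (1.0 - 0.3) = 1.0, so q[2] ends as {1.0, 2}. Element 2 is then
  // selected with probability (1.0 + 0.7 + 0.4) / 3 = 0.7, as required.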
  const size_t n = probabilities->size();
  std::vector<std::pair<double, size_t>> q;
  q.reserve(n);

  std::vector<size_t> over;
  std::vector<size_t> under;
  size_t idx = 0;
  for (const double item : *probabilities) {
    assert(item >= 0);
    const double v = item * n;
    q.emplace_back(v, 0);
    if (v < 1.0) {
      under.push_back(idx++);
    } else {
      over.push_back(idx++);
    }
  }

  while (!over.empty() && !under.empty()) {
    // Fill the under-full element `lo` up to 1.0 using mass from the
    // over-full element `hi`: `lo` is retired with alias `hi`, and `hi`
    // keeps whatever probability mass it has left.
    auto lo = under.back();
    under.pop_back();
    auto hi = over.back();
    over.pop_back();

    q[lo].second = hi;
    const double r = q[hi].first - (1.0 - q[lo].first);
    q[hi].first = r;
    if (r < 1.0) {
      under.push_back(hi);
    } else {
      over.push_back(hi);
    }
  }

  // Due to rounding errors, there may be un-paired elements in either
  // collection; these should all be values near 1.0. For these values, set `q`
  // to 1.0 and set the alternate to the identity.
  for (auto i : over) {
    q[i] = {1.0, i};
  }
  for (auto i : under) {
    q[i] = {1.0, i};
  }
  return q;
}
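
// Sketch of the selection phase that consumes the table built above
// (illustrative only: the helper name `SampleFromTable` is hypothetical, and
// the library's actual draw path is absl::discrete_distribution::operator()
// in discrete_distribution.h). A draw picks a bucket uniformly, then keeps it
// with probability q[i].first or follows its alias. Requires <random>:
//
//   template <typename URBG>
//   size_t SampleFromTable(const std::vector<std::pair<double, size_t>>& q,
//                          URBG& urbg) {
//     std::uniform_int_distribution<size_t> pick_bucket(0, q.size() - 1);
//     std::uniform_real_distribution<double> coin(0.0, 1.0);
//     const size_t i = pick_bucket(urbg);
//     return coin(urbg) < q[i].first ? i : q[i].second;
//   }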

}  // namespace random_internal
}  // inline namespace lts_2019_08_08
}  // namespace absl