This example demonstrates sparse grid density estimation (SGDE).
#include <iostream>
#include <memory>
#include <random>
#include <string>
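The excerpt also relies on SG++ types (DataVector, DataMatrix, LearnerSGDE) and the operation factory. The following includes and using-declarations are a plausible completion; the header paths are assumptions and may differ between SG++ versions.
#include <sgpp/base/datatypes/DataMatrix.hpp>
#include <sgpp/base/datatypes/DataVector.hpp>
#include <sgpp/datadriven/DatadrivenOpFactory.hpp>
#include <sgpp/datadriven/application/LearnerSGDE.hpp>
// The helper functions below use the unqualified names.
using sgpp::base::DataMatrix;
using sgpp::base::DataVector;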
First, define two auxiliary functions, randu and randn, that fill a given vector with random values (uniformly and normally distributed, respectively).
void randu(DataVector& rvar, std::mt19937& generator) {
  std::uniform_real_distribution<double> distribution(0.0, 1.0);
  for (size_t j = 0; j < rvar.getSize(); ++j) {
    rvar[j] = distribution(generator);
  }
}
void randn(DataVector& rvar, std::mt19937& generator) {
  std::normal_distribution<double> distribution(0.5, 0.1);
  for (size_t j = 0; j < rvar.getSize(); ++j) {
    // Rejection sampling: draw until the value lies in (0, 1).
    double value = -1.0;
    while (value <= 0.0 || value >= 1.0) {
      value = distribution(generator);
    }
    rvar[j] = value;
  }
}
Second, define two overloads that call the functions defined above once per row and fill a matrix with random points (uniform, normal).
void randu(DataMatrix& rvar, std::uint64_t seedValue = std::mt19937_64::default_seed) {
  size_t nsamples = rvar.getNrows(), ndim = rvar.getNcols();
  std::mt19937 generator(seedValue);
  DataVector sample(ndim);
  for (size_t i = 0; i < nsamples; ++i) {
    randu(sample, generator);
    rvar.setRow(i, sample);
  }
}
void randn(DataMatrix& rvar, std::uint64_t seedValue = std::mt19937_64::default_seed) {
  size_t nsamples = rvar.getNrows(), ndim = rvar.getNcols();
  std::mt19937 generator(seedValue);
  DataVector sample(ndim);
  for (size_t i = 0; i < nsamples; ++i) {
    randn(sample, generator);
    rvar.setRow(i, sample);
  }
}
Now the main function begins. The test data are generated on the fly with the helpers defined above rather than read from a file.
int main(int argc, char** argv) {
Define the number of dimensions of the toy problem.
Generate normally distributed samples.
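A minimal sketch of these two steps, assuming three dimensions and 1000 samples (both values are illustrative) and using the randn helper defined above:
size_t numDims = 3;
sgpp::base::DataMatrix samples(1000, numDims);
randn(samples);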
Configure the sparse grid of level 3 with linear basis functions and the same dimensionality as the test data.
Alternatively, a sparse grid that has been saved to a file can be loaded; see the commented line in the sketch below.
std::cout << "# create grid config" << std::endl;
gridConfig.
dim_ = numDims;
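The remaining grid settings follow from the description above; a sketch assuming SG++'s RegularGridConfiguration fields:
gridConfig.level_ = 3;
gridConfig.type_ = sgpp::base::GridType::Linear;
// gridConfig.filename_ = "...";  // hypothetical: load a stored grid instead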
Configure the adaptive refinement: specify the number of refinement steps and the number of points to refine.
std::cout << "# create adaptive refinement config" << std::endl;
Configure the solver. The solver type is set to the conjugate gradient method, and the maximum number of iterations, the tolerance epsilon, and the threshold are specified.
std::cout << "# create solver config" << std::endl;
sgpp::solver::SLESolverConfiguration solverConfig;
solverConfig.eps_ = 1e-14;
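The remaining solver settings named above, sketched with assumed SLESolverConfiguration fields and illustrative values:
solverConfig.type_ = sgpp::solver::SLESolverType::CG;
solverConfig.maxIterations_ = 1000;
solverConfig.threshold_ = 1e-14;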
Configure the regularization, here based on the Laplacian operator.
std::cout << "# create regularization config" << std::endl;
Configure the learner by specifying:
- whether cross-validation is enabled and the number of folds (kfold),
- an initial value for the Lagrangian multiplier \(\lambda\) and the interval \([\lambda_{\mathrm{start}}, \lambda_{\mathrm{end}}]\) in which \(\lambda\) is searched,
- whether a logarithmic scale is used,
- the shuffle parameter and an initial seed for the random number generation,
- whether the output shall be suppressed (silent mode).
std::cout << "# create learner config" << std::endl;
sgpp::datadriven::CrossvalidationForRegularizationConfiguration crossvalidationConfig;
crossvalidationConfig.enable_ = false;
crossvalidationConfig.kfold_ = 3;
crossvalidationConfig.lambda_ = 3.16228e-06;
crossvalidationConfig.seed_ = 1234567;
crossvalidationConfig.silent_ = false;
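The interval, logarithmic scale, and shuffle parameters from the list above are set analogously; the field names and values below are assumptions:
crossvalidationConfig.lambdaStart_ = 1e-1;
crossvalidationConfig.lambdaEnd_ = 1e-10;
crossvalidationConfig.lambdaSteps_ = 0;
crossvalidationConfig.logScale_ = true;
crossvalidationConfig.shuffle_ = true;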
std::cout << "# create learner config" << std::endl;
Create the learner using the configurations set above. Then initialize it with the samples generated above and train the learner.
std::cout << "# creating the learner" << std::endl;
sgpp::datadriven::LearnerSGDE learner(gridConfig, adaptConfig, solverConfig,
                                      regularizationConfig, crossvalidationConfig,
                                      sgdeConfig);
learner.initialize(samples);
Estimate the probability density function (pdf) via a Gaussian kernel density estimation (KDE) and print the corresponding values for comparison.
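The KDE object and the evaluation point x used below are not declared in the excerpt; a sketch, assuming SG++'s kernel density estimator (named GaussianKDE in older releases) and evaluating at the center of the unit hypercube:
sgpp::datadriven::KernelDensityEstimator kde(samples);
sgpp::base::DataVector x(learner.getDim());
x.setAll(0.5);  // evaluate the densities at (0.5, ..., 0.5)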
std::cout << "--------------------------------------------------------\n";
std::cout << learner.getSurpluses().getSize() << " -> " << learner.getSurpluses().sum() << "\n";
std::cout << "pdf_SGDE(x) = " << learner.pdf(x) << " ~ " << kde.pdf(x) << " =pdf_KDE(x)\n";
std::cout << "mean_SGDE(x) = " << learner.mean() << " ~ " << kde.mean() << " = mean_KDE(x)\n";
std::cout << "var_SGDE(x) = " << learner.variance() << " ~ " << kde.variance() << "=var_KDE(x)\n";
for (size_t idim = 0; idim < gridConfig.dim_; idim++) {
  bounds->set(idim, 0, 0.0);
  bounds->set(idim, 1, 1.0);
}
Print the covariances.
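The covariance matrix C and the call that fills it are missing from the excerpt; a sketch, assuming a cov method on the learner:
sgpp::base::DataMatrix C(gridConfig.dim_, gridConfig.dim_);
learner.cov(C);  // assumed API: compute the SGDE covariance into C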
std::cout << "---------------------- Cov_SGDE ------------------------------" << std::endl;
std::cout << C.toString() << std::endl;
std::cout << "---------------------- Cov KDE--------------------------------" << std::endl;
std::cout << C.toString() << std::endl;
Apply the inverse Rosenblatt transformation to a matrix of random points. To do this, first generate the random points via randu, then initialize an inverse Rosenblatt transformation operation and apply it to the points. Finally, print the calculated values.
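A sketch of the point generation, assuming twelve random points (the count is illustrative) and the randu matrix helper defined above:
sgpp::base::DataMatrix points(12, gridConfig.dim_);
randu(points);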
std::cout << "------------------------------------------------------" << std::endl;
std::cout << "------------------------------------------------------" << std::endl;
std::cout << "uniform space" << std::endl;
std::cout << points.toString() << std::endl;
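// Assumed op_factory API; getGrid() is assumed to expose the learner's grid.
sgpp::base::DataMatrix pointsCdf(points.getNrows(), points.getNcols());
auto opInvRos =
    sgpp::op_factory::createOperationInverseRosenblattTransformation(*learner.getGrid());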
opInvRos->doTransformation(&learner.getSurpluses(), &points, &pointsCdf);
To check whether the results are correct, perform a Rosenblatt transformation on the data created by the inverse Rosenblatt transformation above and print the calculated values.
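The transformation object is missing from the excerpt; a sketch with the assumed forward counterpart of the factory call above:
auto opRos = sgpp::op_factory::createOperationRosenblattTransformation(*learner.getGrid());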
opRos->doTransformation(&learner.getSurpluses(), &pointsCdf, &points);
std::cout << "------------------------------------------------------" << std::endl;
std::cout << "original space" << std::endl;
std::cout << pointsCdf.toString() << std::endl;
std::cout << "------------------------------------------------------" << std::endl;
std::cout << "uniform space" << std::endl;
std::cout << points.toString() << std::endl;
}