[1]:
import hmclab
import matplotlib.pyplot as plt
import numpy

Transforming existing distributions#

In this notebook we show how to apply a coordinate transform to any distribution. As an example, we define a Normal distribution in logarithmic space simply by initializing a Normal distribution and transforming it to logarithmic coordinates; the same transformation can in principle be applied to any distribution.

[2]:
dimensions = 20
ones = numpy.ones((dimensions, 1))

prior_1 = hmclab.Distributions.Normal(
    means=3 * ones,
    covariance=0.05 * ones,
)
prior_1_l = hmclab.Distributions.TransformToLogSpace(prior_1)
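
To make the change of variables concrete, here is a minimal sketch (an assumption about the mechanics, not hmclab's actual implementation) of what TransformToLogSpace does to the density: if x has density p_x, then m = 10**x has density p_x(log10(m)) / (m ln 10). For the Normal distribution above this yields a log-normal, which we can verify against SciPy (assuming SciPy is available):

import numpy
from scipy import stats

def transformed_logpdf(logpdf_x, m):
    # Change of variables for m = 10**x:
    # p_m(m) = p_x(log10(m)) * |dx/dm| = p_x(log10(m)) / (m * ln 10)
    x = numpy.log10(m)
    return logpdf_x(x) - numpy.log(m * numpy.log(10.0))

# For x ~ N(3, 0.05), m = 10**x is log-normal with
# s = sqrt(0.05) * ln 10 and scale = 10**3 = 1000.
normal_x = stats.norm(loc=3.0, scale=numpy.sqrt(0.05))
lognormal_m = stats.lognorm(s=numpy.sqrt(0.05) * numpy.log(10.0), scale=1000.0)

m = numpy.array([500.0, 1000.0, 2000.0])
print(transformed_logpdf(normal_x.logpdf, m))  # should match the line below
print(lognormal_m.logpdf(m))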

Let’s generate samples from both distributions, and additionally transform the samples from the original distribution, to illustrate exactly what is happening. The samples from the transformed distribution and the transformed samples of the original distribution should follow the same distribution.

[3]:
n_samples = 1000000

plt.figure(figsize=(16, 5))

plt.subplot(131)
plt.title("Samples from original distribution")
samples = prior_1.generate(n_samples)
plt.hist2d(samples[0, :], samples[1, :], bins=100, cmap=plt.get_cmap("Greys"))
plt.xlabel("$\log_{10}(c0)$")
plt.ylabel("$\log_{10}(c1)$")

plt.subplot(132)
plt.title("Samples from original distribution,\ntransformed using 10**(samples)")
samples = 10**samples
_, bins1, bins2, _ = plt.hist2d(
    samples[0, :], samples[1, :], bins=100, cmap=plt.get_cmap("Greys")
)
plt.xlabel("coordinate 0")
plt.ylabel("coordinate 1")

plt.subplot(133)
plt.title("Samples from transformed distribution")
samples = prior_1_l.generate(n_samples)
_ = plt.hist2d(
    samples[0, :], samples[1, :], bins=[bins1, bins2], cmap=plt.get_cmap("Greys")
)
plt.xlabel("coordinate 0")
plt.ylabel("coordinate 1")
[3]:
Text(0, 0.5, 'coordinate 1')
[Figure: 2D histograms of the direct samples, the transformed samples, and samples from the transformed distribution]

That looks like good agreement! Let’s test whether this transformation also works with gradient-based and standard random-walk sampling, applying both HMC and RWMH:

[4]:
initial_model = prior_1_l.generate()

sampler_rwmh = hmclab.Samplers.RWMH()

sampler_rwmh.sample(
    "bin_samples/tutorial_5_rwmh.h5",
    prior_1_l,
    stepsize=10.0,
    initial_model=initial_model,
    overwrite_existing_file=True,
    online_thinning=1,
    autotuning=True,
    proposals=100000,
    max_time=1.0,
)

sampler_hmc = hmclab.Samplers.HMC()

_ = sampler_hmc.sample(
    "bin_samples/tutorial_5_hmc.h5",
    prior_1_l,
    stepsize=100.0,
    initial_model=initial_model,
    overwrite_existing_file=True,
    online_thinning=1,
    amount_of_steps=10,
    autotuning=True,
    proposals=20000,
    max_time=1.0,
)

sampler_hmc_many_samples = hmclab.Samplers.HMC()

_ = sampler_hmc_many_samples.sample(
    "bin_samples/tutorial_5_hmc_many_samples.h5",
    prior_1_l,
    stepsize=100.0,
    initial_model=initial_model,
    overwrite_existing_file=True,
    online_thinning=1,
    amount_of_steps=10,
    autotuning=True,
    proposals=100000,
    max_time=10.0,
    disable_progressbar=True,
)

sampler_rwmh._repr_html_(), sampler_hmc._repr_html_()

[4]:
('', '')
[5]:
samples_rwmh = sampler_rwmh.load_results()
samples_hmc = sampler_hmc.load_results()
samples_hmc_many_samples = sampler_hmc_many_samples.load_results()

Let’s visualize the results of this sampling. To highlight the tails, we make the vertical scale of the histograms in the top row logarithmic; the bottom row shows the same histograms on a linear scale.

[6]:
plt.figure(figsize=(20, 10))

parameter_to_plot = 0

plt.subplot(241)
plt.title("Direct samples")
_, bins, _ = plt.hist(
    samples[parameter_to_plot, :],
    range=[0, 5000],
    bins=50,
    color="k",
    density=True,
)
plt.ylim([1e-6, 1e-3])
plt.ylabel("occurence")
plt.xlabel(f"parameter {parameter_to_plot}")
plt.gca().set_yscale("log")

plt.subplot(242)
plt.title("HMC time=10sec")
plt.hist(
    samples_hmc_many_samples[parameter_to_plot, :],
    bins=bins,
    color="r",
    density=True,
)
plt.ylim([1e-6, 1e-3])
plt.ylabel("occurence")
plt.xlabel(f"parameter {parameter_to_plot}")
plt.gca().set_yscale("log")

plt.subplot(243)
plt.title("HMC time=1sec")
plt.hist(
    samples_hmc[parameter_to_plot, :],
    bins=bins,
    color="r",
    density=True,
)
plt.ylim([1e-6, 1e-3])
plt.ylabel("occurence")
plt.xlabel(f"parameter {parameter_to_plot}")
plt.gca().set_yscale("log")

plt.subplot(244)
plt.title("RWMH time=1sec")
plt.hist(
    samples_rwmh[parameter_to_plot, :],
    bins=bins,
    color="b",
    density=True,
)
plt.ylim([1e-6, 1e-3])
plt.ylabel("occurence")
plt.xlabel(f"parameter {parameter_to_plot}")
plt.gca().set_yscale("log")


### Second row

plt.subplot(245)
_, bins, _ = plt.hist(
    samples[parameter_to_plot, :],
    range=[0, 5000],
    bins=50,
    color="k",
    density=True,
)
plt.ylim([1e-6, 1e-3])
plt.ylabel("occurence")
plt.xlabel(f"parameter {parameter_to_plot}")

plt.subplot(246)
plt.hist(
    samples_hmc_many_samples[parameter_to_plot, :],
    bins=bins,
    color="r",
    density=True,
)
plt.ylim([1e-6, 1e-3])
plt.ylabel("occurence")
plt.xlabel(f"parameter {parameter_to_plot}")

plt.subplot(247)
plt.hist(
    samples_hmc[parameter_to_plot, :],
    bins=bins,
    color="r",
    density=True,
)
plt.ylim([1e-6, 1e-3])
plt.ylabel("occurence")
plt.xlabel(f"parameter {parameter_to_plot}")

plt.subplot(248)
plt.hist(
    samples_rwmh[parameter_to_plot, :],
    bins=bins,
    color="b",
    density=True,
)
plt.ylim([1e-6, 1e-3])
plt.ylabel("occurence")
plt.xlabel(f"parameter {parameter_to_plot}")
[6]:
Text(0.5, 0, 'parameter 0')
[Figure: marginal histograms of parameter 0 for direct samples, HMC (10 s), HMC (1 s), and RWMH (1 s); top row logarithmic, bottom row linear vertical scale]

All three Markov chains seem to sample the distribution well, but HMC and RWMH behave quite differently in limited time. The histogram for 1 second of HMC looks more jagged around the mode (parameter 0 ≈ 1000), but the tail towards high parameter values is sampled comparatively better than with RWMH.
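
As an illustrative follow-up (not part of the original notebook), we can quantify that tail claim by comparing the fraction of samples beyond an arbitrary threshold, say m = 2000, in each chain against the direct samples:

threshold = 2000.0
for name, chain in [
    ("direct", samples),
    ("HMC 10 s", samples_hmc_many_samples),
    ("HMC 1 s", samples_hmc),
    ("RWMH 1 s", samples_rwmh),
]:
    # Empirical tail mass for parameter 0; closer to "direct" is better.
    fraction = (chain[0, :] > threshold).mean()
    print(f"{name}: fraction above {threshold:.0f} = {fraction:.2e}")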

Let’s now try this with a distribution that is already non-Gaussian, e.g. a uniform distribution.

[7]:
dimensions = 1
ones = numpy.ones((dimensions, 1))

prior_2 = hmclab.Distributions.Uniform(
    lower_bounds=2.7 * ones,
    upper_bounds=4 * ones,
)
prior_2_l = hmclab.Distributions.TransformToLogSpace(prior_2)
[8]:
_ = plt.hist(prior_2_l.generate(100000).flatten(), bins=50)
plt.title("Log-transformed uniform distribution")
plt.ylabel("occurence")
plt.xlabel(f"parameter 0")
[8]:
Text(0.5, 0, 'parameter 0')
[Figure: histogram of the log-transformed uniform distribution]

Looking good! The transformed samples follow a 1/m density between 10^2.7 ≈ 500 and 10^4 = 10000, exactly as the Jacobian predicts. Use these transforms, or build your own, to encode appropriate prior information.
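
If you do build your own transform, the essential bookkeeping is the Jacobian term in both the misfit and its gradient. Here is a hedged sketch with illustrative helper names (not hmclab's API), for a distribution with misfit chi_x(x) = -log p_x(x) transformed to m = 10**x:

import numpy

def transformed_misfit(misfit_x, m):
    # -log p_m(m) = -log p_x(log10(m)) + sum(log(m * ln 10))
    x = numpy.log10(m)
    return misfit_x(x) + numpy.sum(numpy.log(m * numpy.log(10.0)))

def transformed_gradient(gradient_x, m):
    # Chain rule: d/dm chi_x(log10(m)) = gradient_x / (m * ln 10);
    # the Jacobian term log(m * ln 10) contributes 1/m per dimension.
    x = numpy.log10(m)
    return gradient_x(x) / (m * numpy.log(10.0)) + 1.0 / m

That gradient is exactly what makes gradient-based samplers such as HMC applicable to the transformed distribution, as demonstrated above.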