Gaussian Benchmarking

This example runs proximal nested sampling on a Gaussian problem for which the Bayesian evidence is available in closed form, so the numerical estimate can be validated against the analytic value.

[1]:
import numpy as np
import ProxNest.utils as utils
import ProxNest.sampling as sampling
import ProxNest.optimisations as optimisations
import ProxNest.operators as operators

Generate mock data

[2]:
# Dimension of Gaussian
dimension = 200

# A simple identity forward model and redundant dictionary
phi = operators.sensing_operators.Identity()
psi = operators.sensing_operators.Identity()

# Generate a vector drawn from a Uniform distribution
image = np.random.rand(dimension, 1)

# Simulate unit-variance Gaussian noise and add it to this vector;
# the noisy result then plays the role of the measurements y below
sigma = 1
n = sigma*np.random.randn(dimension, 1)
image = image + n

Define parameters

[6]:
# Define a regularisation parameter (this should be tuned for a given problem)
delta = 1/2

# Parameter dictionary for the optimisation problem of resampling from the prior subject to the likelihood iso-ball
params = utils.create_parameters_dict(
           y = image,                # Measurements i.e. data
         Phi = phi,                  # Forward model
     epsilon = 1e-3,                 # Radius of L2-ball of likelihood
       tight = True,                 # Is Phi a tight frame or not?
          nu = 1,                    # Bound on the squared-norm of Phi
         tol = 1e-10,                # Convergence tolerance of algorithm
    max_iter = 200,                  # Maximum number of iterations
     verbose = 0,                    # Verbosity level
           u = 0,                    # Initial vector for the dual problem
         pos = True,                 # Positivity flag
     reality = True                  # Reality flag
)

# Options dictionary associated with the overall sampling algorithm
options = utils.create_options_dict(
    samplesL = 2e4,                  # Number of live samples
    samplesD = 3e5,                  # Number of discarded samples
    thinning = 1e1,                  # Thinning factor (to mitigate correlations)
       delta = 1e-2,                 # Discretisation stepsize
        burn = 1e2,                  # Number of burn-in samples
       sigma = sigma                 # Noise standard deviation of degraded image
)
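
Note that delta defines a Gaussian prior of the form exp(-delta*||x||^2). Its normalisation constant (pi/delta)^(dimension/2) reappears below when the raw evidence estimate is rescaled; a quick illustrative computation of its logarithm (a sketch using only names defined above):

[ ]:
# Log of the Gaussian prior normalisation constant (pi/delta)^(d/2);
# the same quantity is added to the raw evidence estimate later on
log_prior_norm = (dimension/2)*np.log(np.pi/delta)
print(log_prior_norm)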

Create lambda functions

[7]:
# Lambda function to evaluate the Gaussian log-likelihood
LogLikeliL = lambda sol: -np.linalg.norm(image - phi.dir_op(sol))**2/(2*sigma**2)

# Lambda function for the backprojection (gradient) step of the L2-norm-squared Gaussian prior
proxH = lambda x, T: x - 2*T*psi.adj_op(psi.dir_op(x))*2*delta

# Lambda function for L2-ball likelihood projection during resampling
proxB = lambda x, tau: optimisations.l2_ball_proj.sopt_fast_proj_B2(x, tau, params)
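
As a quick illustrative check (not part of the benchmark), the log-likelihood above should be maximal, i.e. zero, at the data itself, and a proxH step should shrink a test vector towards the origin, as expected for a Gaussian prior:

[ ]:
# Illustrative sanity checks on the lambda functions above
assert LogLikeliL(image) == 0.0  # the log-likelihood peaks at the data
x_test = np.random.randn(dimension, 1)
print(np.linalg.norm(proxH(x_test, 1e-2)) < np.linalg.norm(x_test))  # True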

Perform Proximal Nested Sampling

[8]:
# Select a starting position
X0 = np.abs(phi.adj_op(image))

# Perform proximal nested sampling
NS_BayEvi, NS_Trace = sampling.proximal_nested.ProxNestedSampling(X0, LogLikeliL, proxH, proxB, params, options)
# Rescale the estimate by the log normalisation constant of the Gaussian
# prior, log[(pi/delta)^(dimension/2)], to match the analytic evidence below
rescaled_evidence_estimate = NS_BayEvi[0] + np.log(np.pi/delta)*(dimension/2)
ProxNest || Initialise: 100%|██████████| 200/200 [00:00<00:00, 41102.49it/s]
ProxNest || Populate: 100%|██████████| 200098/200098 [00:02<00:00, 90153.93it/s]
ProxNest || Sample: 100%|██████████| 300000/300000 [00:13<00:00, 22818.77it/s]
ProxNest || Compute Weights: 100%|██████████| 300000/300000 [00:00<00:00, 1758625.40it/s]
ProxNest || Trapezium Integrate: 100%|██████████| 299998/299998 [00:00<00:00, 2324879.32it/s]
ProxNest || Estimate Variance: 100%|██████████| 300000/300000 [00:00<00:00, 600760.56it/s]
ProxNest || Compute Posterior Mean: 100%|██████████| 300000/300000 [00:00<00:00, 667114.42it/s]

Evaluate analytic evidence

[10]:
# detPar is the posterior variance 1/(2*delta + 1/sigma^2)
detPar = 1/(2*delta + 1/sigma**2)
ySquare = np.linalg.norm(image, 'fro')**2
# Closed-form log-evidence, obtained by completing the square in
# Z = \int exp(-||y - x||^2/(2*sigma^2)) exp(-delta*||x||^2) dx
#   = (2*pi*detPar)^(d/2) exp(-||y||^2/(2*sigma^2) + (detPar/2)*||y||^2/sigma^4)
BayEvi_Val_gt_log = np.log(np.sqrt(((2*np.pi)**dimension)*(detPar**dimension))) + (-ySquare/(2*sigma**2)) + (detPar/2)*(ySquare/sigma**4)
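
Since Phi is the identity, the integrand of the evidence factorises across dimensions, so the closed form above can be cross-checked by one-dimensional numerical integration. A minimal sketch (assuming SciPy is available; it is not used elsewhere in this example):

[ ]:
from scipy.integrate import quad

# Total log-evidence as a sum of d independent 1D log-integrals:
#   Z_i = \int exp(-(y_i - x)^2/(2*sigma^2) - delta*x^2) dx
log_evidence_check = 0.0
for yi in image.flatten():
    Zi, _ = quad(lambda x: np.exp(-(yi - x)**2/(2*sigma**2) - delta*x**2),
                 -np.inf, np.inf)
    log_evidence_check += np.log(Zi)
print(log_evidence_check)  # should agree with BayEvi_Val_gt_log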

Compare evidence estimates

[12]:
print(rescaled_evidence_estimate)
print(BayEvi_Val_gt_log)
44.97347444294445
48.985383360556256
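
The estimate agrees with the analytic value to within a few nats in this 200-dimensional problem; for scale, the gap can also be expressed per dimension (a sketch):

[ ]:
# Gap between the analytic value and the estimate, total and per dimension
gap = BayEvi_Val_gt_log - rescaled_evidence_estimate
print(gap, gap/dimension)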