
I am trying to fit some synthetic data with a simple straight line of the form y = mx + c using a parallel-tempered MCMC. My goal is to understand how to use it, so that I can later apply it to more complex models. The example I am trying is a copy of what has already been done with a simple emcee code: http://dfm.io/emcee/current/user/line/ , but instead of that plain MCMC I want to use the parallel-tempered MCMC: http://dfm.io/emcee/current/user/pt/

Here is a working code:

import numpy as np
from emcee import PTSampler
import emcee

# Choose the "true" parameters.
m_true = -0.9594
b_true = 4.294
f_true = 0.534

# Generate some synthetic data from the model.
N = 50
x = np.sort(10*np.random.rand(N))
yerr = 0.1+0.5*np.random.rand(N)
y = m_true*x+b_true
y += np.abs(f_true*y) * np.random.randn(N)
y += yerr * np.random.randn(N)


def lnlike(theta, x, y, yerr):
    m, b, lnf = theta
    model = m * x + b
    inv_sigma2 = 1.0/(yerr**2 + model**2*np.exp(2*lnf))
    return -0.5*(np.sum((y-model)**2*inv_sigma2 - np.log(inv_sigma2)))
def lnprior(theta):
    m, b, lnf = theta
    if -5.0 < m < 0.5 and 0.0 < b < 10.0 and -10.0 < lnf < 1.0:
        return 0.0
    return -np.inf
def lnprob(theta, x, y, yerr):
    lp = lnprior(theta)
    if not np.isfinite(lp):
        return -np.inf
    return lp + lnlike(theta, x, y, yerr)

import scipy.optimize as op
nll = lambda *args: -lnlike(*args)
result = op.minimize(nll, [m_true, b_true, np.log(f_true)], args=(x, y, yerr))
m_ml, b_ml, lnf_ml = result["x"]
init = [0.5, m_ml, b_ml, lnf_ml]

ntemps = 10
nwalkers = 100
ndim = 3
from multiprocessing import Pool
pos = np.random.uniform(low=-1, high=1, size=(ntemps, nwalkers, ndim))
for i in range(ntemps):
    #initialize parameters near scipy optima
    pos[i:,] = np.array([result["x"] + 1e-4*np.random.randn(ndim) for i in range(nwalkers)])
pool = Pool(processes=4)
sampler=PTSampler(ntemps,nwalkers, ndim, lnlike, lnprior, loglargs=(x, y, yerr), pool=pool)# args=(x, y, yerr))
#burn-in 
sampler.run_mcmc(pos, 1000)
sampler.reset()
sampler.run_mcmc(pos, 10000, thin=10)
samples = sampler.chain.reshape((-1, ndim))
print('Number of posterior samples is {}'.format(samples.shape[0]))
#print best fit value together with errors
print(list(map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]),
               zip(*np.percentile(samples, [16, 50, 84],
                                  axis=0)))))

import corner
fig = corner.corner(samples, labels=["$m$", "$b$", "$\ln\,f$"],
                      truths=[m_true, b_true, np.log(f_true)])
fig.savefig("triangle.png")

The only problem when running this code is that the best-fit parameter values I get are far from the true values. Increasing the number of walkers or samples does not help in any sense. Can anyone tell me why the tempered MCMC is not working here?

Update:

I found a useful package, ptemcee (https://pypi.org/project/ptemcee/#description), although documentation for this package is essentially non-existent. It seems this package could be useful; any help on how to implement the same linear fit with it would also be appreciated.
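My guess, since ptemcee was forked from emcee's PTSampler, is that the call would look roughly like the sketch below, reusing the definitions from the script above. This is untested, and the constructor argument order and keyword names are only my assumption, so check help(ptemcee.Sampler) for the installed version:

import ptemcee

# Untested sketch: I am assuming ptemcee's Sampler takes the walker and
# dimension counts first and the number of temperatures as a keyword,
# with a PTSampler-like run_mcmc/reset/chain interface.
sampler = ptemcee.Sampler(nwalkers, ndim, lnlike, lnprior,
                          ntemps=ntemps, loglargs=(x, y, yerr))
sampler.run_mcmc(pos, 1000)   # burn-in
sampler.reset()
sampler.run_mcmc(pos, 10000)
samples = sampler.chain.reshape((-1, ndim))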


1 Answer


I modified a few lines:

import time
import numpy as np
from emcee import PTSampler
import corner
import matplotlib.pyplot as plt
import scipy.optimize as op

t1 = time.time()

np.random.seed(6) # To reproduce results
# Choose the "true" parameters.
m_true = -0.9594
b_true = 4.294
f_true = 0.534

# Generate some synthetic data from the model.
N = 50
x = np.sort(10 * np.random.rand(N))
yerr = 0.1 + 0.5 * np.random.rand(N)
y_1 = m_true * x + b_true
y = np.abs(f_true * y_1) * np.random.randn(N) + y_1
y += yerr * np.random.randn(N)

plt.plot(x, y, 'o')


# With emcee

def lnlike(theta, x, y, yerr):
    m, b, lnf = theta
    model = m * x + b
    inv_sigma2 = 1.0/(yerr**2 + model**2*np.exp(2*lnf))
    return -0.5*(np.sum((y-model)**2*inv_sigma2 - np.log(inv_sigma2)))
def lnprior(theta):
    m, b, lnf = theta
    if -5.0 < m < 0.5 and 0.0 < b < 10.0 and -10.0 < lnf < 1.0:
        return 0.0
    return -np.inf
def lnprob(theta, x, y, yerr):
    lp = lnprior(theta)
    if not np.isfinite(lp):
        return -np.inf
    return lp + lnlike(theta, x, y, yerr)


nll = lambda *args: -lnlike(*args)
result = op.minimize(nll, [m_true, b_true, np.log(f_true)], args=(x, y, yerr))
m_ml, b_ml, lnf_ml = result["x"]

init = [0.5, m_ml, b_ml, lnf_ml]

ntemps = 10
nwalkers = 100
ndim = 3

pos = np.random.uniform(low=-1, high=1, size=(ntemps, nwalkers, ndim))
for i in range(ntemps):
    pos[i:, :] = np.array([result["x"] + 1e-4*np.random.randn(ndim) for i in range(nwalkers)])

sampler = PTSampler(ntemps, nwalkers, ndim, lnlike, lnprior, loglargs=(x, y, yerr), threads=4) # args=(x, y, yerr))

#burn-in 
print(pos.shape)
sampler.run_mcmc(pos, 100)
sampler.reset()
sampler.run_mcmc(pos, 5000, thin=10)
samples = sampler.chain.reshape((-1, ndim))

print('Number of posterior samples is {}'.format(samples.shape[0]))

#print best fit value together with errors

p1, p2, p3 = map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]),
                             zip(*np.percentile(samples, [16, 50, 84],
                                                axis=0)))

print(p1, '\n', p2, '\n', p3)

fig = corner.corner(samples, labels=["$m$", "$b$", "$\ln\,f$"],
                      truths=[m_true, b_true, np.log(f_true)])

t2 = time.time()

print('It took {:.3f} s'.format(t2 - t1))

plt.show()

The figure I get with corner is:

[corner plot of the emcee samples for m, b and ln f]

The important line is

sampler = PTSampler(ntemps, nwalkers, ndim, lnlike, lnprior, loglargs=(x, y, yerr), threads=4)

I used threads=4 instead of a Pool.
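If you prefer to keep the multiprocessing.Pool approach from your code, a roughly equivalent set-up would be the following; just remember to close the pool once sampling is finished:

from multiprocessing import Pool

pool = Pool(processes=4)
sampler = PTSampler(ntemps, nwalkers, ndim, lnlike, lnprior,
                    loglargs=(x, y, yerr), pool=pool)
# ... run_mcmc / reset / run_mcmc exactly as above ...
pool.close()
pool.join()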

Looking carefully at the line print(p1, '\n', p2, '\n', p3), which prints the estimates corresponding to m_true, b_true and f_true, you get:

(-1.277782877669762, 0.5745273177144817, 2.0813620981463297) 
(4.800481378230051, 3.1747356851201163, 2.245189235990341) 
(-0.9391847529845194, 1.1196053087321716, 3.6017609114364273)

For f you need np.exp(-0.93918), which is 0.3909, close to 0.534. The values you get are close (-1.277 compared with -0.9594, and 4.8 compared with 4.294), and the errors are not bad (except the one for f). I mean, were you expecting to get the exact numbers? With this method, on my computer, it takes 111 s to finish; is that normal for you?
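In code, the conversion of the ln f median quoted above back to f is just:

import numpy as np

# Median of ln(f) taken from the third line of the output above.
lnf_med = -0.9391847529845194
print(np.exp(lnf_med))   # ~0.391, to be compared with f_true = 0.534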

Let's try something different. Let's be clear: the problem is not easy once f_true is added. I will use pymc3 (you don't need to know how to use pymc3; I only want to check the results found with emcee).

import time
import numpy as np
import corner
import matplotlib.pyplot as plt
import pymc3 as pm

t1 = time.time()

np.random.seed(6)
# Choose the "true" parameters.
m_true = -0.9594
b_true = 4.294
f_true = 0.534

# Generate some synthetic data from the model.
N = 50
x = np.sort(10 * np.random.rand(N))
yerr = 0.1 + 0.5 * np.random.rand(N)
y_1 = m_true * x + b_true
y = np.abs(f_true * y_1) * np.random.randn(N) + y_1
y += yerr * np.random.randn(N)

plt.plot(x, y, 'o')


with pm.Model() as model: # model specifications in PyMC3 are wrapped in a with-statement
    # Define priors
    f = pm.HalfCauchy('f', beta=5)
    m = pm.Normal('m', 0, sd=20)
    b = pm.Normal('b', 0, sd=20)

    mu2 = b + m * x
    sigma2 = yerr**2 + f**2 * (y_1)**2

    post = pm.Normal('y', mu=mu2, sd=pm.math.sqrt(sigma2), observed=y)

with model:
    trace = pm.sample(2000, tune=2000)

print(pm.summary(trace))
pm.traceplot(trace)

all_values = np.stack([trace.get_values('b'), trace.get_values('m'), trace.get_values('f')], axis=1)

fig2 = corner.corner(all_values, labels=["$b$", "$m$", "$f$"],
                      truths=[b_true, m_true, f_true])

t2 = time.time()

print('It took {:.3f} s'.format(t2 - t1))

plt.show()

The summary is

       mean        sd  mc_error   hpd_2.5  hpd_97.5        n_eff      Rhat
m -0.995545  0.067818  0.001174 -1.123187 -0.857653  2685.610018  1.000121
b  4.398158  0.332526  0.005585  3.767336  5.057909  2746.736563  1.000201
f  0.425442  0.063884  0.000904  0.311037  0.554446  4195.591204  1.000309

The important part is the mean column: you can see that the values found by pymc3 are close to the true values. The hpd_2.5 and hpd_97.5 columns are the errors for f, b and m. And it took 14 s.
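As a small addition (not strictly needed), the same 16/50/84 percentile summary printed in the emcee script can be computed from the pymc3 trace for a like-for-like comparison, reusing trace and np from the script above:

# Same percentile summary as the emcee output, computed from the trace.
for name in ['m', 'b', 'f']:
    lo, med, hi = np.percentile(trace.get_values(name), [16, 50, 84])
    print('{}: {:.4f} +{:.4f} -{:.4f}'.format(name, med, hi - med, med - lo))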

The figure I get with corner:

[corner plot of the pymc3 posterior for b, m and f]

You could say the results from emcee are not that good, but if you really want more accuracy, you have to modify this function:

def lnprior(theta):
    m, b, lnf = theta
    if -5.0 < m < 0.5 and 0.0 < b < 10.0 and -10.0 < lnf < 1.0:
        return 0.0
    return -np.inf

The famous priors. In this case it is flat, and since there are a lot of priors...
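For example, a less flat prior could look like the sketch below. The Gaussian centres and widths here are made-up numbers for illustration only, not values I am recommending:

def lnprior_informative(theta):
    m, b, lnf = theta
    # Keep the original hard bounds...
    if not (-5.0 < m < 0.5 and 0.0 < b < 10.0 and -10.0 < lnf < 1.0):
        return -np.inf
    # ...and add Gaussian log-densities (centres/widths are illustrative guesses).
    return (-0.5 * ((m + 1.0) / 0.5)**2
            - 0.5 * ((b - 4.0) / 1.0)**2
            - 0.5 * ((lnf + 0.6) / 1.0)**2)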

Answered 2019-05-31T04:41:27.043