using Random: seed!
#myseed = 202002011513
myseed = 202002081716
using Distributions
using Plots
pyplot(fmt=:svg)
pyplotclf() = backend() == Plots.PyPlotBackend() && PyPlot.clf()
using Base64
# Display a file inline as a base64-encoded data URI (used below to embed the GIF animations)
displayfile(mime, file; tag="img") = open(file) do f
    base64 = base64encode(f)
    display("text/html", """<$(tag) src="data:$(mime);base64,$(base64)"/>""")
end
using Printf
rd(x; digits=3) = round(x; digits=digits)
rd (generic function with 1 method)
# Dice(p): a categorical distribution on 1:length(p) with probabilities proportional to p
Dice(p) = Categorical(p/sum(p))
diceA = Dice([1, 1, 1, 1, 1, 1])
diceB = Dice([3, 2, 2, 1, 1, 1])
diceC = Dice([1, 1, 1, 2, 2, 3])
@show diceA.p .|> rd
@show diceB.p .|> rd
@show diceC.p .|> rd;
diceA.p .|> rd = [0.167, 0.167, 0.167, 0.167, 0.167, 0.167]
diceB.p .|> rd = [0.3, 0.2, 0.2, 0.1, 0.1, 0.1]
diceC.p .|> rd = [0.1, 0.1, 0.1, 0.2, 0.2, 0.3]
# Joint distribution of n independent rolls of a single die
sampledist(dist, n) = Product(fill(dist, n))

# Mixture model: one of the three dice is chosen according to `prior`,
# and the chosen die is then rolled n times
function model(diceA, diceB, diceC, prior, n)
    MixtureModel([sampledist(diceA, n), sampledist(diceB, n), sampledist(diceC, n)], prior)
end
model (generic function with 1 method)
prior = Dice([1, 1, 1])
@show prior.p .|> rd;
prior.p .|> rd = [0.333, 0.333, 0.333]
diceX = Dice([1,1,1.5,1.5,2,3]);
@show diceX.p .|> rd;
diceX.p .|> rd = [0.1, 0.1, 0.15, 0.15, 0.2, 0.3]
seed!(myseed)
n = 30
sample = rand(diceX, n)
@show sample;
sample = [5, 4, 4, 3, 5, 3, 4, 2, 5, 3, 6, 2, 6, 6, 3, 6, 5, 5, 6, 3, 4, 6, 4, 1, 5, 4, 2, 5, 3, 6]
# Empirical distribution of the sample over the faces 1:r
function empirical_dist(r, sample)
    isempty(sample) && return Dice(fill(1.0, r))
    p = zeros(r)
    for i in sample
        p[i] += 1.0
    end
    Dice(p)
end
edist = empirical_dist(6, sample)
@show diceX.p .|> rd;
@show edist.p .|> rd;
diceX.p .|> rd = [0.1, 0.1, 0.15, 0.15, 0.2, 0.3]
edist.p .|> rd = [0.033, 0.1, 0.2, 0.2, 0.233, 0.233]
# Posterior probability that die k was the one initially chosen, given the sample
function posterior_prob(diceA, diceB, diceC, prior, sample, k)
    n = length(sample)
    Z = model(diceA, diceB, diceC, prior, n)
    @inline s(k) = sampledist((diceA, diceB, diceC)[k], n)
    exp(logpdf(prior, k) + logpdf(s(k), sample) - logpdf(Z, sample))
end

# Posterior distribution over the three dice
function posterior_dist(diceA, diceB, diceC, prior, sample)
    isempty(sample) && return prior
    Categorical(posterior_prob.(diceA, diceB, diceC, prior, Ref(sample), 1:3))
end
posterior = posterior_dist(diceA, diceB, diceC, prior, sample)
@show prior.p .|> rd;
@show posterior.p .|> rd;
prior.p .|> rd = [0.333, 0.333, 0.333]
posterior.p .|> rd = [0.202, 0.0, 0.798]
# Predictive probability that the next roll is x, given the sample
function predictive_prob(diceA, diceB, diceC, prior, sample, x)
    n = length(sample)
    @inline Z(n) = model(diceA, diceB, diceC, prior, n)
    if iszero(n)
        pdf(Z(1), [x])
    else
        exp(logpdf(Z(n+1), [sample; x]) - logpdf(Z(n), sample))
    end
end

# Predictive distribution of the next roll
function predictive_dist(diceA, diceB, diceC, prior, sample)
    Categorical(predictive_prob.(diceA, diceB, diceC, prior, Ref(sample), 1:6))
end
predX = predictive_dist(diceA, diceB, diceC, prior, sample)
@show diceX.p .|> rd
@show predX.p .|> rd;
diceX.p .|> rd = [0.1, 0.1, 0.15, 0.15, 0.2, 0.3]
predX.p .|> rd = [0.113, 0.113, 0.113, 0.193, 0.193, 0.273]
function print_result(;
        diceX = Dice([1,1,1,2,2,3]),
        N = 30,
        sample = rand(diceX, N),
        diceA = Dice([1,1,1,1,1,1]),
        diceB = Dice([3,2,2,1,1,1]),
        diceC = Dice([1,1,1,2,2,3]),
        prior = Dice([1,1,1])
    )
    N = length(sample)
    posterior = posterior_dist(diceA, diceB, diceC, prior, sample)
    predX = predictive_dist(diceA, diceB, diceC, prior, sample)
    edist = empirical_dist(ncategories(diceX), sample)
    println("---------- Model:")
    @show diceA.p .|> rd
    @show diceB.p .|> rd
    @show diceC.p .|> rd
    @show prior.p .|> rd
    println()
    println("---------- Dice X and sample:")
    @show diceX.p .|> rd
    @show sample
    @show n = length(sample)
    println()
    println("---------- Results:")
    @show prior.p .|> rd
    @show posterior.p .|> rd
    println()
    @show diceX.p .|> rd
    @show edist.p .|> rd
    @show predX.p .|> rd
    nothing
end
print_result()
---------- Model:
diceA.p .|> rd = [0.167, 0.167, 0.167, 0.167, 0.167, 0.167]
diceB.p .|> rd = [0.3, 0.2, 0.2, 0.1, 0.1, 0.1]
diceC.p .|> rd = [0.1, 0.1, 0.1, 0.2, 0.2, 0.3]
prior.p .|> rd = [0.333, 0.333, 0.333]

---------- Dice X and sample:
diceX.p .|> rd = [0.1, 0.1, 0.1, 0.2, 0.2, 0.3]
sample = [3, 5, 5, 6, 2, 6, 6, 6, 6, 5, 1, 6, 2, 3, 5, 5, 1, 4, 2, 3, 5, 6, 6, 6, 5, 5, 4, 5, 6, 5]
n = length(sample) = 30

---------- Results:
prior.p .|> rd = [0.333, 0.333, 0.333]
posterior.p .|> rd = [0.018, 0.0, 0.982]

diceX.p .|> rd = [0.1, 0.1, 0.1, 0.2, 0.2, 0.3]
edist.p .|> rd = [0.067, 0.1, 0.1, 0.067, 0.333, 0.333]
predX.p .|> rd = [0.101, 0.101, 0.101, 0.199, 0.199, 0.298]
function animate_Bayes(fn = "test.gif";
        fn_no_true_dist = replace(fn, r"\.[^.]*$"=>"_no_true_dist.gif"),
        fps = 10,
        step = 1,
        diceX = Dice([1,1,2,1,2,3]),
        N = 100,
        sample = rand(diceX, N),
        diceA = Dice([1,1,1,1,1,1]),
        diceB = Dice([3,2,2,1,1,1]),
        diceC = Dice([1,1,1,2,2,3]),
        prior = Dice([1,1,1])
    )
    N = length(sample)
    @inline posterior(n) = posterior_dist(diceA, diceB, diceC, prior, @view(sample[1:n]))
    @inline predX(n) = predictive_dist(diceA, diceB, diceC, prior, @view(sample[1:n]))
    @inline edist(n) = empirical_dist(ncategories(diceX), @view(sample[1:n]))
    ymax = maximum(vcat((n -> predX(n).p).(0:N)..., diceX.p))#, (n -> edist(n).p).(0:N)...))
    anim = @animate for n in [fill(0, 5); 0:step:N; fill(N, 5)]
        P1 = plot()
        plot!(legend=:outertop)
        plot!(ylim=(-0.03ymax, 1.2ymax), ytick=0:0.05:1)
        bar!(predX(n).p; label="Predictive distribution", alpha=0.5)
        bar!(diceX.p; label="True distribution of Dice X", alpha=0.2)
        scatter!(edist(n).p; label="Empirical distribution of sample", color=:red)
        P2 = plot()
        plot!(title="Posterior n=" * @sprintf("%03d", n))
        plot!(xtick=(1:3, ["Dice A", "Dice B", "Dice C"]), ylim=(-0.03, 1.03), ytick=0:0.1:1)
        bar!(posterior(n).p; label="", alpha=0.5)
        plot(P1, P2; size=(600, 400), layout=@layout([a{0.7w} b{0.3w}]))
        plot!(titlefontsize=10, legendfontsize=9)
    end
    anim_no_true_dist = @animate for n in [fill(0, 5); 0:step:N; fill(N, 5)]
        P1 = plot()
        plot!(legend=:outertop)
        plot!(ylim=(-0.03ymax, 1.2ymax), ytick=0:0.05:1)
        bar!(predX(n).p; label="Predictive distribution", alpha=0.5)
        #bar!(diceX.p; label="True distribution of Dice X", alpha=0.2)
        scatter!(edist(n).p; label="Empirical distribution of sample", color=:red)
        P2 = plot()
        plot!(title="Posterior n=" * @sprintf("%03d", n))
        plot!(xtick=(1:3, ["Dice A", "Dice B", "Dice C"]), ylim=(-0.03, 1.03), ytick=0:0.1:1)
        bar!(posterior(n).p; label="", alpha=0.5)
        plot(P1, P2; size=(600, 400), layout=@layout([a{0.7w} b{0.3w}]))
        plot!(titlefontsize=10, legendfontsize=9)
    end
    pyplotclf()
    gif(anim, fn; fps=fps)
    gif(anim_no_true_dist, fn_no_true_dist; fps=fps)
    sleep(0.1)
    displayfile("image/gif", fn)
    displayfile("image/gif", fn_no_true_dist)
end
@time animate_Bayes()
┌ Info: Saved animation to
│   fn = C:\Users\genkuroki\OneDrive\Math\Math0034\test.gif
└ @ Plots C:\Users\genkuroki\.julia\packages\Plots\WwFyB\src\animation.jl:98
┌ Info: Saved animation to
│   fn = C:\Users\genkuroki\OneDrive\Math\Math0034\test_no_true_dist.gif
└ @ Plots C:\Users\genkuroki\.julia\packages\Plots\WwFyB\src\animation.jl:98
101.380054 seconds (95.13 M allocations: 4.712 GiB, 1.98% gc time)
Let us run the Bayesian method on the following mathematical model: at the start, one of the three dice A, B, C defined below is chosen, each with the same probability, and from then on the chosen die is rolled repeatedly.
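To make that generative story concrete, here is a minimal sketch (added for illustration; the helper name simulate_model does not appear elsewhere in this notebook) that samples directly by first picking a die index with the prior and then rolling the chosen die. Drawing from rand(model(diceA, diceB, diceC, prior, n)) should be distributionally equivalent.

# Added illustration only: the mixture model used in this notebook is exactly
# "choose a die according to `prior`, then roll that die n times".
function simulate_model(diceA, diceB, diceC, prior, n)
    k = rand(prior)                    # 1, 2 or 3, drawn with the prior probabilities
    chosen = (diceA, diceB, diceC)[k]  # the die selected at the start
    rand(chosen, n)                    # n rolls of the chosen die
end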
diceA = Dice([1,1,1,1,1,1])
diceB = Dice([3,2,2,1,1,1])
diceC = Dice([1,1,1,2,2,3])
@show diceA.p .|> rd
@show diceB.p .|> rd
@show diceC.p .|> rd
ymax = maximum(vcat(diceA.p, diceB.p, diceC.p))
pal = palette(:default)
P1 = bar(diceA.p; ylim=(-0.05*ymax, 1.2ymax), color=pal[1], label="Dice A", alpha=0.5)
P2 = bar(diceB.p; ylim=(-0.05*ymax, 1.2ymax), color=pal[2], label="Dice B", alpha=0.5)
P3 = bar(diceC.p; ylim=(-0.05*ymax, 1.2ymax), color=pal[3], label="Dice C", alpha=0.5)
plot(P1, P2, P3; size=(600, 200), layout=(1,3))
diceA.p .|> rd = [0.167, 0.167, 0.167, 0.167, 0.167, 0.167]
diceB.p .|> rd = [0.3, 0.2, 0.2, 0.1, 0.1, 0.1]
diceC.p .|> rd = [0.1, 0.1, 0.1, 0.2, 0.2, 0.3]
When Dice X has the same probability distribution as Dice C, we run the Bayesian method on a sample of size N = 80 drawn from Dice X.

In the case below (note that the outcome depends on how the sample happens to come out), the posterior distribution posterior.p shows that, when the whole size-N = 80 sample is used, the probability that Dice C was the die initially chosen, given that the model produced the same rolls as the sample, rounds to 100%; the Bayesian inference therefore works very well. The predictive distribution predX.p also ends up very close to the distribution diceX.p of Dice X.

The results of Bayesian inference using only the first n = 20, 50, 80 entries of the sample are printed below; as n grows, the accuracy of the inference improves.
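Because the rolls are independent and identically distributed, the likelihood of each die depends on the sample only through the face counts. The sketch below is an added cross-check (posterior_by_counts is not part of the original code): it recomputes the posterior from those counts and should agree with posterior_dist up to rounding error.

# Added cross-check of posterior_dist via face counts (illustration only)
function posterior_by_counts(dices, prior, sample)
    counts = [count(==(i), sample) for i in 1:6]        # occurrences of each face
    logL = [sum(counts .* log.(d.p)) for d in dices]    # log likelihood of each die
    w = log.(prior.p) .+ logL
    w = exp.(w .- maximum(w))                           # stabilize before normalizing
    w / sum(w)
end
# posterior_by_counts((diceA, diceB, diceC), prior, sample) should match
# posterior_dist(diceA, diceB, diceC, prior, sample).p up to rounding.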
seed!(myseed)
diceX = Dice([1,1,1,2,2,3])
N = 80
sample = rand(diceX, N)
@time animate_Bayes("Bayes1-1.gif"; diceX=diceX, sample=sample)
for n in 20:30:80
    println("\n==================== n = $n")
    print_result(diceX=diceX, sample=sample[1:n])
end
┌ Info: Saved animation to
│   fn = C:\Users\genkuroki\OneDrive\Math\Math0034\Bayes1-1.gif
└ @ Plots C:\Users\genkuroki\.julia\packages\Plots\WwFyB\src\animation.jl:98
┌ Info: Saved animation to
│   fn = C:\Users\genkuroki\OneDrive\Math\Math0034\Bayes1-1_no_true_dist.gif
└ @ Plots C:\Users\genkuroki\.julia\packages\Plots\WwFyB\src\animation.jl:98
51.956759 seconds (18.45 M allocations: 607.012 MiB, 0.44% gc time)

==================== n = 20
---------- Results:
prior.p .|> rd = [0.333, 0.333, 0.333]
posterior.p .|> rd = [0.032, 0.0, 0.968]

diceX.p .|> rd = [0.1, 0.1, 0.1, 0.2, 0.2, 0.3]
edist.p .|> rd = [0.0, 0.1, 0.15, 0.15, 0.2, 0.4]
predX.p .|> rd = [0.102, 0.102, 0.102, 0.199, 0.199, 0.296]

==================== n = 50
---------- Results:
prior.p .|> rd = [0.333, 0.333, 0.333]
posterior.p .|> rd = [0.011, 0.0, 0.989]

diceX.p .|> rd = [0.1, 0.1, 0.1, 0.2, 0.2, 0.3]
edist.p .|> rd = [0.06, 0.12, 0.14, 0.14, 0.22, 0.32]
predX.p .|> rd = [0.101, 0.101, 0.101, 0.2, 0.2, 0.299]

==================== n = 80
---------- Results:
prior.p .|> rd = [0.333, 0.333, 0.333]
posterior.p .|> rd = [0.0, 0.0, 1.0]

diceX.p .|> rd = [0.1, 0.1, 0.1, 0.2, 0.2, 0.3]
edist.p .|> rd = [0.088, 0.088, 0.112, 0.162, 0.225, 0.325]
predX.p .|> rd = [0.1, 0.1, 0.1, 0.2, 0.2, 0.3]
With the same model, let us now check what happens when Dice X has a probability distribution that is close to, but slightly different from, that of Dice B in the model.

In this case the predictive distribution predX.p approaches the distribution of Dice B.

In general one can prove that, as the sample size n grows, the predictive distribution approaches whichever of the distributions of Dice A, B, C is closest to the distribution of Dice X.
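Here "closest" can be read, for example, as smallest Kullback–Leibler divergence from the distribution of Dice X to that of each model die. The one-liner below is an added illustration (not part of the original code) and assumes all face probabilities are positive, as they are for these dice.

# Added illustration: D(q || p) = sum_i q_i * log(q_i / p_i)
kl(q, p) = sum(q.p .* (log.(q.p) .- log.(p.p)))
# For the diceX defined in the next cell, kl(diceX, diceB) should be the smallest of
# kl(diceX, diceA), kl(diceX, diceB), kl(diceX, diceC), consistent with the posterior
# concentrating on Dice B in the output below.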
seed!(myseed)
diceX = Dice([2.7, 2.3, 1.5, 1.2, 1.3, 1.0])
N = 500
sample = rand(diceX, N)
@time animate_Bayes("Bayes1-2.gif"; step=4, diceX=diceX, sample=sample)
for n in 100:200:500
    println("\n==================== n = $n")
    print_result(diceX=diceX, sample=sample[1:n])
end
┌ Info: Saved animation to
│   fn = C:\Users\genkuroki\OneDrive\Math\Math0034\Bayes1-2.gif
└ @ Plots C:\Users\genkuroki\.julia\packages\Plots\WwFyB\src\animation.jl:98
┌ Info: Saved animation to
│   fn = C:\Users\genkuroki\OneDrive\Math\Math0034\Bayes1-2_no_true_dist.gif
└ @ Plots C:\Users\genkuroki\.julia\packages\Plots\WwFyB\src\animation.jl:98
81.487504 seconds (41.98 M allocations: 2.011 GiB, 0.83% gc time)

==================== n = 100
---------- Results:
prior.p .|> rd = [0.333, 0.333, 0.333]
posterior.p .|> rd = [0.727, 0.273, 0.0]

diceX.p .|> rd = [0.27, 0.23, 0.15, 0.12, 0.13, 0.1]
edist.p .|> rd = [0.21, 0.25, 0.14, 0.13, 0.16, 0.11]
predX.p .|> rd = [0.203, 0.176, 0.176, 0.148, 0.148, 0.148]

==================== n = 300
---------- Results:
prior.p .|> rd = [0.333, 0.333, 0.333]
posterior.p .|> rd = [0.065, 0.935, 0.0]

diceX.p .|> rd = [0.27, 0.23, 0.15, 0.12, 0.13, 0.1]
edist.p .|> rd = [0.233, 0.223, 0.157, 0.123, 0.15, 0.113]
predX.p .|> rd = [0.291, 0.198, 0.198, 0.104, 0.104, 0.104]

==================== n = 500
---------- Results:
prior.p .|> rd = [0.333, 0.333, 0.333]
posterior.p .|> rd = [0.0, 1.0, 0.0]

diceX.p .|> rd = [0.27, 0.23, 0.15, 0.12, 0.13, 0.1]
edist.p .|> rd = [0.246, 0.226, 0.158, 0.124, 0.138, 0.108]
predX.p .|> rd = [0.3, 0.2, 0.2, 0.1, 0.1, 0.1]
When the probability distribution of Dice X differs substantially from all of Dice A, B, and C in the model, the achievable accuracy is limited by whichever of Dice A, B, C has the distribution closest to that of Dice X, so the error of the estimate remains large.
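One way to quantify that residual error (an added illustration, not part of the original notebook) is the total variation distance between the distribution of Dice X and the closest model die, which the predictive distribution cannot beat in the large-sample limit.

# Added illustration: total variation distance between two probability vectors
tvd(p, q) = sum(abs.(p .- q)) / 2
# With the diceX of the next cell the posterior concentrates on Dice C, so
# tvd(diceX.p, predX.p) levels off near tvd(diceX.p, diceC.p) instead of shrinking to 0.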
seed!(myseed)
diceX = Dice([0.5, 2.5, 1, 3, 0.5, 3])
N = 400
sample = rand(diceX, N)
@time animate_Bayes("Bayes1-3.gif"; step=4, diceX=diceX, sample=sample)
for n in 100:150:400
    println("\n==================== n = $n")
    print_result(diceX=diceX, sample=sample[1:n])
end
┌ Info: Saved animation to
│   fn = C:\Users\genkuroki\OneDrive\Math\Math0034\Bayes1-3.gif
└ @ Plots C:\Users\genkuroki\.julia\packages\Plots\WwFyB\src\animation.jl:98
┌ Info: Saved animation to
│   fn = C:\Users\genkuroki\OneDrive\Math\Math0034\Bayes1-3_no_true_dist.gif
└ @ Plots C:\Users\genkuroki\.julia\packages\Plots\WwFyB\src\animation.jl:98
64.875713 seconds (31.49 M allocations: 1.425 GiB, 0.70% gc time)

==================== n = 100
---------- Results:
prior.p .|> rd = [0.333, 0.333, 0.333]
posterior.p .|> rd = [0.0, 0.0, 1.0]

diceX.p .|> rd = [0.048, 0.238, 0.095, 0.286, 0.048, 0.286]
edist.p .|> rd = [0.02, 0.2, 0.09, 0.33, 0.04, 0.32]
predX.p .|> rd = [0.1, 0.1, 0.1, 0.2, 0.2, 0.3]

==================== n = 250
---------- Results:
prior.p .|> rd = [0.333, 0.333, 0.333]
posterior.p .|> rd = [0.0, 0.0, 1.0]

diceX.p .|> rd = [0.048, 0.238, 0.095, 0.286, 0.048, 0.286]
edist.p .|> rd = [0.032, 0.208, 0.088, 0.304, 0.04, 0.328]
predX.p .|> rd = [0.1, 0.1, 0.1, 0.2, 0.2, 0.3]

==================== n = 400
---------- Results:
prior.p .|> rd = [0.333, 0.333, 0.333]
posterior.p .|> rd = [0.0, 0.0, 1.0]

diceX.p .|> rd = [0.048, 0.238, 0.095, 0.286, 0.048, 0.286]
edist.p .|> rd = [0.038, 0.218, 0.102, 0.318, 0.035, 0.29]
predX.p .|> rd = [0.1, 0.1, 0.1, 0.2, 0.2, 0.3]
diceA = Dice([3,3,1,1,1,1])
diceB = Dice([1,1,3,3,1,1])
diceC = Dice([1,1,1,1,3,3])
@show diceA.p .|> rd
@show diceB.p .|> rd
@show diceC.p .|> rd
ymax = maximum(vcat(diceA.p, diceB.p, diceC.p))
pal = palette(:default)
P1 = bar(diceA.p; ylim=(-0.05*ymax, 1.2ymax), color=pal[1], label="Dice A", alpha=0.5)
P2 = bar(diceB.p; ylim=(-0.05*ymax, 1.2ymax), color=pal[2], label="Dice B", alpha=0.5)
P3 = bar(diceC.p; ylim=(-0.05*ymax, 1.2ymax), color=pal[3], label="Dice C", alpha=0.5)
plot(P1, P2, P3; size=(600, 200), layout=(1,3))
diceA.p .|> rd = [0.3, 0.3, 0.1, 0.1, 0.1, 0.1]
diceB.p .|> rd = [0.1, 0.1, 0.3, 0.3, 0.1, 0.1]
diceC.p .|> rd = [0.1, 0.1, 0.1, 0.1, 0.3, 0.3]
seed!(myseed)
diceX = Dice([1,1,1,1,1,1])
N = 600
sample = rand(diceX, N)
@time animate_Bayes("Bayes2-1.gif"; fps=5, step=4,
diceX=diceX, sample=sample, diceA=diceA, diceB=diceB, diceC=diceC
)
for n in 100:250:600
println("\n==================== n = $n")
print_result(diceX=diceX, sample=sample[1:n])
end
┌ Info: Saved animation to
│   fn = C:\Users\genkuroki\OneDrive\Math\Math0034\Bayes2-1.gif
└ @ Plots C:\Users\genkuroki\.julia\packages\Plots\WwFyB\src\animation.jl:98
┌ Info: Saved animation to
│   fn = C:\Users\genkuroki\OneDrive\Math\Math0034\Bayes2-1_no_true_dist.gif
└ @ Plots C:\Users\genkuroki\.julia\packages\Plots\WwFyB\src\animation.jl:98
95.491092 seconds (53.47 M allocations: 2.679 GiB, 0.84% gc time)

==================== n = 100
---------- Model:
diceA.p .|> rd = [0.167, 0.167, 0.167, 0.167, 0.167, 0.167]
diceB.p .|> rd = [0.3, 0.2, 0.2, 0.1, 0.1, 0.1]
diceC.p .|> rd = [0.1, 0.1, 0.1, 0.2, 0.2, 0.3]
prior.p .|> rd = [0.333, 0.333, 0.333]

---------- Results:
prior.p .|> rd = [0.333, 0.333, 0.333]
posterior.p .|> rd = [1.0, 0.0, 0.0]

diceX.p .|> rd = [0.167, 0.167, 0.167, 0.167, 0.167, 0.167]
edist.p .|> rd = [0.16, 0.18, 0.15, 0.15, 0.17, 0.19]
predX.p .|> rd = [0.167, 0.167, 0.167, 0.167, 0.167, 0.167]

==================== n = 350
---------- Model:
diceA.p .|> rd = [0.167, 0.167, 0.167, 0.167, 0.167, 0.167]
diceB.p .|> rd = [0.3, 0.2, 0.2, 0.1, 0.1, 0.1]
diceC.p .|> rd = [0.1, 0.1, 0.1, 0.2, 0.2, 0.3]
prior.p .|> rd = [0.333, 0.333, 0.333]

---------- Results:
prior.p .|> rd = [0.333, 0.333, 0.333]
posterior.p .|> rd = [1.0, 0.0, 0.0]

diceX.p .|> rd = [0.167, 0.167, 0.167, 0.167, 0.167, 0.167]
edist.p .|> rd = [0.166, 0.171, 0.169, 0.174, 0.151, 0.169]
predX.p .|> rd = [0.167, 0.167, 0.167, 0.167, 0.167, 0.167]

==================== n = 600
---------- Model:
diceA.p .|> rd = [0.167, 0.167, 0.167, 0.167, 0.167, 0.167]
diceB.p .|> rd = [0.3, 0.2, 0.2, 0.1, 0.1, 0.1]
diceC.p .|> rd = [0.1, 0.1, 0.1, 0.2, 0.2, 0.3]
prior.p .|> rd = [0.333, 0.333, 0.333]

---------- Results:
prior.p .|> rd = [0.333, 0.333, 0.333]
posterior.p .|> rd = [1.0, 0.0, 0.0]

diceX.p .|> rd = [0.167, 0.167, 0.167, 0.167, 0.167, 0.167]
edist.p .|> rd = [0.158, 0.172, 0.185, 0.163, 0.157, 0.165]
predX.p .|> rd = [0.167, 0.167, 0.167, 0.167, 0.167, 0.167]
seed!(myseed)
diceX = Dice([4,4,5,4,4,4])
N = 300
sample = rand(diceX, N)
@time animate_Bayes("Bayes2-2.gif"; fps=5, step=2,
diceX=diceX, sample=sample, diceA=diceA, diceB=diceB, diceC=diceC
)
for n in 30:90:300
println("\n==================== n = $n")
print_result(diceX=diceX, sample=sample[1:n])
end
┌ Info: Saved animation to
│   fn = C:\Users\genkuroki\OneDrive\Math\Math0034\Bayes2-2.gif
└ @ Plots C:\Users\genkuroki\.julia\packages\Plots\WwFyB\src\animation.jl:98
┌ Info: Saved animation to
│   fn = C:\Users\genkuroki\OneDrive\Math\Math0034\Bayes2-2_no_true_dist.gif
└ @ Plots C:\Users\genkuroki\.julia\packages\Plots\WwFyB\src\animation.jl:98
93.647836 seconds (38.79 M allocations: 1.534 GiB, 0.55% gc time)

==================== n = 30
---------- Model:
diceA.p .|> rd = [0.167, 0.167, 0.167, 0.167, 0.167, 0.167]
diceB.p .|> rd = [0.3, 0.2, 0.2, 0.1, 0.1, 0.1]
diceC.p .|> rd = [0.1, 0.1, 0.1, 0.2, 0.2, 0.3]
prior.p .|> rd = [0.333, 0.333, 0.333]

---------- Results:
prior.p .|> rd = [0.333, 0.333, 0.333]
posterior.p .|> rd = [0.95, 0.015, 0.035]

diceX.p .|> rd = [0.16, 0.16, 0.2, 0.16, 0.16, 0.16]
edist.p .|> rd = [0.067, 0.167, 0.267, 0.2, 0.167, 0.133]
predX.p .|> rd = [0.166, 0.165, 0.165, 0.167, 0.167, 0.17]

==================== n = 120
---------- Model:
diceA.p .|> rd = [0.167, 0.167, 0.167, 0.167, 0.167, 0.167]
diceB.p .|> rd = [0.3, 0.2, 0.2, 0.1, 0.1, 0.1]
diceC.p .|> rd = [0.1, 0.1, 0.1, 0.2, 0.2, 0.3]
prior.p .|> rd = [0.333, 0.333, 0.333]

---------- Results:
prior.p .|> rd = [0.333, 0.333, 0.333]
posterior.p .|> rd = [1.0, 0.0, 0.0]

diceX.p .|> rd = [0.16, 0.16, 0.2, 0.16, 0.16, 0.16]
edist.p .|> rd = [0.142, 0.142, 0.208, 0.15, 0.167, 0.192]
predX.p .|> rd = [0.167, 0.167, 0.167, 0.167, 0.167, 0.167]

==================== n = 210
---------- Model:
diceA.p .|> rd = [0.167, 0.167, 0.167, 0.167, 0.167, 0.167]
diceB.p .|> rd = [0.3, 0.2, 0.2, 0.1, 0.1, 0.1]
diceC.p .|> rd = [0.1, 0.1, 0.1, 0.2, 0.2, 0.3]
prior.p .|> rd = [0.333, 0.333, 0.333]

---------- Results:
prior.p .|> rd = [0.333, 0.333, 0.333]
posterior.p .|> rd = [1.0, 0.0, 0.0]

diceX.p .|> rd = [0.16, 0.16, 0.2, 0.16, 0.16, 0.16]
edist.p .|> rd = [0.148, 0.143, 0.214, 0.157, 0.162, 0.176]
predX.p .|> rd = [0.167, 0.167, 0.167, 0.167, 0.167, 0.167]

==================== n = 300
---------- Model:
diceA.p .|> rd = [0.167, 0.167, 0.167, 0.167, 0.167, 0.167]
diceB.p .|> rd = [0.3, 0.2, 0.2, 0.1, 0.1, 0.1]
diceC.p .|> rd = [0.1, 0.1, 0.1, 0.2, 0.2, 0.3]
prior.p .|> rd = [0.333, 0.333, 0.333]

---------- Results:
prior.p .|> rd = [0.333, 0.333, 0.333]
posterior.p .|> rd = [1.0, 0.0, 0.0]

diceX.p .|> rd = [0.16, 0.16, 0.2, 0.16, 0.16, 0.16]
edist.p .|> rd = [0.15, 0.167, 0.2, 0.16, 0.16, 0.163]
predX.p .|> rd = [0.167, 0.167, 0.167, 0.167, 0.167, 0.167]
seed!(myseed)
diceX = Dice([2,3,1,2,1,2])
N = 100
sample = rand(diceX, N)
@time animate_Bayes("Bayes2-3.gif"; fps=5,
diceX=diceX, sample=sample, diceA=diceA, diceB=diceB, diceC=diceC
)
for n in 20:40:100
println("\n==================== n = $n")
print_result(diceX=diceX, sample=sample[1:n])
end
┌ Info: Saved animation to
│   fn = C:\Users\genkuroki\OneDrive\Math\Math0034\Bayes2-3.gif
└ @ Plots C:\Users\genkuroki\.julia\packages\Plots\WwFyB\src\animation.jl:98
┌ Info: Saved animation to
│   fn = C:\Users\genkuroki\OneDrive\Math\Math0034\Bayes2-3_no_true_dist.gif
└ @ Plots C:\Users\genkuroki\.julia\packages\Plots\WwFyB\src\animation.jl:98
64.536177 seconds (22.72 M allocations: 760.587 MiB, 0.43% gc time)

==================== n = 20
---------- Model:
diceA.p .|> rd = [0.167, 0.167, 0.167, 0.167, 0.167, 0.167]
diceB.p .|> rd = [0.3, 0.2, 0.2, 0.1, 0.1, 0.1]
diceC.p .|> rd = [0.1, 0.1, 0.1, 0.2, 0.2, 0.3]
prior.p .|> rd = [0.333, 0.333, 0.333]

---------- Results:
prior.p .|> rd = [0.333, 0.333, 0.333]
posterior.p .|> rd = [0.851, 0.096, 0.054]

diceX.p .|> rd = [0.182, 0.273, 0.091, 0.182, 0.091, 0.182]
edist.p .|> rd = [0.05, 0.35, 0.15, 0.15, 0.15, 0.15]
predX.p .|> rd = [0.176, 0.166, 0.166, 0.162, 0.162, 0.167]

==================== n = 60
---------- Model:
diceA.p .|> rd = [0.167, 0.167, 0.167, 0.167, 0.167, 0.167]
diceB.p .|> rd = [0.3, 0.2, 0.2, 0.1, 0.1, 0.1]
diceC.p .|> rd = [0.1, 0.1, 0.1, 0.2, 0.2, 0.3]
prior.p .|> rd = [0.333, 0.333, 0.333]

---------- Results:
prior.p .|> rd = [0.333, 0.333, 0.333]
posterior.p .|> rd = [0.995, 0.004, 0.001]

diceX.p .|> rd = [0.182, 0.273, 0.091, 0.182, 0.091, 0.182]
edist.p .|> rd = [0.15, 0.25, 0.117, 0.183, 0.15, 0.15]
predX.p .|> rd = [0.167, 0.167, 0.167, 0.166, 0.166, 0.167]

==================== n = 100
---------- Model:
diceA.p .|> rd = [0.167, 0.167, 0.167, 0.167, 0.167, 0.167]
diceB.p .|> rd = [0.3, 0.2, 0.2, 0.1, 0.1, 0.1]
diceC.p .|> rd = [0.1, 0.1, 0.1, 0.2, 0.2, 0.3]
prior.p .|> rd = [0.333, 0.333, 0.333]

---------- Results:
prior.p .|> rd = [0.333, 0.333, 0.333]
posterior.p .|> rd = [1.0, 0.0, 0.0]

diceX.p .|> rd = [0.182, 0.273, 0.091, 0.182, 0.091, 0.182]
edist.p .|> rd = [0.19, 0.23, 0.09, 0.2, 0.13, 0.16]
predX.p .|> rd = [0.167, 0.167, 0.167, 0.167, 0.167, 0.167]