Reference: Ch 11 of A. El Gamal and Y.-H. Kim, Network Information Theory, Cambridge University Press, 2011.
Author: Cheuk Ting Li
from psitip import *
PsiOpts.setting(
    solver = "ortools.GLOP",    # Set linear programming solver
    repr_latex = True,          # Jupyter Notebook LaTeX display
    venn_latex = True,          # LaTeX in diagrams
    proof_note_color = "blue",  # Reasons in proofs are shown in blue
)
M, X, Y = rv("M, X, Y")
Xh = rv("Xh", latex="\hat{X}")
R = real("R")
# Lossy compression with side info available causally at decoder
model = CodingModel()
model.set_rate(M, R) # The rate of M is R
model.add_edge(X, Y) # Correlated source X, Y
model.add_node(X, M, label = "Enc") # Encoder maps X to M
model.add_node(M+Y, Xh,
               rv_in_causal = Y, # Y is available only causally
               label = "Dec")    # Decoder maps (M, Y) to Xh
model.graph() # Draw diagram
R_opt = model.minimum(R, R) # Optimal rate; recovers [Weissman-El Gamal 2006]
R_opt.upper_bound()         # Upper bound on the optimal rate
r = model.get_inner(is_proof=True) # Achievability proof
r.display(note=True)
# Converse proof
model.proof_outer(r)
(R_opt >= I(X & Xh | Y)).solve() # A lower bound on R_opt
# The program makes the implicit assumption that the empirical joint distribution
# of (X, Y, Xh) is fixed, so we are not free to choose Xh conditionally
# independent of Y given X. Hence, the following test of the upper bound fails:
# bool(R_opt <= I(X & Xh))
# To prove the upper bound, we need the additional Markov assumption Y - X - Xh:
(markov(Y, X, Xh) >> (R_opt <= I(X & Xh))).solve() # Upper bound
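# A hedged sanity check (a sketch; it assumes the solver can verify this
# implication): if the side information Y is independent of (X, Xh), then
# I(X & Xh | Y) = I(X & Xh), so the lower bound above implies that
# independent side information cannot reduce the rate below I(X & Xh).
(indep(X+Xh, Y) >> (R_opt >= I(X & Xh))).solve()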
M, X, Y = rv("M, X, Y")
R = real("R")
# Lossless compression with side info available causally at decoder
model = CodingModel()
model.set_rate(M, R) # The rate of M is R
model.add_edge(X, Y) # Correlated source X, Y
model.add_node(X, M, label = "Enc") # Encoder maps X to M
model.add_node(M+Y, X,
               rv_in_causal = Y, # Y is available only causally
               label = "Dec")    # Decoder recovers X from (M, Y)
model.graph() # Draw diagram
R_opt = model.minimum(R, R) # Optimal rate; recovers [Weissman-El Gamal 2006]
R_opt.upper_bound()         # Upper bound on the optimal rate
r = model.get_inner(is_proof=True) # Achievability proof
r.display(note=True)
# Converse proof
model.proof_outer(r)
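# Two hedged sanity checks for the lossless setting (assuming the solver can
# verify these implications). Substituting Xh = X into the lossy lower bound
# gives R_opt >= H(X | Y), the Slepian-Wolf-style bound; and since X can be
# compressed losslessly while ignoring Y, we also expect R_opt <= H(X).
(R_opt >= H(X | Y)).solve() # Lower bound: rate cannot beat H(X|Y)
(R_opt <= H(X)).solve()     # Upper bound: compress X without using Y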
M, X, Y = rv("M, X, Y")
Xh = rv("Xh", latex="\hat{X}")
R = real("R")
# Lossy compression with side info available noncausally at decoder
model = CodingModel()
model.set_rate(M, R) # The rate of M is R
model.add_edge(X, Y) # Correlated source X, Y
model.add_node(X, M, label = "Enc") # Encoder maps X to M
model.add_node(M+Y, Xh, label = "Dec") # Decoder maps M,Y to Xh
model.graph() # Draw diagram
R_opt = model.minimum(R, R) # Optimal rate; recovers [Wyner-Ziv 1976]
R_opt.upper_bound()         # Upper bound on the optimal rate
r = model.get_inner(is_proof=True) # Achievability proof
r.display(note=True)
# Converse proof
model.proof_outer(r)
(R_opt >= I(X & Xh | Y)).solve() # A lower bound on R_opt
(markov(Y, X, Xh) >> (R_opt <= I(X & Xh))).solve() # Upper bound under the Markov assumption Y - X - Xh
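# A hedged consistency check (assuming the solver can verify it): since
# I(X & Xh | Y) >= I(X & Xh) - I(Y & Xh), the lower bound above also implies
# that the familiar Wyner-Ziv-style difference of mutual informations,
# evaluated at the reproduction Xh, is a lower bound on the rate.
(R_opt >= I(X & Xh) - I(Y & Xh)).solve()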