[1]:
%pylab inline
#%matplotlib qt
import sk_dsp_comm.sigsys as ss
import scipy.signal as signal
from IPython.display import Audio, display
from IPython.display import Image, SVG
Populating the interactive namespace from numpy and matplotlib
[2]:
# Figure rendering configuration for this notebook
pylab.rcParams['savefig.dpi'] = 100 # default 72
#pylab.rcParams['figure.figsize'] = (6.0, 4.0) # default (6,4)
#%config InlineBackend.figure_formats=['png'] # default for inline viewing
%config InlineBackend.figure_formats=['svg'] # SVG inline viewing
#%config InlineBackend.figure_formats=['pdf'] # render pdf figs for LaTeX
[3]:
import scipy.special as special
import sk_dsp_comm.digitalcom as dc
import sk_dsp_comm.fec_conv as fec
Convolutional Coding¶
Rate 1/2¶
A convolutional encoder object can be created with the fec.FECConv
method. The rate of the object will be determined by the number of generator polynomials used. Right now, only rate 1/2 and rate 1/3 are supported, so two or three generator polynomials can be used. The following table shows ideal rate 1/2 generator polynomials. These are also included in the docstring.
Table 1: Weight spectra \(c_k\) for bounding the coded rate 1/2 BEP.
CL |
Polynomials |
\(d_{free}\) |
\(d_f\) |
\(d_f+1\) |
\(d_f+2\) |
\(d_f+3\) |
\(d_f+4\) |
\(d_f+5\) |
\(d_f+6\) |
\(d_f+7\) |
---|---|---|---|---|---|---|---|---|---|---|
3 |
(5,7) = (‘101’,’111’) |
5 |
1 |
4 |
12 |
32 |
80 |
192 |
488 |
1024 |
4 |
(15,17) = (‘1101’,’1111’) |
6 |
2 |
7 |
18 |
49 |
130 |
333 |
836 |
2069 |
5 |
(23,35) = (‘10011’,’11101’) |
7 |
4 |
12 |
20 |
72 |
225 |
500 |
1324 |
3680 |
6 |
(53,75) = (‘101011’,’111101’) |
8 |
2 |
36 |
32 |
62 |
332 |
701 |
2342 |
5503 |
7 |
(133,171) = (‘1011011’,’1111001’) |
10 |
36 |
0 |
211 |
0 |
1404 |
0 |
11633 |
0 |
In addition to the generator polynomials, you can specify a decision depth for the object. This will determine how many state transitions will be used for the traceback. The following shows how to create a rate 1/2 fec_conv
object with constraint length 3 and decision depth 10.
[4]:
cc1 = fec.FECConv(('111','101'),10)
The trellis_plot()
method can be used to see the state transitions of the fec_conv
object.
[5]:
cc1.trellis_plot()
/home/docs/.pyenv/versions/3.7.9/lib/python3.7/site-packages/numpy/core/_asarray.py:136: VisibleDeprecationWarning: Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray
return array(a, dtype, copy=False, order=order, subok=True)
Rate 1/2 Hard Decision Decoding¶
Now, we would like to know the theoretical bit error probability bounds of our convolutional encoding/decoding setup. We can do this using the fec.conv_Pb_bound
method. The method takes the rate, degrees of freedom, \(c_k\) values, SNR, hard or soft decisions, and order M for an MPSK modulation scheme as arguments. It returns the BEP. The following shows theoretical bounds for rate 1/2 encoding/decoding BPSK system. Compare with Ziemer pg 667.
Weight Structure Bounds BEP¶
[6]:
# Theoretical hard-decision BEP bounds for rate 1/2 codes (compare Ziemer pg 667).
SNRdB = arange(0,12,.1)
# Uncoded BPSK reference curve (final argument 2 selects the uncoded case)
Pb_uc = fec.conv_Pb_bound(1/2,7,[4, 12, 20, 72, 225],SNRdB,2)
# Weight spectra c_k from Table 1; final argument 0 selects hard decisions.
# K = 3 spectrum: 7th term corrected to 488 per Table 1 (CL = 3 row); was mistyped as 448.
Pb_s_half_3_hard = fec.conv_Pb_bound(1/2,5,[1, 4, 12, 32, 80, 192, 488, 1024],SNRdB,0)
Pb_s_half_5_hard = fec.conv_Pb_bound(1/2,7,[4, 12, 20, 72, 225, 500, 1324, 3680],SNRdB,0)
Pb_s_half_7_hard = fec.conv_Pb_bound(1/2,10,[36, 0, 211, 0, 1404, 0, 11633, 0],SNRdB,0)
Pb_s_half_9_hard = fec.conv_Pb_bound(1/2,12,[33, 0, 281, 0, 2179, 0, 15035, 0],SNRdB,0)
figure(figsize=(5,5))
semilogy(SNRdB,Pb_uc)
semilogy(SNRdB,Pb_s_half_3_hard,'--')
semilogy(SNRdB,Pb_s_half_5_hard,'--')
semilogy(SNRdB,Pb_s_half_7_hard,'--')
semilogy(SNRdB,Pb_s_half_9_hard,'--')
axis([0,12,1e-7,1e0])
title(r'Hard Decision Rate 1/2 Coding Theory Bounds')
xlabel(r'$E_b/N_0$ (dB)')
ylabel(r'Symbol Error Probability')
legend(('Uncoded BPSK','R=1/2, K=3, Hard',\
        'R=1/2, K=5, Hard', 'R=1/2, K=7, Hard',\
        'R=1/2, K=9, Hard'),loc='upper right')
grid();
BEP Simulation¶
Now that we can determine our BEP bounds, we can test the actual encoder/decoder using dummy binary data. The following code creates a rate 1/2 fec_conv object. It then generates dummy binary data and encodes the data using the conv_encoder
method. This method takes an array of binary values, and an initial state as the input and returns the encoded bits and states. We then add noise to the encoded data according to the set \(E_b/N_0\) to simulate a noisy channel. The data is then
decoded using the viterbi_decoder
method. This method takes the array of noisy data and a decision metric. If the hard decision metric is selected, then we expect binary input values from around 0 to around 1. The method then returns the decoded binary values. Then the bit errors are counted. Once at least 100 bit errors are counted, the bit error probability is calculated.
[7]:
# Simulate hard-decision Viterbi decoding of the rate 1/2, constraint length 5
# code over an AWGN channel and estimate the bit error probability (BEP).
N_bits_per_frame = 10000
EbN0 = 4  # operating point in dB
total_bit_errors = 0
total_bit_count = 0
# Rate 1/2, K = 5 code (5-bit generator polynomials), decision depth 25
cc1 = fec.FECConv(('11101','10011'),25)
# Encode with shift register starting state of '0000'
state = '0000'
# Accumulate frames until at least 100 bit errors are observed
while total_bit_errors < 100:
    # Create N_bits_per_frame random 0/1 bits
    x = randint(0,2,N_bits_per_frame)
    y,state = cc1.conv_encoder(x,state)
    # Add channel noise to bits, include antipodal level shift to [-1,1]
    yn_soft = dc.cpx_awgn(2*y-1,EbN0-3,1) # Channel SNR is 3 dB less for rate 1/2
    # Hard decisions: map the sign of the real part back onto {0,1}
    yn_hard = ((sign(yn_soft.real)+1)/2).astype(int)
    z = cc1.viterbi_decoder(yn_hard,'hard')
    # Count bit errors
    bit_count, bit_errors = dc.bit_errors(x,z)
    total_bit_errors += bit_errors
    total_bit_count += bit_count
    print('Bits Received = %d, Bit errors = %d, BEP = %1.2e' %\
          (total_bit_count, total_bit_errors,\
           total_bit_errors/total_bit_count))
print('*****************************************************')
print('Bits Received = %d, Bit errors = %d, BEP = %1.2e' %\
      (total_bit_count, total_bit_errors,\
       total_bit_errors/total_bit_count))
Bits Received = 9976, Bit errors = 112, BEP = 1.12e-02
*****************************************************
Bits Received = 9976, Bit errors = 112, BEP = 1.12e-02
[8]:
y[:100].astype(int)
[8]:
array([1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1,
1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0,
0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1,
0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0,
1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0])
The simulated BEP can then be compared to the theoretical bounds that were shown earlier. Some values were simulated for the constraint length 3 and constraint length 5 cases.
[9]:
# Overlay simulated hard-decision BEP points on the theoretical rate 1/2 bounds.
SNRdB = arange(0,12,.1)
Pb_uc = fec.conv_Pb_bound(1/2,7,[4, 12, 20, 72, 225],SNRdB,2)
# K = 3 spectrum: 7th term corrected to 488 per Table 1 (CL = 3 row); was mistyped as 448.
Pb_s_half_3_hard = fec.conv_Pb_bound(1/2,5,[1, 4, 12, 32, 80, 192, 488, 1024],SNRdB,0)
Pb_s_half_5_hard = fec.conv_Pb_bound(1/2,7,[4, 12, 20, 72, 225, 500, 1324, 3680],SNRdB,0)
Pb_s_half_7_hard = fec.conv_Pb_bound(1/2,10,[36, 0, 211, 0, 1404, 0, 11633, 0],SNRdB,0)
Pb_s_half_9_hard = fec.conv_Pb_bound(1/2,12,[33, 0, 281, 0, 2179, 0, 15035, 0],SNRdB,0)
# Simulated BEP points: K = 5 at Eb/N0 = 3..7 dB, K = 3 at Eb/N0 = 3..8 dB
Pb_s_half_5_hard_sim = array([3.36e-2,1.04e-2,1.39e-3,1.56e-04,1.24e-05])
Pb_s_half_3_hard_sim = array([2.59e-02,1.35e-02,2.71e-03,6.39e-04,9.73e-05,7.71e-06])
figure(figsize=(5,5))
semilogy(SNRdB,Pb_uc)
semilogy(SNRdB,Pb_s_half_3_hard,'y--')
semilogy(SNRdB,Pb_s_half_5_hard,'g--')
semilogy(SNRdB,Pb_s_half_7_hard,'--')
semilogy(SNRdB,Pb_s_half_9_hard,'--')
semilogy([3,4,5,6,7,8],Pb_s_half_3_hard_sim,'ys')
semilogy([3,4,5,6,7],Pb_s_half_5_hard_sim,'gs')
axis([0,12,1e-7,1e0])
title(r'Hard Decision Rate 1/2 Coding Measurements')
xlabel(r'$E_b/N_0$ (dB)')
ylabel(r'Symbol Error Probability')
legend(('Uncoded BPSK','R=1/2, K=3, Hard',\
        'R=1/2, K=5, Hard', 'R=1/2, K=7, Hard',\
        'R=1/2, K=9, Hard', 'R=1/2, K=3, Simulation',\
        'R=1/2, K=5, Simulation'),loc='lower left')
grid();
We can look at the surviving paths using the traceback_plot
method.
[10]:
cc1.traceback_plot()
Soft Decision Decoding BEP Simulation¶
Soft decision decoding can also be done. In order to simulate the soft decision decoder, we can use the same setup as before, but now we specify ‘soft’ in the viterbi_decoder
method. We also have to pick a quantization level when we do this. If we want 3-bit quantization we would specify that the quant_level=3. When we use soft decisions we have to scale our noisy received values to values on \([0,2^{n}-1]\). So for a three-bit quantization, we would scale to values on \([0,7]\).
This helps the system to get better distance metrics for all possible paths in the decoder, thus improving the BEP. The following shows how to simulate soft decisions.
[11]:
# Simulate 3-bit quantized soft-decision Viterbi decoding of the rate 1/2,
# constraint length 5 code and estimate the bit error probability (BEP).
N_bits_per_frame = 10000
EbN0 = 2  # operating point in dB
total_bit_errors = 0
total_bit_count = 0
# Rate 1/2, K = 5 code (5-bit generator polynomials), decision depth 25
cc1 = fec.FECConv(('11101','10011'),25)
# Encode with shift register starting state of '0000'
state = '0000'
# Accumulate frames until at least 100 bit errors are observed
while total_bit_errors < 100:
    # Create N_bits_per_frame random 0/1 bits
    x = randint(0,2,N_bits_per_frame)
    y,state = cc1.conv_encoder(x,state)
    # Add channel noise to bits, include antipodal level shift to [-1,1]
    yn = dc.cpx_awgn(2*y-1,EbN0-3,1) # Channel SNR is 3dB less for rate 1/2
    # Scale & level shift to three-bit quantization levels [0,7]
    yn = (yn.real+1)/2*7
    z = cc1.viterbi_decoder(yn.real,'soft',quant_level=3)
    # Count bit errors
    bit_count, bit_errors = dc.bit_errors(x,z)
    total_bit_errors += bit_errors
    total_bit_count += bit_count
    print('Bits Received = %d, Bit errors = %d, BEP = %1.2e' %\
          (total_bit_count, total_bit_errors,\
           total_bit_errors/total_bit_count))
print('*****************************************************')
print('Bits Received = %d, Bit errors = %d, BEP = %1.2e' %\
      (total_bit_count, total_bit_errors,\
       total_bit_errors/total_bit_count))
Bits Received = 9976, Bit errors = 143, BEP = 1.43e-02
*****************************************************
Bits Received = 9976, Bit errors = 143, BEP = 1.43e-02
[12]:
# Theoretical soft-decision BEP bounds for rate 1/3 codes with a measured point set.
SNRdB = arange(0,12,.1)
Pb_uc = fec.conv_Pb_bound(1/3,7,[4, 12, 20, 72, 225],SNRdB,2)
# Weight spectra c_k from Table 2; final argument 1 selects soft decisions.
Pb_s_third_3 = fec.conv_Pb_bound(1/3,8,[3, 0, 15],SNRdB,1)
Pb_s_third_4 = fec.conv_Pb_bound(1/3,10,[6, 0, 6, 0],SNRdB,1)
Pb_s_third_5 = fec.conv_Pb_bound(1/3,12,[12, 0, 12, 0, 56],SNRdB,1)
Pb_s_third_6 = fec.conv_Pb_bound(1/3,13,[1, 8, 26, 20, 19, 62],SNRdB,1)
Pb_s_third_7 = fec.conv_Pb_bound(1/3,14,[1, 0, 20, 0, 53, 0, 184],SNRdB,1)
Pb_s_third_8 = fec.conv_Pb_bound(1/3,16,[1, 0, 24, 0, 113, 0, 287, 0],SNRdB,1)
Pb_s_half = fec.conv_Pb_bound(1/2,7,[4, 12, 20, 72, 225],SNRdB,1)
figure(figsize=(5,5))
semilogy(SNRdB,Pb_uc)
semilogy(SNRdB,Pb_s_third_3,'--')
semilogy(SNRdB,Pb_s_third_4,'--')
semilogy(SNRdB,Pb_s_third_5,'g')
semilogy(SNRdB,Pb_s_third_6,'--')
semilogy(SNRdB,Pb_s_third_7,'--')
semilogy(SNRdB,Pb_s_third_8,'--')
#semilogy(SNRdB,Pb_s_half,'--')
semilogy([0,1,2,3,4,5],[9.08e-02,2.73e-02,6.52e-03,\
                        8.94e-04,8.54e-05,5e-6],'gs')
axis([0,12,1e-7,1e0])
title(r'Soft Decision Rate 1/2 Coding Measurements')
xlabel(r'$E_b/N_0$ (dB)')
ylabel(r'Symbol Error Probability')
# Legend trimmed to eight labels to match the eight plotted artists
# (a ninth, redundant 'Simulation' entry was previously listed).
legend(('Uncoded BPSK','R=1/3, K=3, Soft',\
        'R=1/3, K=4, Soft','R=1/3, K=5, Soft',\
        'R=1/3, K=6, Soft','R=1/3, K=7, Soft',\
        'R=1/3, K=8, Soft',\
        'R=1/3, K=5, Sim'),loc='upper right')
grid();
The decoder can also do unquantized soft decisions. This is done by specifying ‘unquant’ for the metric type. The system will then expect floating point numbers on \([0,1]\) at the decoder input.
Rate 1/3¶
Rate 1/3 convolution encoding/decoding can be done very similarly to the rate 1/2 code. The difference when instantiating, is that the rate 1/3 uses 3 generator polynomials instead of 2. The following table shows ideal generator polynomials at different constraint lengths for rate 1/3 convolutional codes.
Table 2: Weight spectra \(c_k\) for bounding the coded rate 1/3 BEP.
CL |
Polynomials |
\(d_{free}\) |
\(d_f\) |
\(d_f+1\) |
\(d_f+2\) |
\(d_f+3\) |
\(d_f+4\) |
\(d_f+5\) |
\(d_f+6\) |
\(d_f+7\) |
---|---|---|---|---|---|---|---|---|---|---|
3 |
(7,7,5) = (‘111’,’111’,’101’) |
8 |
3 |
0 |
15 |
0 |
58 |
0 |
201 |
0 |
4 |
(15,13,11) = (‘1111’,’1101’,’1011’) |
10 |
6 |
0 |
6 |
0 |
58 |
0 |
118 |
0 |
5 |
(31,27,21) = (‘11111’,’11011’,’10101’) |
12 |
12 |
0 |
12 |
0 |
56 |
0 |
320 |
0 |
6 |
(61,43,39) = (‘111101’,’101011’,’100111’) |
13 |
1 |
8 |
26 |
20 |
19 |
62 |
86 |
204 |
7 |
(121,101,91) = (‘1111001’,’1100101’,’1011011’) |
14 |
1 |
0 |
20 |
0 |
53 |
0 |
184 |
0 |
8 |
(247,217,149) = (‘11110111’,’11011001’,’10010101’) |
16 |
1 |
0 |
24 |
0 |
113 |
0 |
287 |
0 |
[13]:
# Rate 1/3 code: three generator polynomials ('111','111','101'), decision depth 10
cc2 = fec.FECConv(('111','111','101'),10)
# Draw its state-transition trellis
cc2.trellis_plot()
/home/docs/.pyenv/versions/3.7.9/lib/python3.7/site-packages/numpy/core/_asarray.py:136: VisibleDeprecationWarning: Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray
return array(a, dtype, copy=False, order=order, subok=True)
Rate 1/3 Hard Decision Decoding¶
Weight Structure Bounds BEP¶
Compare with Ziemer pg 668.
[14]:
# Theoretical hard-decision BEP bounds for rate 1/3 codes (compare Ziemer pg 668).
SNRdB = arange(0,12,.1)
Pb_uc = fec.conv_Pb_bound(1/3,7,[4, 12, 20, 72, 225],SNRdB,2)
# Weight spectra c_k from Table 2; final argument 0 selects hard decisions.
Pb_s_third_3_hard = fec.conv_Pb_bound(1/3,8,[3, 0, 15, 0, 58, 0, 201, 0],SNRdB,0)
Pb_s_third_4_hard = fec.conv_Pb_bound(1/3,10,[6, 0, 6, 0, 58, 0, 118, 0],SNRdB,0)
Pb_s_third_5_hard = fec.conv_Pb_bound(1/3,12,[12, 0, 12, 0, 56, 0, 320, 0],SNRdB,0)
Pb_s_third_6_hard = fec.conv_Pb_bound(1/3,13,[1, 8, 26, 20, 19, 62, 86, 204],SNRdB,0)
# K = 7 spectrum completed with trailing 0 per Table 2 (eight terms, as in the other rows)
Pb_s_third_7_hard = fec.conv_Pb_bound(1/3,14,[1, 0, 20, 0, 53, 0, 184, 0],SNRdB,0)
Pb_s_third_8_hard = fec.conv_Pb_bound(1/3,16,[1, 0, 24, 0, 113, 0, 287, 0],SNRdB,0)
figure(figsize=(5,5))
semilogy(SNRdB,Pb_uc)
semilogy(SNRdB,Pb_s_third_3_hard,'--')
#semilogy(SNRdB,Pb_s_third_4_hard,'--')
semilogy(SNRdB,Pb_s_third_5_hard,'--')
#semilogy(SNRdB,Pb_s_third_6_hard,'--')
semilogy(SNRdB,Pb_s_third_7_hard,'--')
#semilogy(SNRdB,Pb_s_third_8_hard,'--')
axis([0,12,1e-7,1e0])
title(r'Hard Decision Rate 1/3 Coding Theory Bounds')
xlabel(r'$E_b/N_0$ (dB)')
ylabel(r'Symbol Error Probability')
legend(('Uncoded BPSK','R=1/3, K=3, Hard',\
        'R=1/3, K=5, Hard', 'R=1/3, K=7, Hard'),\
       loc='upper right')
grid();
BEP Simulation¶
[15]:
# Simulate hard-decision Viterbi decoding of the rate 1/3, constraint length 5
# code over an AWGN channel and estimate the bit error probability (BEP).
N_bits_per_frame = 10000
EbN0 = 3  # operating point in dB
total_bit_errors = 0
total_bit_count = 0
# Rate 1/3, K = 5 code (three 5-bit generator polynomials), decision depth 25
cc1 = fec.FECConv(('11111','11011','10101'),25)
# Encode with shift register starting state of '0000'
state = '0000'
# Accumulate frames until at least 100 bit errors are observed
while total_bit_errors < 100:
    # Create N_bits_per_frame random 0/1 bits
    x = randint(0,2,N_bits_per_frame)
    y,state = cc1.conv_encoder(x,state)
    # Add channel noise to bits, include antipodal level shift to [-1,1]
    yn_soft = dc.cpx_awgn(2*y-1,EbN0-10*log10(3),1) # Channel SNR is 10*log10(3) dB less
    # Hard decisions: map the sign of the real part back onto {0,1}
    yn_hard = ((sign(yn_soft.real)+1)/2).astype(int)
    z = cc1.viterbi_decoder(yn_hard.real,'hard')
    # Count bit errors
    bit_count, bit_errors = dc.bit_errors(x,z)
    total_bit_errors += bit_errors
    total_bit_count += bit_count
    print('Bits Received = %d, Bit errors = %d, BEP = %1.2e' %\
          (total_bit_count, total_bit_errors,\
           total_bit_errors/total_bit_count))
print('*****************************************************')
print('Bits Received = %d, Bit errors = %d, BEP = %1.2e' %\
      (total_bit_count, total_bit_errors,\
       total_bit_errors/total_bit_count))
Bits Received = 9976, Bit errors = 213, BEP = 2.14e-02
*****************************************************
Bits Received = 9976, Bit errors = 213, BEP = 2.14e-02
[16]:
# Overlay simulated hard-decision BEP points on the theoretical rate 1/3 bounds.
SNRdB = arange(0,12,.1)
Pb_uc = fec.conv_Pb_bound(1/3,7,[4, 12, 20, 72, 225],SNRdB,2)
Pb_s_third_3_hard = fec.conv_Pb_bound(1/3,8,[3, 0, 15, 0, 58, 0, 201, 0],SNRdB,0)
Pb_s_third_5_hard = fec.conv_Pb_bound(1/3,12,[12, 0, 12, 0, 56, 0, 320, 0],SNRdB,0)
# K = 7 spectrum completed with trailing 0 per Table 2 (eight terms, as in the other rows)
Pb_s_third_7_hard = fec.conv_Pb_bound(1/3,14,[1, 0, 20, 0, 53, 0, 184, 0],SNRdB,0)
# Simulated BEP points for K = 5 at Eb/N0 = 5, 6, 7 dB
Pb_s_third_5_hard_sim = array([8.94e-04,1.11e-04,8.73e-06])
figure(figsize=(5,5))
semilogy(SNRdB,Pb_uc)
semilogy(SNRdB,Pb_s_third_3_hard,'r--')
semilogy(SNRdB,Pb_s_third_5_hard,'g--')
semilogy(SNRdB,Pb_s_third_7_hard,'k--')
semilogy(array([5,6,7]),Pb_s_third_5_hard_sim,'sg')
axis([0,12,1e-7,1e0])
title(r'Hard Decision Rate 1/3 Coding Measurements')
xlabel(r'$E_b/N_0$ (dB)')
ylabel(r'Symbol Error Probability')
legend(('Uncoded BPSK','R=1/3, K=3, Hard',\
        'R=1/3, K=5, Hard', 'R=1/3, K=7, Hard',\
        ),loc='upper right')
grid();
[17]:
cc1.traceback_plot()
Soft Decision Decoding BEP Simulation¶
Here we use 3-bit quantization soft decoding.
[18]:
# Simulate 3-bit quantized soft-decision Viterbi decoding of the rate 1/3,
# constraint length 5 code and estimate the bit error probability (BEP).
N_bits_per_frame = 10000
EbN0 = 2  # operating point in dB
total_bit_errors = 0
total_bit_count = 0
# Rate 1/3, K = 5 code (three 5-bit generator polynomials), decision depth 25
cc1 = fec.FECConv(('11111','11011','10101'),25)
# Encode with shift register starting state of '0000'
state = '0000'
# Accumulate frames until at least 100 bit errors are observed
while total_bit_errors < 100:
    # Create N_bits_per_frame random 0/1 bits
    x = randint(0,2,N_bits_per_frame)
    y,state = cc1.conv_encoder(x,state)
    # Add channel noise to bits, include antipodal level shift to [-1,1]
    yn = dc.cpx_awgn(2*y-1,EbN0-10*log10(3),1) # Channel SNR is 10*log10(3) dB less
    # Translate to [0,7]
    yn = (yn.real+1)/2*7
    z = cc1.viterbi_decoder(yn,'soft',quant_level=3)
    # Count bit errors
    bit_count, bit_errors = dc.bit_errors(x,z)
    total_bit_errors += bit_errors
    total_bit_count += bit_count
    print('Bits Received = %d, Bit errors = %d, BEP = %1.2e' %\
          (total_bit_count, total_bit_errors,\
           total_bit_errors/total_bit_count))
print('*****************************************************')
print('Bits Received = %d, Bit errors = %d, BEP = %1.2e' %\
      (total_bit_count, total_bit_errors,\
       total_bit_errors/total_bit_count))
Bits Received = 9976, Bit errors = 67, BEP = 6.72e-03
Bits Received = 19952, Bit errors = 116, BEP = 5.81e-03
*****************************************************
Bits Received = 19952, Bit errors = 116, BEP = 5.81e-03
[19]:
# Overlay simulated soft-decision BEP points on the theoretical rate 1/3 bounds.
SNRdB = arange(0,12,.1)
Pb_uc = fec.conv_Pb_bound(1/3,7,[4, 12, 20, 72, 225],SNRdB,2)
Pb_s_third_3 = fec.conv_Pb_bound(1/3,8,[3, 0, 15, 0, 58, 0, 201, 0],SNRdB,1)
#Pb_s_third_4 = fec.conv_Pb_bound(1/3,10,[6, 0, 6, 0, 58, 0, 118, 0],SNRdB,1)
Pb_s_third_5 = fec.conv_Pb_bound(1/3,12,[12, 0, 12, 0, 56, 0, 320, 0],SNRdB,1)
#Pb_s_third_6 = fec.conv_Pb_bound(1/3,13,[1, 8, 26, 20, 19, 62, 86, 204],SNRdB,1)
Pb_s_third_7 = fec.conv_Pb_bound(1/3,14,[1, 0, 20, 0, 53, 0, 184, 0],SNRdB,1)
#Pb_s_third_8 = fec.conv_Pb_bound(1/3,16,[1, 0, 24, 0, 113, 0, 287, 0],SNRdB,1)
figure(figsize=(5,5))
semilogy(SNRdB,Pb_uc)
semilogy(SNRdB,Pb_s_third_3,'--')
#semilogy(SNRdB,Pb_s_third_4,'--')
semilogy(SNRdB,Pb_s_third_5,'g')
#semilogy(SNRdB,Pb_s_third_6,'--')
semilogy(SNRdB,Pb_s_third_7,'r--')
#semilogy(SNRdB,Pb_s_third_8,'--')
#semilogy(SNRdB,Pb_s_half,'--')
semilogy([0,1,2,3,4,5],[9.08e-02,2.73e-02,6.52e-03,\
                        8.94e-04,8.54e-05,5e-6],'gs')
axis([0,12,1e-7,1e0])
title(r'Soft Decision Rate 1/3 Coding Measurements')
xlabel(r'$E_b/N_0$ (dB)')
ylabel(r'Symbol Error Probability')
# Legend label fixed: 'R-1/3' was a typo for 'R=1/3'
legend(('Uncoded BPSK','R=1/3, K=3, Soft',\
        #'R=1/3, K=4, Soft','R=1/3, K=5, Soft',\
        'R=1/3, K=5, Soft','R=1/3, K=7, Soft',\
        #'R=1/3, K=8, Soft','R=1/2, K=5, Soft', \
        'R=1/3, K=5, Simulation'),loc='upper right')
grid();