4
4
from matplotlib import pyplot as plt
5
5
6
6
import numpy
7
+ import scipy .special
7
8
8
9
#Quick implementation of a (7,5) convolutional code encoder
9
10
def encode75 (msg ):
@@ -29,8 +30,10 @@ def viterbi_branch_metrics(bits_rcvd, nbits_cw):
29
30
cw = numpy .array ([[0.0 ,0.0 ], [0.0 ,1.0 ], [1.0 ,0.0 ], [1.0 ,1.0 ]]) #The 4 different codewords
30
31
31
32
bits_rcvd = numpy .array (bits_rcvd ).reshape ((K , nbits_cw ))
32
- for k in range (0 , K ):
33
- ret_val [k ][:] = numpy .sum (numpy .abs (bits_rcvd [k ][:]- cw )** 2 , axis = 1 )
33
+ i = 0
34
+ for cw0 in cw :
35
+ ret_val [:,i ] = numpy .sum (numpy .abs (bits_rcvd - cw0 )** 2 , axis = 1 )
36
+ i += 1
34
37
35
38
return ret_val .flatten ()
36
39
@@ -42,8 +45,10 @@ def log_bcjr_branch_metrics(bits_rcvd, nbits_cw, sigma_b2):
42
45
cw = numpy .array ([[0.0 ,0.0 ], [0.0 ,1.0 ], [1.0 ,0.0 ], [1.0 ,1.0 ]]) #The 4 different codewords
43
46
44
47
bits_rcvd = numpy .array (bits_rcvd ).reshape ((K , nbits_cw ))
45
- for k in range (0 , K ):
46
- ret_val [k ][:] = - 1.0 / sigma_b2 * numpy .sum (numpy .abs (bits_rcvd [k ][:]- cw )** 2 , axis = 1 )
48
+ i = 0
49
+ for cw0 in cw :
50
+ ret_val [:,i ] = - 1.0 / sigma_b2 * numpy .sum (numpy .abs (bits_rcvd - cw0 )** 2 , axis = 1 )
51
+ i += 1
47
52
48
53
return ret_val .flatten ()
49
54
@@ -54,8 +59,10 @@ def max_log_bcjr_branch_metrics(bits_rcvd, nbits_cw, sigma_b2):
54
59
cw = numpy .array ([[0.0 ,0.0 ], [0.0 ,1.0 ], [1.0 ,0.0 ], [1.0 ,1.0 ]]) #The 4 different codewords
55
60
56
61
bits_rcvd = numpy .array (bits_rcvd ).reshape ((K , nbits_cw ))
57
- for k in range (0 , K ):
58
- ret_val [k ][:] = - numpy .sum (numpy .abs (bits_rcvd [k ][:]- cw )** 2 , axis = 1 )
62
+ i = 0
63
+ for cw0 in cw :
64
+ ret_val [:,i ] = - numpy .sum (numpy .abs (bits_rcvd - cw0 )** 2 , axis = 1 )
65
+ i += 1
59
66
60
67
return ret_val .flatten ()
61
68
@@ -64,9 +71,8 @@ def log_bcjr_compute_llr(app, K, S):
64
71
llr = numpy .zeros (K , dtype = numpy .float32 )
65
72
66
73
app = app .reshape ((K , S , 2 ))
67
- for k in range (0 , K ):
68
- #We need copy() to make the vectors C-contiguous
69
- llr [k ] = bcjr .max_star (app [k ,:,0 ].copy ()) - bcjr .max_star (app [k ,:,1 ].copy ())
74
+ llr = scipy .special .logsumexp (app [:,:,0 ], axis = 1 ) \
75
+ - scipy .special .logsumexp (app [:,:,1 ], axis = 1 )
70
76
71
77
return llr
72
78
@@ -75,8 +81,6 @@ def max_log_bcjr_compute_llr(app, K, S):
75
81
76
82
app = app .reshape ((K , S , 2 ))
77
83
llr = numpy .max (app [:,:,0 ], axis = 1 ) - numpy .max (app [:,:,1 ], axis = 1 )
78
- #for k in range(0, K):
79
- # llr[k] = numpy.max(app[k,:,0]) - numpy.max(app[k,:,1])
80
84
81
85
return llr
82
86
0 commit comments