@@ -2,7 +2,6 @@

 from copy import copy
 from operator import attrgetter
-import random
 from unittest import TestCase, SkipTest

 from parameterized import parameterized
@@ -25,6 +24,9 @@
 DECIMAL_PLACES = 8


+rand = np.random.RandomState(1337)
+
+
 class BaseTestCase(TestCase):
     def assert_indexes_match(self, result, expected):
         """
@@ -42,7 +44,6 @@ def assert_indexes_match(self, result, expected):


 class TestStats(BaseTestCase):
-
     # Simple benchmark, no drawdown
     simple_benchmark = pd.Series(
         np.array([0., 1., 0., 1., 0., 1., 0., 1., 0.]) / 100,
@@ -95,12 +96,12 @@ class TestStats(BaseTestCase):

     # Random noise
     noise = pd.Series(
-        [random.gauss(0, 0.001) for i in range(1000)],
+        rand.normal(0, 0.001, 1000),
         index=pd.date_range('2000-1-30', periods=1000, freq='D', tz='UTC')
     )

     noise_uniform = pd.Series(
-        [random.uniform(-0.01, 0.01) for i in range(1000)],
+        rand.uniform(-0.01, 0.01, 1000),
         index=pd.date_range('2000-1-30', periods=1000, freq='D', tz='UTC')
     )
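`rand.normal(mu, sigma, n)` draws the whole sample in one vectorized call instead of looping `random.gauss`; the result is distributionally equivalent, and `pd.Series` accepts the returned array directly. A small sketch:

    import numpy as np

    rand = np.random.RandomState(1337)
    # One call yields a (1000,) ndarray, replacing a 1000-iteration loop.
    draws = rand.normal(0, 0.001, 1000)
    assert draws.shape == (1000,)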
@@ -131,11 +132,11 @@ class TestStats(BaseTestCase):
     )

     # Sparse noise, same as noise but with np.nan sprinkled in
-    replace_nan = random.sample(noise.index.tolist(), random.randint(1, 10))
+    replace_nan = rand.choice(noise.index.tolist(), rand.randint(1, 10))
     sparse_noise = noise.replace(replace_nan, np.nan)

     # Sparse flat line at 0.01
-    replace_nan = random.sample(noise.index.tolist(), random.randint(1, 10))
+    replace_nan = rand.choice(noise.index.tolist(), rand.randint(1, 10))
     sparse_flat_line_1_tz = flat_line_1_tz.replace(replace_nan, np.nan)

     one = [-0.00171614, 0.01322056, 0.03063862, -0.01422057, -0.00489779,
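One behavioral difference worth flagging: `random.sample` draws without replacement, while `RandomState.choice` defaults to `replace=True`, so duplicate indices are now possible. That is harmless here (replacing the same index with NaN twice is a no-op), but `replace=False` would restore exact parity. A sketch:

    import numpy as np

    rand = np.random.RandomState(1337)
    # choice() samples with replacement by default; pass replace=False
    # to mirror random.sample's distinct-element guarantee.
    picks = rand.choice(np.arange(100), rand.randint(1, 10), replace=False)
    assert len(set(picks)) == len(picks)  # all distinct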
@@ -432,12 +433,12 @@ def test_sharpe_translation_1(self, returns, required_return, translation):
     def test_sharpe_noise(self, small, large):
         index = pd.date_range('2000-1-30', periods=1000, freq='D')
         smaller_normal = pd.Series(
-            [random.gauss(.01, small) for i in range(1000)],
-            index=index
+            rand.normal(.01, small, 1000),
+            index=index,
         )
         larger_normal = pd.Series(
-            [random.gauss(.01, large) for i in range(1000)],
-            index=index
+            rand.normal(.01, large, 1000),
+            index=index,
         )
         assert self.empyrical.sharpe_ratio(smaller_normal, 0.001) > \
             self.empyrical.sharpe_ratio(larger_normal, 0.001)
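The assertion holds because, with the means held equal at .01, the Sharpe ratio scales inversely with volatility. A rough sketch of the quantity being exercised (assuming the conventional sqrt(252) daily annualization; empyrical's exact constant is not verified here):

    import numpy as np

    # Mean excess return over its standard deviation, annualized; with
    # equal means, the noisier series must score lower.
    def sharpe_sketch(returns, risk_free=0.0, ann_factor=252):
        excess = np.asarray(returns) - risk_free
        return np.mean(excess) / np.std(excess, ddof=1) * np.sqrt(ann_factor)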
@@ -517,12 +518,20 @@ def test_downside_risk_trans(self, returns, required_return):
     ])
     def test_downside_risk_std(self, smaller_std, larger_std):
         less_noise = pd.Series(
-            [random.gauss(0, smaller_std) for i in range(1000)],
-            index=pd.date_range('2000-1-30', periods=1000, freq='D')
+            (
+                rand.normal(0, smaller_std, 1000)
+                if smaller_std != 0
+                else np.full(1000, 0)
+            ),
+            index=pd.date_range('2000-1-30', periods=1000, freq='D'),
         )
         more_noise = pd.Series(
-            [random.gauss(0, larger_std) for i in range(1000)],
-            index=pd.date_range('2000-1-30', periods=1000, freq='D')
+            (
+                rand.normal(0, larger_std, 1000)
+                if larger_std != 0
+                else np.full(1000, 0)
+            ),
+            index=pd.date_range('2000-1-30', periods=1000, freq='D'),
         )
         assert self.empyrical.downside_risk(less_noise) < \
             self.empyrical.downside_risk(more_noise)
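Downside risk penalizes only below-target returns, which is why wider symmetric noise strictly increases it. A sketch of the usual second-order lower-partial-moment definition (annualization assumed, not checked against empyrical's source):

    import numpy as np

    # Root-mean-square of below-target deviations; above-target
    # returns contribute nothing.
    def downside_risk_sketch(returns, required_return=0.0, ann_factor=252):
        downside = np.minimum(np.asarray(returns) - required_return, 0.0)
        return np.sqrt(np.mean(downside ** 2)) * np.sqrt(ann_factor)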
@@ -580,7 +589,7 @@ def test_sortino_add_noise(self, returns, required_return):
         sr_1 = self.empyrical.sortino_ratio(returns, required_return)
         upside_values = returns[returns > required_return].index.tolist()
         # Add large losses at random upside locations
-        loss_loc = random.sample(upside_values, 2)
+        loss_loc = rand.choice(upside_values, 2)
         returns[loss_loc[0]] = -0.01
         sr_2 = self.empyrical.sortino_ratio(returns, required_return)
         returns[loss_loc[1]] = -0.01
@@ -600,7 +609,7 @@ def test_sortino_sub_noise(self, returns, required_return):
         sr_1 = self.empyrical.sortino_ratio(returns, required_return)
         downside_values = returns[returns < required_return].index.tolist()
         # Replace some values below the required return with the required return
-        loss_loc = random.sample(downside_values, 2)
+        loss_loc = rand.choice(downside_values, 2)
         returns[loss_loc[0]] = required_return
         sr_2 = self.empyrical.sortino_ratio(returns, required_return)
         returns[loss_loc[1]] = required_return
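Both Sortino tests lean on the same mechanics: the ratio is mean excess return over downside deviation, so injecting losses at upside locations lowers it, and lifting below-target values up to the target raises it. A sketch (annualization omitted):

    import numpy as np

    # Mean excess return divided by the downside deviation.
    def sortino_sketch(returns, required_return=0.0):
        excess = np.asarray(returns) - required_return
        downside = np.sqrt(np.mean(np.minimum(excess, 0.0) ** 2))
        return np.mean(excess) / downside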
@@ -770,7 +779,7 @@ def test_alpha_beta_translation(self, mean_returns, translation):
         means = [mean_returns, .001]
         covs = [[std_returns ** 2, std_returns * std_bench * correlation],
                 [std_returns * std_bench * correlation, std_bench ** 2]]
-        (ret, bench) = np.random.multivariate_normal(means, covs, 1000).T
+        (ret, bench) = rand.multivariate_normal(means, covs, 1000).T
         returns = pd.Series(
             ret,
             index=pd.date_range('2000-1-30', periods=1000, freq='D'))
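The 2x2 covariance matrix is built so the off-diagonal term rho * sigma_r * sigma_b fixes the correlation between the two simulated series. A quick self-contained check that `multivariate_normal` honors the target:

    import numpy as np

    rand = np.random.RandomState(1337)
    rho, s_r, s_b = 0.8, 0.02, 0.01
    cov = [[s_r ** 2, s_r * s_b * rho],
           [s_r * s_b * rho, s_b ** 2]]
    ret, bench = rand.multivariate_normal([0.0, 0.0], cov, 100000).T
    # The sample correlation converges to the requested rho.
    assert abs(np.corrcoef(ret, bench)[0, 1] - rho) < 0.01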
@@ -821,15 +830,15 @@ def test_alpha_beta_correlation(self, corr_less, corr_more):
         means_less = [mean_returns, mean_bench]
         covs_less = [[std_returns ** 2, std_returns * std_bench * corr_less],
                      [std_returns * std_bench * corr_less, std_bench ** 2]]
-        (ret_less, bench_less) = np.random.multivariate_normal(
+        (ret_less, bench_less) = rand.multivariate_normal(
             means_less, covs_less, 1000).T
         returns_less = pd.Series(ret_less, index=index)
         benchmark_less = pd.Series(bench_less, index=index)
         # Generate more highly correlated returns
         means_more = [mean_returns, mean_bench]
         covs_more = [[std_returns ** 2, std_returns * std_bench * corr_more],
                      [std_returns * std_bench * corr_more, std_bench ** 2]]
-        (ret_more, bench_more) = np.random.multivariate_normal(
+        (ret_more, bench_more) = rand.multivariate_normal(
             means_more, covs_more, 1000).T
         returns_more = pd.Series(ret_more, index=index)
         benchmark_more = pd.Series(bench_more, index=index)
@@ -865,17 +874,32 @@ def test_alpha_beta_with_nan_inputs(self, returns, benchmark):
         (2 * noise, noise, 2.0),
         (noise, inv_noise, -1.0),
         (2 * noise, inv_noise, -2.0),
-        (sparse_noise * flat_line_1_tz, sparse_flat_line_1_tz, np.nan),
+        (sparse_noise * flat_line_1_tz, sparse_flat_line_1_tz, np.nan),
+        (
+            simple_benchmark + rand.normal(0, 0.001, len(simple_benchmark)),
+            pd.DataFrame({'returns': simple_benchmark}),
+            1.0,
+            2,
+        ),
     ])
-    def test_beta(self, returns, benchmark, expected):
+    def test_beta(self,
+                  returns,
+                  benchmark,
+                  expected,
+                  decimal_places=DECIMAL_PLACES):
         observed = self.empyrical.beta(returns, benchmark)
         assert_almost_equal(
             observed,
             expected,
-            DECIMAL_PLACES)
+            decimal_places,
+        )

         if len(returns) == len(benchmark):
             # Compare to scipy linregress
+
+            if isinstance(benchmark, pd.DataFrame):
+                benchmark = benchmark['returns']
+
             returns_arr = returns.values
             benchmark_arr = benchmark.values
             mask = ~np.isnan(returns_arr) & ~np.isnan(benchmark_arr)
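The scipy comparison works because beta is exactly the OLS slope of returns regressed on the benchmark, i.e. cov(r, b) / var(b). A sketch of that identity (variable names here are illustrative, not from the test file):

    import numpy as np
    from scipy import stats

    rand = np.random.RandomState(1337)
    bench = rand.normal(0.001, 0.01, 1000)
    rets = 2.0 * bench + rand.normal(0.0, 0.001, 1000)
    slope = stats.linregress(bench, rets)[0]
    beta = np.cov(rets, bench)[0, 1] / np.var(bench, ddof=1)
    assert abs(slope - beta) < 1e-10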
@@ -943,7 +967,7 @@ def test_stability_of_timeseries(self, returns, expected):
         (empty_returns, np.nan),
         (one_return, 1.0),
         (mixed_returns, 0.9473684210526313),
-        (pd.Series(np.random.randn(100000)), 1.),
+        (pd.Series(rand.randn(100000)), 1.),
     ])
     def test_tail_ratio(self, returns, expected):
         assert_almost_equal(
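The 100,000-sample case expects 1.0 because the tail ratio compares the right tail to the left, and a large symmetric sample balances them. A sketch of the common percentile-based definition (assumed, not lifted from empyrical):

    import numpy as np

    # Ratio of the 95th percentile to the magnitude of the 5th.
    def tail_ratio_sketch(returns):
        return abs(np.percentile(returns, 95)) / abs(np.percentile(returns, 5))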
@@ -1250,7 +1274,7 @@ def test_value_at_risk(self):
         assert_almost_equal(value_at_risk(returns, cutoff=0.3), 81.5)

         # Test a returns stream of 21 data points at different cutoffs.
-        returns = np.random.normal(0, 0.02, 21)
+        returns = rand.normal(0, 0.02, 21)
         for cutoff in (0, 0.0499, 0.05, 0.20, 0.999, 1):
             assert_almost_equal(
                 value_at_risk(returns, cutoff),
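Historical VaR at a cutoff is simply the corresponding percentile of the return stream, which is presumably what the loop compares against (the right-hand side of the assertion is truncated in this excerpt):

    import numpy as np

    # Non-parametric VaR: the cutoff-quantile of observed returns.
    def value_at_risk_sketch(returns, cutoff=0.05):
        return np.percentile(returns, cutoff * 100)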
@@ -1262,7 +1286,7 @@ def test_conditional_value_at_risk(self):
         conditional_value_at_risk = self.empyrical.conditional_value_at_risk

         # A single-valued array will always just have a CVaR of its only value.
-        returns = np.random.normal(0, 0.02, 1)
+        returns = rand.normal(0, 0.02, 1)
         expected_cvar = returns[0]
         assert_almost_equal(
             conditional_value_at_risk(returns, cutoff=0), expected_cvar,
@@ -1272,7 +1296,7 @@ def test_conditional_value_at_risk(self):
         )

         # Test a returns stream of 21 data points at different cutoffs.
-        returns = np.random.normal(0, 0.02, 21)
+        returns = rand.normal(0, 0.02, 21)

         for cutoff in (0, 0.0499, 0.05, 0.20, 0.999, 1):
             # Find the VaR based on our cutoff, then take the average of all
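CVaR (expected shortfall) then averages everything at or below that VaR threshold, which is also why a single-element array's CVaR is its only value. A sketch of the hand computation the comment describes (the <= boundary is an assumption):

    import numpy as np

    # Mean of the returns at or below the VaR at the given cutoff.
    def cvar_sketch(returns, cutoff=0.05):
        returns = np.asarray(returns)
        var = np.percentile(returns, cutoff * 100)
        return np.mean(returns[returns <= var])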
@@ -1360,11 +1384,11 @@ def setUp(self):
         self.window = 12

         self.returns = pd.Series(
-            np.random.randn(1, 120)[0] / 100.,
+            rand.randn(1, 120)[0] / 100.,
             index=pd.date_range('2000-1-30', periods=120, freq='M'))

         self.factor_returns = pd.Series(
-            np.random.randn(1, 120)[0] / 100.,
+            rand.randn(1, 120)[0] / 100.,
             index=pd.date_range('2000-1-30', periods=120, freq='M'))

     def test_roll_pandas(self):
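A small style note on the fixture: `randn(1, 120)[0]` consumes the same 120 values from the stream as `randn(120)` and yields the same flat vector, so the simpler spelling would be equivalent:

    import numpy as np

    # Same stream, same values; only the intermediate shape differs.
    a = np.random.RandomState(7).randn(1, 120)[0]
    b = np.random.RandomState(7).randn(120)
    assert np.array_equal(a, b)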