@@ -1307,14 +1307,15 @@ def plot_poisson_consistency_test(eval_results, normalize=False, one_sided_lower
     capsize = plot_args.get('capsize', 4)
     hbars = plot_args.get('hbars', True)
     tight_layout = plot_args.get('tight_layout', True)
+    percentile = plot_args.get('percentile', 95)

     fig, ax = pyplot.subplots(figsize=figsize)
     xlims = []
     for index, res in enumerate(results):
         # handle analytical distributions first, they are all in the form ['name', parameters].
         if res.test_distribution[0] == 'poisson':
-            plow = scipy.stats.poisson.ppf(0.025, res.test_distribution[1])
-            phigh = scipy.stats.poisson.ppf(0.975, res.test_distribution[1])
+            plow = scipy.stats.poisson.ppf((1 - percentile / 100.) / 2., res.test_distribution[1])
+            phigh = scipy.stats.poisson.ppf(1 - (1 - percentile / 100.) / 2., res.test_distribution[1])
             observed_statistic = res.observed_statistic
         # empirical distributions
         else:
@@ -1326,11 +1327,11 @@ def plot_poisson_consistency_test(eval_results, normalize=False, one_sided_lower
                 observed_statistic = res.observed_statistic
             # compute distribution depending on type of test
             if one_sided_lower:
-                plow = numpy.percentile(test_distribution, 5)
+                plow = numpy.percentile(test_distribution, 100 - percentile)
                 phigh = numpy.percentile(test_distribution, 100)
             else:
-                plow = numpy.percentile(test_distribution, 2.5)
-                phigh = numpy.percentile(test_distribution, 97.5)
+                plow = numpy.percentile(test_distribution, (100 - percentile) / 2.)
+                phigh = numpy.percentile(test_distribution, 100 - (100 - percentile) / 2.)

         if not numpy.isinf(observed_statistic):  # check that the test result does not diverge
             low = observed_statistic - plow
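A minimal standalone sketch of the interval logic this change introduces: for a requested percentile, the analytical (Poisson) branch takes symmetric quantiles from the inverse CDF, while the empirical branch takes the matching numpy.percentile bounds. The rate mu and the synthetic test_distribution below are illustrative values, not part of the change.

import numpy
import scipy.stats

percentile = 95                # new plot_args entry; 95 reproduces the old hard-coded 2.5/97.5 bounds
mu = 120.0                     # illustrative Poisson rate (e.g., a forecast's expected event count)

# analytical (Poisson) branch: symmetric two-sided bounds from the inverse CDF
alpha = (1 - percentile / 100.) / 2.
plow = scipy.stats.poisson.ppf(alpha, mu)
phigh = scipy.stats.poisson.ppf(1 - alpha, mu)

# empirical branch: the same percentile applied to a simulated test distribution
test_distribution = numpy.random.default_rng(0).poisson(mu, size=10_000)
plow_emp = numpy.percentile(test_distribution, (100 - percentile) / 2.)
phigh_emp = numpy.percentile(test_distribution, 100 - (100 - percentile) / 2.)

# one-sided lower test: only the lower bound moves with the percentile
plow_low = numpy.percentile(test_distribution, 100 - percentile)
phigh_low = numpy.percentile(test_distribution, 100)

print(plow, phigh, plow_emp, phigh_emp, plow_low, phigh_low)

With the patched function, the same choice is made at the call site by passing the value through plot_args (e.g. plot_args={'percentile': 99}); the default of 95 keeps the previous behaviour.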