forked from SciML/SciMLBook
-
Notifications
You must be signed in to change notification settings - Fork 42
/
Copy pathpi.jl
152 lines (121 loc) · 3.26 KB
/
pi.jl
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
using Distributions, Base.Threads, BenchmarkTools
"""
    estimate_pi(n)

Run a simple serial Monte Carlo simulation to estimate pi: draw `n`
points uniformly from the square [-1, 1]^2 and return 4 times the
fraction that landed inside the unit circle.
"""
function estimate_pi(n)
    count = 0
    for _ in 1:n
        # 2rand() - 1 samples Uniform(-1, 1) directly, avoiding the
        # overhead of constructing a Distributions.Uniform on every draw.
        x = 2rand() - 1
        y = 2rand() - 1
        # Bool promotes to Int when added.
        count += (x^2 + y^2) <= 1
    end
    return 4 * count / n
end
"""
    estimate_pi_inner_threads(n)

Monte Carlo estimate of pi with `n` total samples, parallelized by
threading over per-task chunks. Each chunk accumulates its hit count in a
task-local variable and writes it to its own slot, avoiding the unsafe
`counts[threadid()] +=` pattern (threadid can change across a task's
lifetime under task migration, racing the read-modify-write).
"""
function estimate_pi_inner_threads(n)
    ntasks = Threads.nthreads()
    counts = zeros(Int, ntasks)
    Threads.@threads for t in 1:ntasks
        # Chunk [lo, hi] partitions 1:n exactly across the ntasks slots.
        lo = div((t - 1) * n, ntasks) + 1
        hi = div(t * n, ntasks)
        hits = 0
        for _ in lo:hi
            x = 2rand() - 1
            y = 2rand() - 1
            hits += (x^2 + y^2) <= 1
        end
        counts[t] = hits
    end
    return 4 * sum(counts) / n
end
"""
Compute pi in parallel, over ncores cores, with a Monte Carlo simulation throwing N total darts
"""
# number 1
function estimate_pi_tasks_vector(N::Int)
    ntasks = Base.Threads.nthreads()
    darts_per_task = N ÷ ntasks
    # One spawned task per thread, each producing an independent estimate.
    tasks = map(1:ntasks) do _
        Threads.@spawn estimate_pi(darts_per_task)
    end
    # fetch blocks until each task completes; average the estimates.
    return sum(fetch(t) for t in tasks) / ntasks
end
# number 6
# Same chunked strategy as estimate_pi_tasks_vector, but driven by
# Threads.@threads over the chunk indices instead of explicit @spawn/@sync.
function estimate_pi_6(N::Int)
    nchunks = Base.Threads.nthreads()
    estimates = Vector{Float64}(undef, nchunks)
    darts_per_chunk = N ÷ nchunks
    Threads.@threads for k in 1:nchunks
        estimates[k] = estimate_pi(darts_per_chunk)
    end
    return sum(estimates) / nchunks
end
import ThreadsX
"""
    throw_dart()

Throw one dart uniformly at the square [-1, 1]^2 and return `true`
(as a `Bool`) if it landed inside the unit circle.
"""
function throw_dart()
    # 2rand() - 1 samples Uniform(-1, 1) without building a
    # Distributions.Uniform object on every call.
    x = 2rand() - 1
    y = 2rand() - 1
    return (x^2 + y^2) <= 1
end
# Parallel estimate via ThreadsX: count hits over N dart throws, then scale.
estimate_pi_ThreadX(N) = 4 * ThreadsX.sum(_ -> throw_dart(), 1:N) / N
"""
Compute pi in parallel, over ncores cores, with a Monte Carlo simulation throwing N total darts with channels
"""
# number 2
function estimate_pi_tasks_channel(N::Int)
    nworkers = Base.Threads.nthreads()
    # Buffered channel so every worker can put! without blocking.
    results = Channel{Float64}(nworkers)
    darts_per_task = N ÷ nworkers
    @sync for _ in 1:nworkers
        Threads.@spawn put!(results, estimate_pi(darts_per_task))
    end
    # All puts have completed at this point; drain and average.
    total = 0.0
    for _ in 1:nworkers
        total += take!(results)
    end
    return total / nworkers
end
"""
    Counter{T}

Mutable wrapper around a single value. The `val` field is declared
`@atomic`, so concurrent read-modify-write updates such as
`@atomic c.val += x` from multiple threads are data-race free.
"""
mutable struct Counter{T}
    @atomic val::T
end
"""
    estimate_pi_atomic(n)

Monte Carlo estimate of pi with `n` samples, parallelized with
`Threads.@threads`, where every iteration increments a single shared
atomic counter.

NOTE(review): the per-iteration `@atomic` update serializes all threads on
one counter; this variant appears intended to benchmark that contention
against the other strategies in this file.
"""
# number 3
function estimate_pi_atomic(n)
    count = Counter(0)
    Threads.@threads for i=1:n
        x = rand(Uniform(-1.0, 1.0))
        y = rand(Uniform(-1.0, 1.0))
        # Bool promotes to Int; the atomic += is the contention point.
        @atomic count.val += (x^2 + y^2) <= 1
    end
    return 4*count.val/n
end
# number 4
"""
    estimate_pi_tasks_atomic(N)

Spawn one task per thread; each task computes its own pi estimate over
`N ÷ nthreads` darts and folds it into a shared atomic accumulator.
Returns the mean of the per-task estimates.
"""
function estimate_pi_tasks_atomic(N::Int)
    ntasks = Base.Threads.nthreads()
    n = N ÷ ntasks          # darts per task (remainder N % ntasks is dropped)
    # Local accumulator named `total` (not `sum`) so Base.sum is not shadowed.
    total = Counter(0.0)
    @sync for _ in 1:ntasks
        Threads.@spawn begin
            # One atomic update per task, so contention is negligible here.
            @atomic total.val += estimate_pi(n)
        end
    end
    return total.val / ntasks
end
# number 5
# Degenerate variant: every threaded iteration throws a single dart
# (estimate_pi(1) yields 0.0 or 4.0), then the samples are averaged.
function estimate_pi_5(N::Int)
    samples = zeros(N)
    Threads.@threads for idx in eachindex(samples)
        samples[idx] = estimate_pi(1)
    end
    return sum(samples) / N
end
# Benchmark every variant (up to 1 s each) and report slowdown/speedup
# ratios relative to the serial baseline. `$N` interpolates the global into
# the benchmarked expression so BenchmarkTools measures the call itself
# rather than untyped-global access (N is a non-const global).
N = 2_000_000
serial = @belapsed estimate_pi($N) seconds=1
pchannel = @belapsed estimate_pi_tasks_channel($N) seconds=1
pvector = @belapsed estimate_pi_tasks_vector($N) seconds=1
patomic = @belapsed estimate_pi_atomic($N) seconds=1
ptasksatomic = @belapsed estimate_pi_tasks_atomic($N) seconds=1
p5 = @belapsed estimate_pi_5($N) seconds=1
p6 = @belapsed estimate_pi_6($N) seconds=1
p7 = @belapsed estimate_pi_inner_threads($N) seconds=1
p8 = @belapsed estimate_pi_ThreadX($N) seconds=1
# Ratios < 1 mean the parallel variant beat the serial baseline.
pvector/serial, pchannel/serial, patomic/serial, ptasksatomic/serial, p5/serial, p6/serial, p7/serial, p8/serial