remove redundant types & sample ylats before w1
zhaotianjing committed Mar 16, 2021
1 parent 230bef8 commit 0c6e91e
Showing 6 changed files with 23 additions and 72 deletions.
src/1.JWAS/src/JWAS.jl — 19 changes: 2 additions & 17 deletions
@@ -183,9 +183,9 @@ function runMCMC(mme::MME,df;

#Nonlinear
if mme.latent_traits == true
- yobs = df_whole[!,Symbol(string(Symbol(mme.lhsVec[1]))[1:(end-1)])]
+ yobs = df[!,Symbol(string(Symbol(mme.lhsVec[1]))[1:(end-1)])]
for i in mme.lhsVec
-     df_whole[!,i]= yobs
+     df[!,i]= yobs
end
end
#for deprecated JWAS functions
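
For context, the replicated-trait setup above can be reproduced in isolation. A minimal sketch, assuming a hypothetical observed column `y` and latent traits `y1`, `y2` (latent names are the observed name plus a digit, so dropping the last character recovers the observed column):

```julia
using DataFrames

df     = DataFrame(y = [1.0, 2.0, 3.0])   # observed trait
lhsVec = [:y1, :y2]                       # latent-trait names, standing in for mme.lhsVec

yobs = df[!, Symbol(string(lhsVec[1])[1:end-1])]   # "y1" -> "y": recover the observed column
for i in lhsVec
    df[!, i] = yobs                       # seed every latent trait with the observed values
end
```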
@@ -326,21 +326,6 @@ function runMCMC(mme::MME,df;
CSV.write(output_folder*"/"*replace(key," "=>"_")*".txt",value)
end

- #save the samples from last iteration to help re-train.
- #will be deleted later
- if mme.hmc == true
-     writedlm(output_folder*"/hmc_n_latent_trait.txt",mme.M[1].ntraits,',')
-     writedlm(output_folder*"/hmc_Z.txt",mme.Z,',')
-     writedlm(output_folder*"/hmc_W1.txt",mme.W1,',')
-     writedlm(output_folder*"/hmc_W0.txt",mme.W0,',')
-     writedlm(output_folder*"/hmc_Mu0.txt",mme.Mu0,',')
-     writedlm(output_folder*"/hmc_mu.txt",mme.mu,',')
-     writedlm(output_folder*"/hmc_σ2_yobs.txt",mme.σ2_yobs,',')
-     writedlm(output_folder*"/hmc_vare_mean.txt",mme.vare_mean,',')
-     writedlm(output_folder*"/hmc_varw_mean.txt",mme.varw_mean,',')
-     writedlm(output_folder*"/hmc_varw.txt",mme.varw,',')
- end

if mme.M != 0
for Mi in mme.M
if Mi.name == "GBLUP"
src/1.JWAS/src/MCMC/MCMC_BayesianAlphabet.jl — 25 changes: 4 additions & 21 deletions
@@ -166,17 +166,10 @@ function MCMC_BayesianAlphabet(mme,df)
############################################################################
# MCMC (starting values for sol (zeros); mme.RNew; G0 are used)
############################################################################
- # Initialize mme for hmc before Gibbs
- if mme.hmc == true
-     num_latent_traits=mme.M[1].ntraits
-     nMarkers=mme.M[1].nMarkers
-
-     mme.W0 = Array{Float32,2}(undef, nMarkers, num_latent_traits)
-     mme.Z = reshape(mme.ySparse,length(mme.obsID),num_latent_traits) #zeros(length(mme.obsID),num_latent_traits)#
-     mme.W1 = zeros(num_latent_traits)
-     mme.mu = mean(mme.ySparse)
-     mme.vare_mean = 0
-     mme.varw_mean = 0
+ # Initialize mme for hmc before Gibbs
+ if mme.latent_traits == true
+     num_latent_traits = mme.M[1].ntraits
+     mme.weights_NN = vcat(mean(mme.ySparse),zeros(num_latent_traits))
end

@showprogress "running MCMC ..." for iter=1:chain_length
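
The new initialization packs what used to live in `W1`, `mu`, and friends into the single `weights_NN` vector: entry 1 is the output-layer bias, started at the grand mean, followed by one output weight per latent trait, started at zero. A minimal sketch of that layout, with made-up sizes:

```julia
using Statistics

num_latent_traits = 3
ySparse = randn(500 * num_latent_traits)   # stacked latent traits, standing in for mme.ySparse

weights_NN = vcat(mean(ySparse), zeros(num_latent_traits))   # layout: [μ1; w1]
@assert length(weights_NN) == 1 + num_latent_traits
```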
@@ -218,9 +211,6 @@ function MCMC_BayesianAlphabet(mme,df)
Gibbs(mme.mmeLhs,mme.sol,mme.mmeRhs,mme.R)
end

- if mme.hmc == true
-     mme.Mu0 = mme.sol
- end
ycorr[:] = ycorr - mme.X*mme.sol
########################################################################
# 2. Marker Effects
@@ -272,12 +262,6 @@ function MCMC_BayesianAlphabet(mme,df)
GBLUP!(Mi,ycorr,mme.R,invweights)
end
end
- #update W0 in HMC (marker effect)
- if mme.hmc == true
-     for i in 1:num_latent_traits
-         mme.W0[:,i] = Mi.α[i]
-     end
- end
########################################################################
# Marker Inclusion Probability
########################################################################
@@ -379,7 +363,6 @@ function MCMC_BayesianAlphabet(mme,df)
nsamples = (iter-burnin)/output_samples_frequency
output_posterior_mean_variance(mme,nsamples)
#mean and variance of posterior distribution
- mme.weights_NN= [mme.mu;mme.W1]
output_MCMC_samples(mme,mme.R,(mme.pedTrmVec!=0 ? inv(mme.Gi) : false),outfile)
if causal_structure != false
writedlm(causal_structure_outfile,sample4λ',',')
src/1.JWAS/src/Nonlinear/bnn_hmc.jl — 9 changes: 4 additions & 5 deletions
@@ -43,11 +43,11 @@ end

# helper 2: calculate log p(z|y) to help calculate the acceptance rate
function calc_log_p_z(ylats,yobs,weights_NN,σ_ylats,σ_yobs,ycorr)
- μ = weights_NN[1]
+ μ1 = weights_NN[1]
w1 = weights_NN[2:end]
#logfz = -0.5*sum(((Z-ones(n)*Mu0'-X*W0).^2)*inv(Sigma2z),dims=2) .- (0.5*log(prod(diag(Sigma2z))))
logf_ylats = -0.5*sum((ycorr.^2)*inv(σ_ylats),dims=2) .- (0.5*log(prod(diag(σ_ylats))))
- logfy = -0.5*(yobs .- μ - tanh.(ylats)*w1).^2 /σ_yobs .- 0.5*log(σ_yobs)
+ logfy = -0.5*(yobs .- μ1 - tanh.(ylats)*w1).^2 /σ_yobs .- 0.5*log(σ_yobs)
log_p_ylats= logf_ylats + logfy

return log_p_ylats #size: (n,1)
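
For reference, `calc_log_p_z` evaluates, per record and up to constants, log p(z_i | y_i) = log p(z_i) + log p(y_i | z_i), with y_i | z_i ~ N(μ1 + tanh(z_i)'w1, σ²_yobs) and `ycorr` holding z − E[z]. A standalone sketch under those assumptions — not the library API, and with `logdet` standing in for the diagonal-only `log(prod(diag(·)))` shortcut above:

```julia
using LinearAlgebra

# z: n×l latent traits, y: length-n observed trait, zresid: n×l residuals z - E[z]
function log_p_z_sketch(z, y, μ1, w1, Σz, σ2y, zresid)
    # prior term: z_i ~ N(E[z_i], Σz); matches the diagonal-Σz form used above
    logf_z = -0.5 .* vec(sum((zresid .^ 2) * inv(Σz), dims = 2)) .- 0.5 * logdet(Σz)
    # likelihood term: y_i ~ N(μ1 + tanh(z_i)'w1, σ2y)
    logf_y = -0.5 .* (y .- μ1 .- tanh.(z) * w1) .^ 2 ./ σ2y .- 0.5 * log(σ2y)
    return logf_z .+ logf_y    # one log density per record
end
```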
@@ -58,12 +58,11 @@ end
function hmc_one_iteration(nLeapfrog,ϵ,ylats_old,yobs,weights_NN,σ_ylats,σ_yobs,ycorr)
nobs, ntraits = size(ylats_old)
ylats_old = copy(ylats_old)
- log_p_old = calc_log_p_z(ylats_old,yobs,weights_NN,σ_ylats,σ_yobs,ycorr) - 0.5*sum(Φ.^2,dims=2) #(n,1)
-
ylats_new = copy(ylats_old)

#step 1: Initiate Φ from N(0,M)
Φ = randn(nobs, ntraits) #rand(n,Normal(0,M=1.0)), tuning parameter: M
+ log_p_old = calc_log_p_z(ylats_old,yobs,weights_NN,σ_ylats,σ_yobs,ycorr) - 0.5*sum(Φ.^2,dims=2) #(n,1)
#step 2: update (ylats,Φ) from 10 leapfrog
#2(a): update Φ
Φ += 0.5 * ϵ * calc_gradient_z(ylats_new,yobs,weights_NN,σ_ylats,σ_yobs,ycorr) #(n,l1)
@@ -84,7 +83,7 @@ function hmc_one_iteration(nLeapfrog,ϵ,ylats_old,yobs,weights_NN,σ_ylats,σ_yobs,ycorr)
#Step3. acceptance rate
log_p_new = calc_log_p_z(ylats_new,yobs,weights_NN,σ_ylats,σ_yobs,ycorr) - 0.5*sum(Φ.^2,dims=2) #(n,1)
r = exp.(log_p_new - log_p_old) # (n,1)
- nojump = rand(n) .> r # bool (n,1)
+ nojump = rand(nobs) .> r # bool (n,1)

for i in 1:nobs
if nojump[i]
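
A hypothetical call mirroring the invocation in `nonlinear.jl` (10 leapfrog steps, step size 0.1), just to make the expected shapes explicit; all sizes and values here are made up:

```julia
using LinearAlgebra

n, l1   = 100, 2                  # records, latent traits
ylats   = randn(n, l1)            # current latent traits (reshaped mme.ySparse)
yobs    = randn(n)                # observed trait
weights = vcat(0.0, randn(l1))    # weights_NN = [μ1; w1]
Σz      = Matrix(1.0I, l1, l1)    # residual covariance of the latent traits (mme.R)
σ2y     = 1.0                     # residual variance of the observed trait
ycorr   = randn(n, l1)            # latent-trait residuals

ylats_new = hmc_one_iteration(10, 0.1, ylats, yobs, weights, Σz, σ2y, ycorr)
```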
src/1.JWAS/src/Nonlinear/nonlinear.jl — 22 changes: 11 additions & 11 deletions
@@ -18,17 +18,6 @@ function sample_latent_traits(yobs,mme,ycorr,nonlinear_function)
ylats_old = reshape(ylats_old,nobs,ntraits) #Tianjing's mme.Z
μ_ylats = reshape(μ_ylats,nobs,ntraits)

- if nonlinear_function == "Neural Network" #sample weights
-     X = [ones(nobs) tanh.(ylats_old)]
-     lhs = X'X + I*0.00001
-     Ch = cholesky(lhs)
-     L = Ch.L
-     iL = inv(L)
-     rhs = X'yobs
-     weights = Ch\rhs + iL'randn(size(X,2))*sqrt(σ2_yobs)
-     mme.weights_NN = weights
- end

if nonlinear_function == "Neural Network" #HMC
ylats_new = hmc_one_iteration(10,0.1,ylats_old,yobs,mme.weights_NN,mme.R,σ2_yobs,reshape(ycorr,nobs,ntraits))
else
@@ -47,6 +36,17 @@ function sample_latent_traits(yobs,mme,ycorr,nonlinear_function)
ylats_new = candidates.*updateus + ylats_old.*(.!updateus)
end

+ if nonlinear_function == "Neural Network" #sample weights
+     X = [ones(nobs) tanh.(ylats_new)]
+     lhs = X'X + I*0.00001
+     Ch = cholesky(lhs)
+     L = Ch.L
+     iL = inv(L)
+     rhs = X'yobs
+     weights = Ch\rhs + iL'randn(size(X,2))*sqrt(σ2_yobs)
+     mme.weights_NN = weights
+ end

mme.ySparse = vec(ylats_new)
ycorr[:] = mme.ySparse - vec(μ_ylats) # =(ylats_new - ylats_old) + ycorr: update residuals (ycorr)

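
The relocated block is a standard conjugate draw: with X = [1 tanh(z)], the output weights have full conditional N((X'X + λI)⁻¹X'y, σ²_yobs (X'X + λI)⁻¹), sampled through the Cholesky factor of the left-hand side. Moving it below the HMC update is the "sample ylats before w1" of the commit title: w1 now conditions on the freshly sampled latent traits. A self-contained sketch, assuming the same ridge term λ = 1e-5 as in the diff:

```julia
using LinearAlgebra

function sample_nn_weights(ylats_new, yobs, σ2_yobs)
    X   = [ones(size(ylats_new, 1)) tanh.(ylats_new)]   # design: intercept + activations
    lhs = X'X + 1e-5 * I                                # X'X + λI keeps the system invertible
    Ch  = cholesky(Symmetric(lhs))
    # posterior mean (lhs \ X'y) plus a draw from N(0, σ2_yobs * inv(lhs))
    return Ch \ (X'yobs) + inv(Ch.L)' * (randn(size(X, 2)) * sqrt(σ2_yobs))
end
```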
src/1.JWAS/src/build_MME.jl — 8 changes: 1 addition & 7 deletions
@@ -35,8 +35,7 @@ models = build_model(model_equations,R);
```
"""
function build_model(model_equations::AbstractString, R = false; df = 4.0,
-                num_latent_traits = false, hmc = false,
-                nonlinear_function = false) #nonlinear_function(x1,x2) = x1+x2
+                num_latent_traits = false, nonlinear_function = false) #nonlinear_function(x1,x2) = x1+x2
if num_latent_traits != false
lhs, rhs = strip.(split(model_equations,"="))
model_equations = ""
@@ -116,11 +115,6 @@ function build_model(model_equations::AbstractString, R = false; df = 4.0,
if nonlinear_function != false
mme.nonlinear_function = nonlinear_function
end

- if hmc == true
-     mme.hmc = hmc
- end
- ######## END tianjing
end

return mme
src/1.JWAS/src/types.jl — 12 changes: 1 addition & 11 deletions
@@ -243,15 +243,6 @@ mutable struct MME
weights_NN
σ2_yobs

- # tianjing HMC
- W1         #W1, vector
- W0         #marker effects, matrix of p by l1, each column is for a latent trait
- Mu0        #Mu0; vector
- mu         #overall mean of observed trait; scalar
- vare_mean  #moving averaged value of vare
- varw       #variance of all weights and bias
- varw_mean  #moving averaged value of varw
- fixed_varz #user-provided fixed value for varz; e.g., for 2 latent traits, can be [0.5 0; 0 0.6]

function MME(nModels,modelVec,modelTerms,dict,lhsVec,R,ν)
if nModels == 1
@@ -277,7 +268,6 @@ mutable struct MME
0,
false,false,false,
false,
- false,false,false,1.0,
- false,false,false,false,false,false,false,false,false,false) # <- tianjing hmc
+ false,false,false,1.0)
end
end
