# PCA tuto: http://www.sthda.com/english/wiki/principal-component-analysis-how-to-reveal-the-most-important-variables-in-your-data-r-software-and-data-mining
# Marie de Léséleuc Feb 2016
rm(list = ls())
#install.packages("reshape")
#install.packages("multcomp")
#install.packages("vcd")
#install.packages("Rcpp")
#install.packages("FactoMineR")
library(reshape)
library(multcomp)
library(vcd)
library(Rcpp)
library(FactoMineR)
# setting path to access my csv file
setwd("C:\\Users\\mdeleseleuc\\Documents")
mydata <- read.csv(file = "PCA.csv", header = TRUE, sep = ";")
mydata <- mydata[1:8,]
head(mydata)
str(mydata)
# drop the first column of mydata and cast the remaining columns as categorical (factor) or numeric
mydata.sub <- mydata[,2:length(mydata)]
for (i in 1:2) {
  mydata.sub[, i] <- as.factor(mydata.sub[, i])
}
for (i in 3:166) {
  mydata.sub[, i] <- as.numeric(mydata.sub[, i])
}
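# Caveat (sketch): if any of these columns were read in as factors (the old
# read.csv default with stringsAsFactors = TRUE), as.numeric() would return the
# factor level codes rather than the underlying values; converting via
# as.character() first avoids that, e.g.
# mydata.sub[, i] <- as.numeric(as.character(mydata.sub[, i]))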
str(mydata.sub)
# variables are numbered so they can be referred to by rank in the PCA output
var.rank <- data.frame(Variable = names(mydata.sub), Rank = 1:166)
# Summary data
summary(mydata.sub)
# Run PCA
mydata.sub.pca <- PCA(mydata.sub, scale.unit = TRUE, quali.sup = 1:2, graph=FALSE)
summary(mydata.sub.pca)
# The first 4 dimensions account for 68% of the variance
# The first 5 dimensions account for 80% of the variance
# Proportion of variation retained by the PCs
eigenvalues <- mydata.sub.pca$eig
head(eigenvalues[, 1:2])
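# Sketch: the third column of $eig holds the cumulative percentage of variance
# explained, which is where the 68% / 80% figures above come from
head(eigenvalues[, 3], 5)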
# Visualization of importance of PCs
library(devtools)
install_github("kassambara/factoextra")
library("factoextra")
fviz_screeplot(mydata.sub.pca, ncp=7)
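# Base-R alternative (sketch), useful if factoextra cannot be installed:
barplot(eigenvalues[, 2], names.arg = 1:nrow(eigenvalues),
        main = "Variance explained by each principal component",
        xlab = "Principal component", ylab = "Percentage of variance")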
# Take a look at the important dimensions
summary(mydata.sub.pca, nbelements = 166, ncp = 5) # ncp = 5 since the first 5 dimensions account for 80% of the variance
head(mydata.sub.pca$var$cos2)
dimdesc(mydata.sub.pca, axes = 1)
dimdesc(mydata.sub.pca, axes = 2)
dimdesc(mydata.sub.pca, axes = 3)
dimdesc(mydata.sub.pca, axes = 4)
dimdesc(mydata.sub.pca, axes = 5)
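# Equivalent single call (sketch): dimdesc() accepts a vector of axes,
# so the five calls above can be collapsed into one
dimdesc(mydata.sub.pca, axes = 1:5)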
# Coordinates of variables
head(mydata.sub.pca$var$coord)
fviz_pca_var(mydata.sub.pca)
plot(mydata.sub.pca, choix = 'var', cex = .7, axes = c(1, 2), new.plot = TRUE, xlim = c(-1, 1), ylim = c(-1, 1))
plot(mydata.sub.pca, choix = 'var', cex = .7, axes = c(3, 4), new.plot = TRUE, xlim = c(-1, 1), ylim = c(-1, 1))
# Quality of the representation for var.
head(mydata.sub.pca$var$cos2)
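# Sketch: total quality of representation of each variable over the first
# 5 retained dimensions (cos2 values are additive across components)
head(apply(mydata.sub.pca$var$cos2[, 1:5], 1, sum))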
# The closer a variable lies to the correlation circle, the better it is represented
fviz_pca_var(mydata.sub.pca, col.var="cos2") +
scale_color_gradient2(low="white", mid="blue",
high="red", midpoint=0.5) + theme_minimal()
# Contribution of the variables to the principal components
# The higher a variable's contribution to the first PCs, the more important that variable is
head(mydata.sub.pca$var$contrib, 10)
# Most important variables:
# variables above the red reference line contribute more than average to the component
# Contribution to PC1
x <- fviz_contrib(mydata.sub.pca, choice = "var", axes = 1)
mydata.sub.pca.PC1.var <- data.frame(x$data[which(x$data$val > 0.75),1]) # 0.75 = red line
# Contribution to PC2
y <- fviz_contrib(mydata.sub.pca, choice = "var", axes = 2)
mydata.sub.pca.PC2.var <- data.frame(y$data[which(y$data$val > 0.67),1])
# Total contribution on PC1 and PC2
z <- fviz_contrib(mydata.sub.pca, choice = "var", axes = 1:2)
mydata.sub.pca.PC1PC2.var <- data.frame(z$data[which(z$data$val > 37),1])
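# Note (sketch): for a single axis, the dashed red reference line drawn by
# fviz_contrib marks the expected average contribution if every variable
# contributed equally, i.e. 100 / number of active variables; the cut-offs
# hard-coded above correspond to the reference lines for this data set
100 / (ncol(mydata.sub) - 2)  # the 2 factor columns are supplementary, not active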
# Show only important variables
fviz_contrib(mydata.sub.pca, choice = "var", axes = 1, top = 50)
fviz_contrib(mydata.sub.pca, choice = "var", axes = 2, top = 50)
# Control variable colors using their contributions
fviz_pca_var(mydata.sub.pca, col.var="contrib")
# Change the gradient color
fviz_pca_var(mydata.sub.pca, col.var="contrib") +
scale_color_gradient2(low="white", mid="blue",
high="red", midpoint=50) + theme_minimal()
# Dimension description
# Use dimdesc() to identify the variables most correlated with a given PC
pca.desc <- dimdesc(mydata.sub.pca, axes = c(1,2,3,4))
# Description of dimension 1
x <- pca.desc$Dim.1
str(x)
# keep the table of quantitative variables (correlation with the PC and p-value)
x <- as.data.frame(x$quanti)
x <- na.omit(x)
x <- x[order(x$p.value), ]  # most significantly correlated variables first
# Description of dimension 2
pca.desc$Dim.2
# Description of dimension 3
pca.desc$Dim.3
# Cos2 : quality of the representation for individuals on the principal components
# The squared cosine shows the importance of a component for a given observation.
fviz_pca_ind(mydata.sub.pca, col.ind="cos2") +
scale_color_gradient2(low="white", mid="blue",
high="red", midpoint=0.50) + theme_minimal()
# Contribution of the individuals to the principal components
# The individuals contributing most to a given PC can be visualized as follows:
# Contributions of individuals to PC1
fviz_contrib(mydata.sub.pca, choice = "ind", axes = 1)
# Clustering
mydata.sub.pca.hcpc <- HCPC(mydata.sub.pca)
names(mydata.sub.pca.hcpc)
mydata.sub.pca.hcpc$desc.var
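# Further inspection (sketch): HCPC also returns the data with the cluster
# assignment appended (data.clust) and a description of the clusters by the
# principal axes (desc.axes)
head(mydata.sub.pca.hcpc$data.clust$clust)
mydata.sub.pca.hcpc$desc.axes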
# Description of dimension 4
pca.desc$Dim.4