Sunday, September 23, 2018

Basics of R- Session 17- Factor analysis



# Factor analysis --------------------------------------------------------

# NOTE(review): the original loaded psych three separate times; loading
# it once at the top is sufficient.
library(psych)        # cortest.bartlett(), KMO(), principal()
library(Hmisc)
library(stats)
library(HSAUR)
library(FactoMineR)

# Import the data set (local path -- adjust to your machine).
factoranalysis <- read.csv("C:/Users/Administrator.vaio1/Desktop/factor1.1.csv")

# Method 1: principal component analysis ---------------------------------
# principal() is used further below, first with default options.

# Bartlett's test of sphericity: checks whether the correlation matrix
# differs significantly from an identity matrix (a prerequisite for
# factor analysis).  Passing raw data lets psych derive the
# correlation matrix itself.
cortest.bartlett(factoranalysis)

# Kaiser-Meyer-Olkin measure of sampling adequacy (values above ~0.6
# are usually considered acceptable for factor analysis).
KMO(factoranalysis)



# Principal component extraction with psych::principal(), first with all
# defaults (one component, varimax rotation).
PCA2<-principal(factoranalysis)
PCA2$rotation     # name of the rotation that was applied
PCA2$values       # eigenvalues of the correlation matrix
PCA2$communality  # variance in each variable explained by the components
PCA2$factors      # number of components extracted
PCA2$scores       # component scores (NULL unless scores = TRUE)

# Change the options in principal(): 6 components, varimax rotation,
# n.obs = 414 observations, scores computed by the regression method.
# NOTE(review): oblique.scores = TRUE combined with the orthogonal
# "varimax" rotation looks inconsistent -- confirm which is intended.
PCA2<-principal(factoranalysis,nfactors = 6, residuals = FALSE, rotate = "varimax", n.obs = 414,covar = FALSE,scores = TRUE, missing = FALSE, impute = "median", oblique.scores = TRUE, method = "regression" )

PCA2$rotation # other rotate options: "none", "quartimax", "promax", "oblimin", "simplimax", "cluster"

PCA2$values       # eigenvalues
PCA2$communality  # communalities per variable
PCA2$factors      # number of components extracted
PCA2$scores       # component scores for each observation
PCA2$loadings     # component loadings
PCA2$weights      # weights used to compute the scores
PCA2$rot.mat      # the rotation matrix
PCA2$chi          # chi-square of the fit



Basics of R- Session 16- Discriminant Analysis

Discriminant analysis for two groups

# NOTE(review): clears the entire workspace -- acceptable for a classroom
# session, but avoid in reusable scripts.
rm(list=ls())

# Discriminant analysis

# Import the data set (local path -- adjust to your machine).

da1<-read.csv("C:/Users/Administrator.vaio1/Desktop/discriminant two group hair.csv")
fix(da1)   # opens the interactive data editor (interactive sessions only)

library(MASS)        # lda()
library(psych)
library(Hmisc)
library(stats)
library(HSAUR)
library(dawai)
#library(rattle)
library(car)
library(MVN)         # mvn() multivariate normality tests
library(perturb)
library(biotools)    # boxM()
library(FactoMineR)
library(DiscriMiner) # linDA(), discPower()


# Create a subset of the data for simplicity: the grouping variable x2
# plus the predictors x6-x18 (columns 3 and 7:19 of da1).

dasub1<-da1[,c(3,7:19)]
fix(dasub1)

# Henze-Zirkler multivariate normality test, run separately within each
# level of the grouping variable.

library(MVN)

# BUG FIX(review): dasub1 was built as da1[, c(3, 7:19)] (x2 plus
# x6-x18), so the grouping column is x2 -- the original passed
# subset = "x4", which is not a column of dasub1.
multinormality <- mvn(dasub1, subset = "x2", mvnTest = "hz")

# mvnTest options: "mardia" for Mardia's test,
# "hz" for Henze-Zirkler's test,
# "royston" for Royston's test,
# "dh" for Doornik-Hansen's test,
# and "energy" for the E-statistic.

multinormality$multivariateNormality

# Outliers can also be identified (showOutliers = TRUE reports them;
# NOTE(review): mvn() may also need multivariateOutlierMethod set --
# check the MVN documentation for your package version).
multinormality <- mvn(dasub1, subset = "x2", mvnTest = "hz", showOutliers = TRUE)
multinormality$multivariateOutliers
# Multicollinearity could be checked here as well.
#multicollinearity

# Homogeneity of the variance-covariance matrices (Box's M test) ---------

# Visual check first: distribution of x6 within each group of x2.
boxplot(dasub1$x6 ~ dasub1$x2)

# Box's M test: boxM(IVs, grouping variable).
library(biotools)
boxM(dasub1[, -1], dasub1$x2)

# Wilks' lambda ----------------------------------------------------------
library(rrcov)
Wilks.test(dasub1[, -1], dasub1$x2)
# or, equivalently, with the formula interface.
# BUG FIX(review): the original had a bare `or` line (a syntax error)
# and grouped by x4, which is not a column of dasub1 (the group is x2).
Wilks.test(x2 ~ ., data = dasub1)


# Eigenvalue and canonical correlation ------------------------------------

library(candisc)
# Multivariate regression of the predictors on the group, followed by
# canonical discriminant analysis.
# BUG FIX(review): the original grouped by dasub1$x4; the grouping
# column in dasub1 is x2 (it may also need to be a factor -- see the
# factor() conversion further below).
reg3 <- lm(as.matrix(dasub1[, -1]) ~ dasub1$x2)
regcanno <- candisc(reg3)
regcanno


# Discriminant analysis ---------------------------------------------------
library(MASS)

# Linear discriminant analysis of group x2 on x6-x18.
# BUG FIX(review): the original passed cv = TRUE, but MASS::lda's
# argument is CV (capitalised), so it was silently ignored.  With
# CV = TRUE lda() returns leave-one-out predictions instead of a fitted
# model, so the component extractions below require the default
# CV = FALSE; the ignored argument has simply been removed.
da11 <- lda(x2 ~ x6 + x7 + x8 + x9 + x10 + x11 + x12 + x13 + x14 + x15 +
              x16 + x17 + x18, data = da1, na.action = "na.omit")
summary(da11)
da11$prior     # prior probabilities of the groups
da11$counts    # group sizes
da11$means     # group means of each predictor
da11$scaling   # canonical discriminant function coefficients
da11$svd       # singular values (between/within group sd ratios)

# Linear discriminant analysis: Fisher classification functions -----------
library(DiscriMiner)
dasub1$x2 <- factor(dasub1$x2)   # linDA() expects a factor group

da12 <- linDA(dasub1[, -1], dasub1$x2)
da12
summary(da12)
da12$functions       # Fisher classification functions
da12$confusion       # confusion matrix
da12$scores          # discriminant scores
da12$classification  # predicted group membership
da12$error_rate      # misclassification rate

discPower(dasub1[, -1], dasub1$x2)  # discriminating power, Wilks' lambda


Sunday, September 16, 2018

Basics of R- Session 15 Multiple Regression 2

# Machine learning regression: ridge and lasso (glmnet directly) ----------

# BUG FIX(review): the original package vector contained stray ";"
# entries ('";"', '"glmnet;"', ...) which are not package names.
install.packages(c("caret", "glmnet", "mlbench", "psych"))

library(caret)
library(glmnet)
library(mlbench)
library(psych)
library(ggplot2)

# Data set: reg2 (local path -- adjust to your machine).
reg1 <- read.csv("C:/Users/Administrator.vaio1/Desktop/reg2.csv")

# Keep only the variables used in the analysis; glmnet needs matrices.
reg1X <- reg1[, 8:19]
reg1X <- as.matrix(reg1X)
fix(reg1X)

reg1Y <- as.matrix(reg1[, 20])
fix(reg1Y)

# Grid of 100 lambda values from 10^10 down to 10^-2.
lambda1 <- 10^seq(10, -2, length = 100)

# Ridge regression (alpha = 0).
ridge1 <- glmnet(reg1X, reg1Y, alpha = 0, lambda = lambda1)
plot(ridge1)

ridge1$lambda[1]
coef(ridge1)[, 1]    # coefficients of the first (largest-lambda) model

ridge1$lambda[2]
coef(ridge1)[, 2]    # coefficients of the second model

ridge1$lambda[100]
coef(ridge1)[, 100]  # coefficients of the last (smallest-lambda) model

# Lasso regression (alpha = 1).
lasso1 <- glmnet(reg1X, reg1Y, alpha = 1, lambda = lambda1)
plot(lasso1)         # BUG FIX(review): the original re-plotted ridge1 here

lasso1$lambda[1]
coef(lasso1)[, 1]    # coefficients of the first model

lasso1$lambda[2]
coef(lasso1)[, 2]    # coefficients of the second model

lasso1$lambda[100]
coef(lasso1)[, 100]  # coefficients of the last model

#-------------------------------------#
#--------------------------------------------------------------#

# Machine learning regression with caret: linear baseline and ridge -------

# BUG FIX(review): the original package vector contained stray ";"
# entries which are not package names.
install.packages(c("caret", "glmnet", "mlbench", "psych"))

library(caret)
library(glmnet)
library(mlbench)
library(psych)
library(ggplot2)


reg1 <- read.csv("C:/Users/Administrator.vaio1/Desktop/reg2.csv")
reg1 <- reg1[, 8:20]

# Ordinary linear regression as a baseline.
lm <- train(X19 ~ ., reg1, method = "lm")
summary(lm)

# Ridge regression: alpha fixed at 0, lambda tuned over a grid.
ridge1 <- train(X19 ~ ., reg1, method = "glmnet",
                tuneGrid = expand.grid(alpha = 0,
                                       lambda = seq(0.0001, 1, length = 100)))

ridge1
plot(ridge1)
plot(ridge1$finalModel)
plot(ridge1$finalModel, xvar = "lambda", label = TRUE)

# Tuned parameters (alpha and lambda).
# BUG FIX(review): the original referenced an undefined object `ridge`;
# the fitted object is `ridge1`.  Also, coef()'s `s` argument selects a
# lambda value -- passing bestTune$alpha there was wrong.
ridge1
ridge1$bestTune

# Coefficients of the best model, reported at the tuned lambda.
coef(ridge1$finalModel, s = ridge1$bestTune$lambda)

plot(varImp(ridge1))


# Best ridge model.
ridge1$bestTune
bestridge <- ridge1$finalModel
coef(bestridge, s = ridge1$bestTune$lambda)




# Lasso regression: alpha fixed at 1, lambda tuned over a grid.

lasso1 <- train(X19 ~ ., reg1, method = "glmnet",
                tuneGrid = expand.grid(alpha = 1,
                                       lambda = seq(0.0001, 1, length = 100)))

lasso1
plot(lasso1)
plot(lasso1$finalModel)
plot(lasso1$finalModel, xvar = "lambda", label = TRUE)
plot(lasso1$finalModel, xvar = "dev", label = TRUE)
plot(varImp(lasso1))


# Best lasso model.
lasso1$bestTune
bestlasso <- lasso1$finalModel
coef(bestlasso, s = lasso1$bestTune$lambda)  # BUG FIX(review): was `bestlaso`


# Elastic net regression: tune alpha (0..1) and lambda jointly over a
# 10 x 100 grid.

enet_grid <- expand.grid(alpha  = seq(0, 1, length = 10),
                         lambda = seq(0.0001, 1, length = 100))

en1 <- train(X19 ~ ., reg1, method = "glmnet", tuneGrid = enet_grid)

# Diagnostic plots: tuning profile, coefficient paths (by lambda and by
# fraction of deviance explained), and variable importance.
plot(en1)
plot(en1$finalModel)
plot(en1$finalModel, xvar = "lambda", label = TRUE)
plot(en1$finalModel, xvar = "dev", label = TRUE)
plot(varImp(en1))

# Best elastic net model: tuned parameters and its coefficients at the
# selected lambda.
en1$bestTune
besten <- en1$finalModel
coef(besten, s = en1$bestTune$lambda)







Basics of R- Session 15 Multiple Regression 1

# NOTE(review): clears the entire workspace -- acceptable for a classroom
# session, but avoid in reusable scripts.
rm(list=ls())

# Import the data set (local path -- adjust to your machine).
reg1<-read.csv("C:/Users/Administrator.vaio1/Desktop/reg2.csv")

# Check / explore the file.
str(reg1)
head(reg1)

# Descriptive statistics of all the variables.

library(psych)
describe(reg1)    # psych::describe -- n, mean, sd, skew, kurtosis, ...

library(pastecs)
stat.desc(reg1)   # pastecs::stat.desc -- similar descriptive summary


# here we will  use x19 satisfaction as dependent variable
# and X7 - E-Commerce Activities,X8 - Technical Support,
#X9 - Complaint,Resolution,X10 - Advertising,X11 - Product Line,
#X12 - Salesforce Image,X13 - Competitive Pricing,X14 - Warranty & Claims,
#X15 - New Products,X16 Order & Billing,
#X17 - Price Flexibility,and X18 - Delivery Speed

# Normality of only error terms has to be checked

#split the data set into two parts- training and test data set

library(caTools)
set.seed(1000)   # make the random split reproducible
# 70% of the rows go to the training set (TRUE), 30% to validation (FALSE).
split1 = sample.split(reg1$id, SplitRatio = 0.70)
summary(split1)


# Training data set.
reg1train<-subset(reg1,split1==TRUE)
str(reg1train)
dim(reg1train)
# Validation data set.
reg1Val<-subset(reg1,split1==FALSE)
str(reg1Val)
dim(reg1Val)


# Remove all rows with missing observations from the training set.
dim(reg1train)
reg1train<-na.omit(reg1train)
dim(reg1train)   # compare with the dim() above to see how many rows dropped


# Outliers.
# Box plot to spot outliers (shown here for a single variable).
boxplot(reg1train$id)
# A log transform can damp the effect of outliers.
boxplot(log(reg1train$id))



# Assumptions of multiple linear regression.
# 1. Linear relationship between the DV and each IV.
# Bivariate plot to eyeball linearity.
# NOTE(review): the column names used here (X19...Satisfaction) and in
# later code (plain X19) disagree -- confirm the actual names from
# str(reg1) before running.

plot(reg1train$X19...Satisfaction, reg1train$X7...E.Commerce.Activities)
# This plot is only indicative; it may not make much sense on its own.
library(ggplot2)
ggplot(reg1train, aes(reg1train$X19,reg1train$X7))+geom_smooth()

# Fit a simple linear model and check whether the relation is linear.
# y ~ X
regx19_x7<-lm(reg1train$X19~reg1train$X7)
summary(regx19_x7)
# Check the b1 coefficient only (it reflects the correlation here).

# Use correlation directly.
library(stats)
cor(reg1train$X7, reg1train$X19)
cor.test(reg1train$X7, reg1train$X19)   # adds a significance test


# Application of the model: multiple linear regression ---------------------

# ---------------fit the model------------------#
# X19 (satisfaction) regressed on X7-X18.
fit1 <- lm(X19 ~ X7 + X8 + X9 + X10 + X11 + X12 + X13 + X14 + X15 + X16 +
             X17 + X18, data = reg1train)
summary(fit1)
attributes(fit1)

fit1$coefficients
fit1$residuals

# Check outliers.
library(car)
# BUG FIX(review): car::outlierTest() takes a fitted model, not a data
# frame -- the original passed reg1.
outlierTest(fit1)

# Observations with influential values.
# BUG FIX(review): influenceIndexPlot() also takes the fitted model.
influenceIndexPlot(fit1)


# Residual analysis, heteroscedasticity, etc. via the standard lm plots.
plot(fit1)

#-----------Multicollinearity-------------------------#
library(car)
vif(fit1)  # variance inflation factors; large values flag multicollinearity

# Randomness of the residuals: residuals against observation order.
# (The original put the index on the y axis; residuals on y is the
# conventional orientation.)
plot(seq_along(fit1$residuals), fit1$residuals)


# Normality of the residual values.
qqnorm(fit1$residuals, pch = 20, cex = 1)
qqline(fit1$residuals, col = "red", lwd = 3)
boxplot(fit1$residuals)
# A Shapiro-Wilk test (shapiro.test) should also be applied.

# There seem to be many outliers, which may cause non-normal residuals.

# Autocorrelation: Durbin-Watson test.
library(lmtest)
dwtest(fit1)

# Test for heteroscedasticity: residuals vs fitted values
# (fitted on the x axis, residuals on the y axis).
plot(fit1$fitted.values, fit1$residuals)

# The pattern is hard to judge visually, so apply the Breusch-Pagan test.
bptest(fit1)

#-----------------------------------#


# --------------------Stepwise / best-subset selection--------------------#

library(leaps)
# Best-subset search, keeping the 3 best models of each size.
fit2 <- regsubsets(X19 ~ X7 + X8 + X9 + X10 + X11 + X12 + X13 + X14 + X15 +
                     X16 + X17 + X18, data = reg1train, nbest = 3)
fit2
summary(fit2)

plot(fit2, scale = "r2")
plot(fit2, scale = "adjr2")
plot(fit2, scale = "bic")

#-------------------#


library(MASS)

# Backward elimination from the full model.
regback <- stepAIC(fit1, direction = "backward", trace = 1)
summary(regback)
regback$anova

# Forward selection.  NOTE(review): starting from the full model,
# forward selection has nothing to add, so this simply returns fit1.
regfor <- stepAIC(fit1, direction = "forward", trace = 1)
summary(regfor)
regfor$anova

# Stepwise in both directions.
# BUG FIX(review): direction must be a single string; the original
# passed the whole choices vector (match.arg happened to pick "both").
regall <- stepAIC(fit1, direction = "both", trace = 1)

summary(regall)
regall$anova
#-----------------------------#
#-----------------------------#

# Training vs validation model comparison ----------------------------------
# Compare the two fits on R^2, adjusted R^2 and coefficients, then
# compare them via predicted values on the hold-out data.

# BUG FIX(review): the original referenced undefined data sets
# `regtrain` / `regtest` and lower-case variable names; the split above
# produced reg1train / reg1Val with upper-case X column names.  Confirm
# the selected predictors against the stepwise output above.
fitfinal <- lm(X19 ~ X6 + X7 + X9 + X11 + X12 + X13 + X17, data = reg1train)
summary(fitfinal)

fitfinaltest <- lm(X19 ~ X6 + X7 + X9 + X11 + X12 + X13 + X17, data = reg1Val)
summary(fitfinaltest)

# Predict the validation set from the training-set model.
predict1 <- predict(fitfinal, reg1Val)

library(Metrics)
rmse(predict1, reg1Val$X19)  # a low RMSE indicates a good model

# Information criteria for both fits (lower is better).
AIC(fitfinal)
AIC(fitfinaltest)

BIC(fitfinal)
BIC(fitfinaltest)


Saturday, September 8, 2018

Basics of R- Session 14- using library dplyr

library(dplyr)
# Import the file (local path -- adjust to your machine).

mbaper<-read.csv("C:/Users/Administrator.vaio1/Desktop/MBAdata.csv")

# Structure of the data: str() is the traditional tool, glimpse() is the
# dplyr equivalent; fix() opens the interactive data editor.
str(mbaper)
fix(mbaper)
glimpse(mbaper)

# Adding or removing variables (columns) with select().
mbaper_age<-select(mbaper, Age_in_years_completed)    # keep one column
fix(mbaper_age)
mbaper_age<-select(mbaper, -Age_in_years_completed)   # drop one column
fix(mbaper_age)

# A range of consecutive columns.
mbaper_age<-select(mbaper, Age_in_years_completed:Mothers_qualification)
fix(mbaper_age)

#-------------------------------------------------------#

# Filter rows on a condition: filter(dataset, variable == value).
mbaper_age<-filter(mbaper, Percentage_in_10_Class==75)
fix(mbaper_age)

mbaper_age<-filter(mbaper, Percentage_in_10_Class<75)
fix(mbaper_age)

mbaper_age<-filter(mbaper, Percentage_in_10_Class>75)
fix(mbaper_age)

#-------------------------------------------------------#

# A specific category of a categorical variable.
mbaper_zone<-filter(mbaper, STATE=="North East Zone")
fix(mbaper_zone)

# More than one condition:
#   "," means AND
# NOTE(review): a row's STATE cannot equal two different values at once,
# so this AND filter always returns zero rows; the OR version below is
# the useful one.
mbaper_zone<-filter(mbaper, STATE=="North East Zone",STATE=="Central Zone")
fix(mbaper_zone)

#   "|" means OR
mbaper_zone<-filter(mbaper, STATE=="North East Zone" | STATE=="Central Zone")
fix(mbaper_zone)

#   "|" for OR -- the exact category string has to be used.
mbaper_zone<-filter(mbaper, STATE=="North East Zone" | Previous_Degree=="Commerce")
fix(mbaper_zone)

# The same two conditions combined with AND.
mbaper_zone<-filter(mbaper, STATE=="North East Zone" , Previous_Degree=="Commerce")
fix(mbaper_zone)

#-------------------------------------#

# Select columns and then subset rows: combining select() and filter().

mbaper_age<-select(mbaper, Age_in_years_completed)
fix(mbaper_age)

mbaper_age<-filter(select(mbaper, Age_in_years_completed),Age_in_years_completed==25)
fix(mbaper_age)

# --------------------------------------------------# pipe function

1:8 %>% sum                   # sum(1:8) = 36
1:8 %>% sum %>% sqrt          # sqrt(36) = 6
1:8 %>% sum %>% sqrt%>% sum*10  # %>% binds tighter than *, so this is sum(sqrt(sum(1:8))) * 10 = 60

#---------------------------------#
# The same select-then-filter, written as a pipe.
mbaper_age<-select(mbaper, Age_in_years_completed) %>%
  filter(Age_in_years_completed==22)

fix(mbaper_age)

# Sort ascending.
mbaper_age<-select(mbaper, Age_in_years_completed) %>%
  arrange(Age_in_years_completed)

# Sort descending (the minus sign reverses the order).
mbaper_age<-select(mbaper, Age_in_years_completed) %>%
  arrange(-Age_in_years_completed)