# Approach 0: filter variables by correlation strength and p-value
# Flag variables whose correlation with EVERY other variable is uniformly
# strong (all > cor_cutoff or all < -cor_cutoff) and significant.
# Result: Sat_Var, a character vector of qualifying column names.
library(Hmisc)
res <- rcorr(as.matrix(meta13_raw[-1]))
cor_cutoff <- 0.5
# NOTE(review): p < 1 is satisfied by (almost) every p-value, so this
# condition is nearly vacuous — presumably 0.05 was intended; confirm.
p_cutoff <- 1
Sat_Var <- character()
for (i in seq_len(nrow(res$r))) {
  var_cor <- res$r[-i, i]   # correlations with all other variables
  var_pval <- res$P[-i, i]  # matching p-values (NA diagonal removed by -i)
  uniformly_strong <- all(var_cor > cor_cutoff) || all(var_cor < -cor_cutoff)
  if (uniformly_strong && all(var_pval < p_cutoff)) {
    Sat_Var <- c(Sat_Var, colnames(res$r)[i])
  }
}
# Approach 1: treat flavor as ordered (ordinal logistic regression)
# Ordinal-regression pipeline: rank each metabolite by single-predictor AIC,
# keep the 10 best, then fit a combined polr model on an 80/20 split.

# Import data
meta13_raw <- read.csv("/Users/lyc/Desktop/2013_mature_meta_mod.csv", header = TRUE)
colnames(meta13_raw)[1] <- "flavor"
# Fill missing values (na.aggregate replaces NA with the column mean)
library(zoo)
meta13_raw[] <- lapply(meta13_raw, na.aggregate)
# Rank every predictor by the AIC of a one-variable ordinal model
vars <- colnames(meta13_raw[-1])
library(MASS)
result <- NULL
for (v in vars) {  # was hard-coded 1:582; follow the actual column count
  fit <- polr(as.formula(paste("as.factor(flavor) ~", v)),
              data = meta13_raw, Hess = TRUE)
  result <- rbind(result, c(v, AIC(fit)))
}
# BUG FIX: result is a character matrix, so order(result[, 2]) compared the
# AIC values lexicographically (e.g. "99.2" > "100.5") and picked the wrong
# variables; convert to numeric for a correct ranking.
result <- result[order(as.numeric(result[, 2])), ]
Var_end <- result[1:10, 1]  # names of the 10 lowest-AIC predictors
# Create a new dataset restricted to the selected predictors
library(dplyr)
meta13_new <- select(meta13_raw, all_of(Var_end))
fla <- meta13_raw[1]
meta13_end <- cbind(fla, meta13_new)
# Partition the dataset: 80% train / 20% test
set.seed(123)
library(caret)
training.samples <- meta13_end$flavor %>%
  createDataPartition(p = 0.8, list = FALSE)
train.data <- meta13_end[training.samples, ]
test.data <- meta13_end[-training.samples, ]
# Build the combined ordinal model and predict on the held-out set
model <- polr(as.factor(flavor) ~ ., data = train.data, Hess = TRUE)
predicted.classes <- model %>%
  predict(test.data)
# Approach 2: single-label multinomial regression with ten variables --
# the smoothest route; ~96% accuracy on 2013 hold-out, ~69% on 2014 data
# Multinomial-regression pipeline: rank each metabolite by single-predictor
# AIC, keep the 10 best, fit a combined multinom model on an 80/20 split,
# then validate against the independent 2014 dataset.

# Import data
meta13_raw <- read.csv("/Users/lyc/Desktop/2013_mature_meta_mod.csv", header = TRUE)
colnames(meta13_raw)[1] <- "flavor"
# Fill missing values (na.aggregate replaces NA with the column mean)
library(zoo)
meta13_raw[] <- lapply(meta13_raw, na.aggregate)
# Rank every predictor by the AIC of a one-variable multinomial model
vars <- colnames(meta13_raw[-1])
library(nnet)
result <- NULL
for (v in vars) {  # was hard-coded 1:582; follow the actual column count
  fit <- nnet::multinom(as.formula(paste("flavor ~", v)),
                        data = meta13_raw, MaxNWts = 1500)
  result <- rbind(result, c(v, AIC(fit)))
}
# BUG FIX: result is a character matrix, so order(result[, 2]) compared the
# AIC values lexicographically and could pick the wrong variables;
# convert to numeric for a correct ranking.
result <- result[order(as.numeric(result[, 2])), ]
Var_end <- result[1:10, 1]  # names of the 10 lowest-AIC predictors
# Create a new dataset restricted to the selected predictors
library(dplyr)
meta13_new <- select(meta13_raw, all_of(Var_end))
fla <- meta13_raw[1]
meta13_end <- cbind(fla, meta13_new)
# Partition the dataset: 80% train / 20% test
set.seed(123)
library(caret)
training.samples <- meta13_end$flavor %>%
  createDataPartition(p = 0.8, list = FALSE)
train.data <- meta13_end[training.samples, ]
test.data <- meta13_end[-training.samples, ]
# Build the combined multinomial model
model <- nnet::multinom(flavor ~ ., data = train.data, MaxNWts = 1500)
predicted.classes <- model %>%
  predict(test.data)
head(predicted.classes)
# Test the model: accuracy on the 2013 hold-out set
mean(predicted.classes == test.data$flavor)
# Test the model on the independent 2014 data
meta14_raw <- read.csv("/Users/lyc/Desktop/2014_mature_meta_mod.csv", header = TRUE)
colnames(meta14_raw)[1] <- "flavor"
meta14_raw[] <- lapply(meta14_raw, na.aggregate)
predicted.classes2 <- model %>%
  predict(meta14_raw)
head(predicted.classes2)
mean(predicted.classes2 == meta14_raw$flavor)
# Example output (the ten selected variables):
# [1] "tlfm581" "tlfm801" "tlfm781" "tlfm553" "tlfm917"
# [6] "tlfm576" "tlfm896" "tlfm582" "tlfm574" "tlfm883"
# TODO: can we detect and drop the very highly correlated variables?