
Goal: study the correlations between variables and uncover other important variables.

Random forest (in-class notes)

```

setwd("~/Desktop")

d = read.csv("train.csv",header = TRUE)

View(d)

dc = d[complete.cases(d),]  # keep only rows without missing values

d0 = d[d$y==0,]

d1 = d[d$y==1,]

d2 = d[d$y==2,]

d3 = d[d$y==3,]

# Draw a random integer from 1 to 10 for every row of each class;
# these labels are used below to split each class into train/test halves

label0 = sample(c(1:10), nrow(d0), replace = TRUE)

label1 = sample(c(1:10), nrow(d1), replace = TRUE)

label2 = sample(c(1:10), nrow(d2), replace = TRUE)

label3 = sample(c(1:10), nrow(d3), replace = TRUE)


d0_train = d0[label0<=5,]  # labels 1-5 go to the training half, 6-10 to the test half

d0_test = d0[label0>5,]

d1_train = d1[label1<=5,]

d1_test = d1[label1>5,]

d2_train = d2[label2<=5,]

d2_test = d2[label2>5,]

d3_train = d3[label3<=5,]

d3_test = d3[label3>5,]

d_train = rbind(d0_train, d1_train,d2_train, d3_train)

d_test = rbind(d0_test,d1_test,d2_test,d3_test)


library(nnet)

re_log = multinom(y~.-id, data = d_train)

# like glm(), but fits a logistic regression for a multi-class (multinomial) response

pred_log = predict(re_log, newdata = d_test)

# score the test set with the model from the previous step; note that the test data is passed as newdata

tab_log = table(d_test$y, pred_log)

# confusion-matrix-style table; d_test$y gives the true labels of the test set

# accuracy for class 0: the top-left cell divided by the sum of the first row

library(rpart)

re_id3_mis = rpart(y~.-id, data = d_train)
# without method = "class", rpart treats y as numeric and fits a regression tree

re_id3 = rpart(y~.-id, data = d_train, method = "class")

# library(RWeka)  # not needed below: rpart with split = "information" already mimics ID3

re_id3 = rpart(y~.-id, data = d_train, method = "class", parms = list(split="information"))

pred_id3 = predict(re_id3,newdata = d_test,type = "class")

re_CART = rpart(y~.-id,data = d_train,method = "class",parms = list(split="gini"),control = rpart.control(cp = 0.0001))

pred_CART = predict(re_CART,newdata = d_test,type = "class")

table(d_test$y,pred_CART)

re_CART$cptable

min_row = which.min(re_CART$cptable[, "xerror"])  # row with the smallest cross-validated error

min_row

re_CART_f = prune(re_CART, cp = re_CART$cptable[min_row, "CP"])

pred_CART_f = predict(re_CART_f, newdata = d_test, type = "class")  # re-predict with the pruned tree

table(d_test$y, pred_CART_f)

plot(re_CART)

# Random forest

library(randomForest)

d_train$y = as.factor(d_train$y)  # randomForest needs a factor response for classification

re_rf = randomForest(y~.-id, data = d_train, ntree = 5)

pred_rf = predict(re_rf, newdata = d_test, type = "prob")

# collapse classes 1-3 into one class to make the problem binary;
# d_train$y is a factor now, so convert through as.character() first
d_train$y = as.factor(ifelse(as.numeric(as.character(d_train$y)) >= 1, 1, 0))

d_test$y[d_test$y >= 1] = 1

```
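
As the last comment in the block notes, per-class accuracy can be read directly off the confusion matrix. A minimal sketch, assuming tab_log from the block above (rows are true classes, columns are predictions, and every class appears among the predictions):

```
# per-class accuracy: diagonal cell divided by that row's total
diag(tab_log) / rowSums(tab_log)

# overall accuracy
sum(diag(tab_log)) / sum(tab_log)
```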

Monte Carlo 5.2


```

n = 100

# Monte Carlo simulation: repeatedly draw fresh samples from the true distribution

alpha = c()

library(MASS)

for (i in 1:100){

mu1 = c(0,0)

sigma1 = matrix(c(1, 0.5, 0.5, 1.25), nrow = 2)

rand1 = mvrnorm(n = 100, mu = mu1, Sigma = sigma1)  # note: mvrnorm's argument is Sigma, capitalized

X = rand1[,1]

Y = rand1[,2]

alpha[i] = (var(Y) - cov(X,Y)) / (var(X) + var(Y) - 2*cov(X,Y))

}

rand1



for (j in 1:100)

{

ran = rand1[sample(c(1:100), 100, replace = TRUE),]

# sample(c(1:100), 100, replace = TRUE): draw 100 row indices from 1:100 with replacement

X = ran[,1]

Y = ran[,2]

alpha[j] = (var(Y) - cov(X,Y)) / (var(X) + var(Y) - 2*cov(X,Y))

# rand1 stores the simulated observations from the multivariate normal distribution

# ran resamples those 100 rows with replacement, i.e. one bootstrap sample

}

```
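
Both loops fill alpha so that its spread estimates the variability of the estimator: the standard deviation of the stored values approximates the standard error of alpha. A minimal follow-up, assuming the alpha vector from the loops above:

```
mean(alpha)  # point estimate averaged over the replications

sd(alpha)    # approximate standard error of alpha

hist(alpha)  # sampling distribution of the estimates
```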

PCA & K-means P422 10


```

#a

# three classes of 20 observations on 50 variables, separated by mean shifts

X <- rbind(matrix(rnorm(20*50, mean = 0), nrow = 20),

matrix(rnorm(20*50, mean = 0.7), nrow = 20),

matrix(rnorm(20*50, mean = 1.4), nrow = 20))


#b

X.pca = prcomp(X)$x  # principal component score vectors

plot(X.pca[,1:2], col = c(rep(1,20), rep(2,20), rep(3,20)))  # first two PCs, colored by true class


#c

res = kmeans(X, centers = 3)

true_class = c(rep(1,20), rep(2,20), rep(3,20))

table(res$cluster, true_class)


#d

res = kmeans(X, centers = 2)

table(res$cluster, true_class)  # true_class was defined in (c)


#e

res = kmeans(X, centers = 4)

table(res$cluster, true_class)


#f

res = kmeans(X.pca[,1:2], centers = 3)

table(res$cluster, true_class)


#g

res = kmeans(scale(X), centers = 3)

table(res$cluster, true_class)


#h

withss = rep(NA, 20)

for (k in 1:length(withss)) {

withss[k] = sum(kmeans(X, k)$withinss)

}

plot(withss)  # elbow plot: total within-cluster sum of squares against the number of clusters

```
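
K-means depends on its random initialization, so repeated runs can land in different local optima. The usual remedies are fixing the seed and letting kmeans try several starts; a minimal sketch using the X and true_class objects from above:

```
set.seed(1)

# nstart = 20 runs 20 random initializations and keeps the best solution
res = kmeans(X, centers = 3, nstart = 20)

table(res$cluster, true_class)
```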

Mahalanobis distance


```

d=read.csv("hmeq.csv",na.strings="")

dim(d)

View(d)

dc =d[complete.cases(d),] #remove missing values

dim(dc)


mdist = function(x) {  # helper: Mahalanobis distance of every row of x

t = as.matrix(x)  # convert x to a matrix

p = dim(t)[2]  # number of columns

m = apply(t, 2, mean)  # column means

s = var(t)  # var() on a matrix returns the covariance matrix

return(mahalanobis(t, m, s))

}


# Split the data by class

dc1 = dc[dc$BAD==1,]  # the first column, BAD, is a 1/0 indicator

dc0 = dc[dc$BAD==0,]

# Mahalanobis distance within each group

mdc1 = mdist(dc1[,-c(1,5,6)])  # drop columns 1, 5 and 6, which are not numeric

mdc0 = mdist(dc0[,-c(1,5,6)])

cutoff = qchisq(0.99, 10)  # chi-square cutoff: under normality, 99% of the distances should fall below it

# 10 degrees of freedom: 13 variables minus columns 1, 5 and 6 leaves 10


mdc=mdist(dc[,-c(1,5,6)])


# Filter out outliers with the Mahalanobis distance

x1 = dc1[mdc1 < cutoff,]

x0 = dc0[mdc0 < cutoff,]



lg.fit = glm(BAD~., data = dc, family = binomial)  # fit on the complete cases so the lengths below match dc$BAD

summary(lg.fit$fitted.values)


prob = predict(lg.fit, type = "response")  # keep the continuous probabilities for the ROC curve

pred1 = prob

pred1[pred1 > 0.3] <- 1  # classify with a probability cutoff of 0.3

pred1[pred1 <= 0.3] <- 0


table(pred1, dc$BAD)


library(pROC)

roc(dc$BAD, prob, plot = TRUE, print.auc = TRUE, legacy.axes = TRUE)

# arguments: response, predictor (the continuous probability, not the 0/1 labels),
# draw the curve, print the AUC, and put the x-axis in the conventional (0,1) direction

```
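
If the numeric columns were multivariate normal, these squared Mahalanobis distances would follow a chi-square distribution with 10 degrees of freedom, which can be checked visually. A minimal sketch, assuming mdc1 and cutoff from the block above:

```
# compare the empirical distances to chi-square(10) quantiles
qqplot(qchisq(ppoints(length(mdc1)), df = 10), mdc1,
       xlab = "chi-square(10) quantiles", ylab = "Mahalanobis distance")
abline(0, 1)

sum(mdc1 >= cutoff)  # how many observations the 99% cutoff flags in the BAD == 1 group
```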

My own: decision tree & random forest


```

# heart is assumed to be loaded beforehand, e.g. heart = read.csv("heart.csv")
names(heart)

heart$target[heart$target==1]<-"Yes"

heart$target[heart$target==0]<-"No"

set.seed(100)

train<-sample(nrow(heart),0.7*nrow(heart))

theart<-heart[train,]

vheart<-heart[-train,]

library(rpart)

dtree<-rpart(target~.,data=theart,method="class",parms=list(split="gini"))

printcp(dtree)

print(dtree)

tree<-prune(dtree,cp=dtree$cptable[which.min(dtree$cptable[,"xerror"]),"CP"])

opar<-par(no.readonly=T)

par(mfrow=c(1,2))

#install.packages("rpart.plot")

library(rpart.plot)

rpart.plot(dtree,branch=1,type=4,fallen.leaves=T,cex=0.6,sub="before")

rpart.plot(tree,branch=1,type=4,fallen.leaves=T,cex=0.6,sub="after")

par(opar)  # restore the graphics parameters saved above

predtree<-predict(tree,newdata=vheart,type="class")

table(vheart$target,predtree,dnn=c("real","predict"))



# Random forest

library(randomForest)

data.index = sample(c(1,2), nrow(heart), replace = T, prob = c(0.7, 0.3))

train_data = heart[which(data.index == 1),]

test_data = heart[which(data.index == 2),]

# convert the response to a factor up front: writing as.factor(train_data$target)~. inside
# the formula would leave target itself among the predictors expanded from "."
train_data$target = as.factor(train_data$target)

test_data$target = as.factor(test_data$target)

n<-length(names(train_data))

rate = rep(NA, n-1)  # mean OOB error for each candidate mtry

for(i in 1:(n-1)){

    set.seed(100)

    rf_train<-randomForest(target~., data=train_data, mtry=i, ntree=1000)

    rate[i]<-mean(rf_train$err.rate[,"OOB"])

print(rf_train)

}

rate

plot(rate)

set.seed(100)

rf_train<-randomForest(target~., data=train_data, mtry=2, ntree=1000)

plot(rf_train)

set.seed(100)

rf_train<-randomForest(target~., data=train_data, mtry=2, ntree=800, importance=TRUE, proximity=TRUE)

imp<-importance(rf_train)  # variable importance matrix (avoid shadowing the importance() function)

barplot(rf_train$importance[,1],main="importance")

box()

importance(rf_train,type=2)

varImpPlot(x=rf_train,sort=TRUE,n.var=nrow(rf_train$importance))

print(rf_train)

hist(treesize(rf_train))

max(treesize(rf_train));min(treesize(rf_train))

MDSplot(rf_train,train_data$target,palette=rep(1,2),pch=as.numeric(train_data$target))

pred<-predict(rf_train,newdata=test_data) 

pred_out_1<-predict(object=rf_train,newdata=test_data,type="prob")

tab <- table(pred, test_data$target)  # avoid masking the base function table()

sum(diag(tab))/sum(tab)  # overall accuracy on the test set

plot(margin(rf_train))  # margin of the training observations, computed from OOB votes

```
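
The probability matrix pred_out_1 above is computed but never used; a natural follow-up is an ROC curve on the test set. A minimal sketch, assuming the pROC package is installed and using the "Yes" level of the recoded target:

```
library(pROC)

# ROC curve on the test set, using the predicted probability of class "Yes"
roc(test_data$target, pred_out_1[, "Yes"],
    plot = TRUE, print.auc = TRUE, legacy.axes = TRUE)
```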



P200 Q2 Q8

We will now derive the probability that a given observation is part of a bootstrap sample. Suppose that we obtain a bootstrap sample from a set of n observations.

What is the probability that the first bootstrap observation is not the jth observation from the original sample? Justify your answer.

1−1/n

What is the probability that the second bootstrap observation is not the jth observation from the original sample?

1−1/n

Argue that the probability that the jth observation is not in the bootstrap sample is (1 − 1/n)^n.

Since bootstrapping samples with replacement, the probability that the jth observation is not in the bootstrap sample is the product of the probabilities that each of the n bootstrap observations is not the jth observation from the original sample:

(1 − 1/n) ⋯ (1 − 1/n) = (1 − 1/n)^n,

as these draws are independent.

When n = 5, what is the probability that the jth observation is in the bootstrap sample?

We have

P(jth obs in bootstrap sample) = 1 − (1 − 1/5)^5 = 0.672.

When n = 100, what is the probability that the jth observation is in the bootstrap sample?

We have

P(jth obs in bootstrap sample) = 1 − (1 − 1/100)^100 = 0.634.

When n = 10000, what is the probability that the jth observation is in the bootstrap sample?

We have

P(jth obs in bootstrap sample) = 1 − (1 − 1/10000)^10000 = 0.632.

Create a plot that displays, for each integer value of n from 1 to 100,000, the probability that the jth observation is in the bootstrap sample. Comment on what you observe.

```
x <- 1:100000

plot(x, 1 - (1 - 1/x)^x)
```

The probability drops quickly and then stabilizes: as n grows, 1 − (1 − 1/n)^n converges to 1 − 1/e ≈ 0.632.
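
A quick numeric check of that limit in the R console:

```
1 - (1 - 1/100000)^100000  # ~0.632122

1 - exp(-1)                # ~0.632121, the limiting value 1 - 1/e
```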



Q8

We will now perform cross-validation on a simulated data set.

Generate a simulated data set as follows:

```
set.seed(1)

y <- rnorm(100)

x <- rnorm(100)

y <- x - 2*x^2 + rnorm(100)
```

In this data set, what is n and what is p? Write out the model used to generate the data in equation form.

Here we have n = 100 and p = 2. The model used is

Y = X − 2X^2 + ε.

Create a scatterplot of X against Y. Comment on what you find.

```
plot(x, y)
```

The points trace an inverted-U shape: the relationship between x and y is clearly quadratic rather than linear.

Set a random seed, and then compute the LOOCV errors that result from fitting the following four models using least squares:

Y = β0 + β1X + ε

```
library(boot)

set.seed(1)

Data <- data.frame(x, y)

fit.glm.1 <- glm(y ~ x)

cv.glm(Data, fit.glm.1)$delta[1]

## [1] 5.890979
```

Y = β0 + β1X + β2X^2 + ε

```
fit.glm.2 <- glm(y ~ poly(x, 2))

cv.glm(Data, fit.glm.2)$delta[1]

## [1] 1.086596
```

Y = β0 + β1X + β2X^2 + β3X^3 + ε

```
fit.glm.3 <- glm(y ~ poly(x, 3))

cv.glm(Data, fit.glm.3)$delta[1]

## [1] 1.102585
```

Y = β0 + β1X + β2X^2 + β3X^3 + β4X^4 + ε

```
fit.glm.4 <- glm(y ~ poly(x, 4))

cv.glm(Data, fit.glm.4)$delta[1]

## [1] 1.114772
```

Repeat (c) using another random seed, and report your results. Are your results the same as what you got in (c)? Why?

```
set.seed(10)

fit.glm.1 <- glm(y ~ x)

cv.glm(Data, fit.glm.1)$delta[1]

## [1] 5.890979

fit.glm.2 <- glm(y ~ poly(x, 2))

cv.glm(Data, fit.glm.2)$delta[1]

## [1] 1.086596

fit.glm.3 <- glm(y ~ poly(x, 3))

cv.glm(Data, fit.glm.3)$delta[1]

## [1] 1.102585

fit.glm.4 <- glm(y ~ poly(x, 4))

cv.glm(Data, fit.glm.4)$delta[1]

## [1] 1.114772
```

The results above are identical to those obtained in (c), because LOOCV deterministically evaluates n folds of one observation each; no random splitting is involved, so the seed has no effect.
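
To make the determinism concrete, here is a minimal hand-written LOOCV sketch for the first model; it refits the model n times, leaving out one fixed observation each time, and never draws a random number. It should reproduce cv.glm(Data, fit.glm.1)$delta[1]:

```
# manual LOOCV for the linear model; no random number is ever drawn
n <- nrow(Data)
errs <- rep(NA, n)
for (i in 1:n) {
  fit <- glm(y ~ x, data = Data[-i, ])       # fit without observation i
  pred <- predict(fit, newdata = Data[i, ])  # predict the held-out observation
  errs[i] <- (Data$y[i] - pred)^2            # squared prediction error
}
mean(errs)  # the LOOCV estimate of the test MSE
```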

Which of the models in (c) had the smallest LOOCV error? Is this what you expected? Explain your answer.

The LOOCV estimate of the test MSE is smallest for fit.glm.2. This is not surprising, since we saw clearly in (b) that the relationship between x and y is quadratic.

Comment on the statistical significance of the coefficient estimates that result from fitting each of the models in (c) using least squares. Do these results agree with the conclusions drawn based on the cross-validation results?

```
summary(fit.glm.4)

## 
## Call:
## glm(formula = y ~ poly(x, 4))
## 
## Deviance Residuals: 
##     Min       1Q   Median       3Q      Max  
## -2.8914  -0.5244   0.0749   0.5932   2.7796  
## 
## Coefficients:
##             Estimate Std. Error t value Pr(>|t|)    
## (Intercept)  -1.8277     0.1041 -17.549   <2e-16 ***
## poly(x, 4)1   2.3164     1.0415   2.224   0.0285 *  
## poly(x, 4)2 -21.0586     1.0415 -20.220   <2e-16 ***
## poly(x, 4)3  -0.3048     1.0415  -0.293   0.7704    
## poly(x, 4)4  -0.4926     1.0415  -0.473   0.6373    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## (Dispersion parameter for gaussian family taken to be 1.084654)
## 
##     Null deviance: 552.21  on 99  degrees of freedom
## Residual deviance: 103.04  on 95  degrees of freedom
## AIC: 298.78
## 
## Number of Fisher Scoring iterations: 2
```

The p-values show that the linear and quadratic terms are statistically significant, while the cubic and fourth-degree terms are not. This agrees strongly with the cross-validation results, for which the quadratic model had the smallest error.


P174 12


P337 3


https://www-bcf.usc.edu/~gareth/ISL/ISLR%20First%20Printing.pdf


www.github.com/cesanlau
