
R Implementations of the Top Ten Data Mining Algorithms

The iris dataset: iris uses measurements of iris flowers as its data and is commonly used for classification tasks. The dataset contains 50 samples from each of 3 iris species (150 rows in total). One species is linearly separable from the other two, while those two are not linearly separable from each other.
library(ggplot2)
summary(iris)
qplot(Petal.Length, Petal.Width, data=iris, color=Species)
1: C5.0 Decision Trees
First, load the required packages
library(C50)
library(printr)
Sample the iris dataset to obtain a training set and a test set
train.indeces <- sample(1:nrow(iris), 100)
iris.train <- iris[train.indeces, ]
iris.test <- iris[-train.indeces, ]
Train a model on the training set with the C5.0 function
model <- C5.0(Species ~ ., data = iris.train)
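Optionally, the fitted tree can be examined before making predictions. A brief supplementary sketch (not part of the original post; summary, plot and C5imp are the C50 package's own methods):
summary(model)   # print the tree structure and training error
plot(model)      # draw the decision tree
C5imp(model)     # attribute usage / variable importance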
Predict on the test set
results <- predict(object = model, newdata = iris.test, type = "class")
confusion_matrix=table(results, iris.test$Species)
confusion_matrix
Compute the error rate
error=1-sum(diag(confusion_matrix))/nrow(iris.test)
The prediction error rate is 0.12.

2: K-means
Build the model
library(stats)
library(printr)
model <- kmeans(x = subset(iris, select = -Species), centers = 3)
Compare the clusters with the true species
table(model$cluster, iris$Species)
    setosa  versicolor  virginica
1       33           0          0
2       17           4          0
3        0          46         50
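The cluster numbers returned by kmeans are arbitrary labels, so the table above cannot be read as a confusion matrix directly. A small supplementary sketch (not from the original post) that relabels each cluster by its majority species to get a rough agreement figure:
cm <- table(model$cluster, iris$Species)
majority <- colnames(cm)[apply(cm, 1, which.max)]  # majority species in each cluster
predicted <- majority[model$cluster]               # relabel every point by its cluster's majority species
mean(predicted == iris$Species)                    # fraction of points matching the true species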
3: Support Vector Machines
Load the packages
library(e1071)
library(printr)
Sample the iris dataset to obtain a training set and a test set
train.indeces <- sample(1:nrow(iris), 100)
iris.train <- iris[train.indeces, ]
iris.test <- iris[-train.indeces, ]
Train a model on the training set with the svm function
model <- svm(Species ~ ., data = iris.train)
Predict on the test set
results <- predict(object = model, newdata = iris.test, type = "class")
confusion_matrix=table(results, iris.test$Species)
confusion_matrix

results       setosa  versicolor  virginica
  setosa          12           0          0
  versicolor       0          19          0
  virginica        0           1         18

Compute the error rate
error=1-sum(diag(confusion_matrix))/nrow(iris.test)
The prediction error rate is 0.02.
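The svm() call above uses the default cost and gamma. As a supplement (not in the original post), e1071's tune() can grid-search these hyperparameters by cross-validation; the grid values below are only illustrative:
tuned <- tune(svm, Species ~ ., data = iris.train,
              ranges = list(gamma = 10^(-2:1), cost = 10^(0:2)))
summary(tuned)          # cross-validated error for each parameter combination
tuned$best.parameters   # the combination with the lowest error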
4: Apriori
Load the package and the dataset
library(arules)
library(printr)
data("Adult")
Train the model
rules <- apriori(Adult,
parameter = list(support = 0.4, confidence = 0.7),
appearance = list(rhs = c("race=White", "sex=Male"), default = "lhs"))
Get the top five association rules
rules.sorted <- sort(rules, by = "lift")
top5.rules <- head(rules.sorted, 5)
as(top5.rules, "data.frame")
     rules                                                                     support  confidence  lift
2    {relationship=Husband} => {sex=Male}                                    0.4036485   0.9999493  1.495851
12   {marital-status=Married-civ-spouse,relationship=Husband} => {sex=Male}  0.4034028   0.9999492  1.495851
3    {marital-status=Married-civ-spouse} => {sex=Male}                       0.4074157   0.8891818  1.330151
4    {marital-status=Married-civ-spouse} => {race=White}                     0.4105892   0.8961080  1.048027
19   {workclass=Private,native-country=United-States} => {race=White}        0.5433848   0.8804113  1.029669
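A few other standard arules functions are handy for examining the mined rules; a brief supplementary sketch (the lift and confidence thresholds below are arbitrary, not from the post):
inspect(top5.rules)     # pretty-print the top rules
quality(top5.rules)     # support / confidence / lift columns
strong <- subset(rules, subset = lift > 1.2 & confidence > 0.9)
length(strong)          # number of rules passing the stricter filter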
5: The EM Algorithm
library(mclust)
library(printr)
model <- Mclust(subset(iris, select = -Species))
table(model$classification, iris$Species)
    setosa  versicolor  virginica
1       50           0          0
2        0          50         50
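The table shows that Mclust has merged versicolor and virginica into a single component. As context (this inspection step is not in the original post), Mclust selects the number of components and covariance model by BIC, and its own summary and plot methods show what was chosen:
summary(model)                        # chosen covariance model and number of components
model$G                               # number of mixture components selected
plot(model, what = "classification")  # pairs plot colored by mixture component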
6: PageRank
PageRank computes the relative importance of each node in a graph; its underlying principle is a Markov chain (a small hand-rolled illustration of this follows after the output below).
library(igraph)
library(dplyr)
library(printr)
Generate a random network graph
g <- random.graph.game(n = 10, p.or.m = 1/4, directed = TRUE)
plot(g)

Compute the PageRank value for each node
pr <- page.rank(g)$vector
df <- data.frame(Object = 1:10, PageRank = pr)
arrange(df, desc(PageRank))
Object  PageRank
10  0.1768655
7   0.1369388
1   0.1263876
4   0.1198167
2   0.1161824
9   0.0891266
6   0.0847579
8   0.0793286
5   0.0390147
3   0.0315813
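To make the Markov-chain view concrete, here is a hand-rolled power-iteration sketch (an illustrative addition, not part of the original post; igraph's page.rank uses a more refined implementation, so small numeric differences are expected):
A <- get.adjacency(g, sparse = FALSE)         # adjacency matrix of the random graph
out.deg <- rowSums(A)
P <- A / ifelse(out.deg == 0, 1, out.deg)     # row-stochastic transition matrix
P[out.deg == 0, ] <- 1 / ncol(A)              # dangling nodes jump uniformly
d <- 0.85                                     # damping factor (igraph's default)
G.mat <- d * P + (1 - d) / ncol(A)            # the damped "Google matrix"
r <- rep(1 / ncol(A), ncol(A))                # start from the uniform distribution
for (i in 1:100) r <- as.vector(r %*% G.mat)  # power iteration toward the stationary distribution
round(r, 4)                                   # should be close to page.rank(g)$vector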
7: AdaBoost
library(adabag)
library(printr)
train.indeces <- sample(1:nrow(iris), 100)
iris.train <- iris[train.indeces, ]
iris.test <- iris[-train.indeces, ]
Train the model
model <- boosting(Species ~ ., data = iris.train)
Prediction results
results <- predict(object = model, newdata = iris.test, type = "class")
results$confusion

                 Observed Class
Predicted Class   setosa  versicolor  virginica
  setosa              15           0          0
  versicolor           0          18          4
  virginica            0           0         13
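adabag's predict.boosting also reports the test error directly in its return value; a short cross-check against the confusion matrix (this step is an addition to the post):
results$error                                       # error reported by adabag
1 - sum(diag(results$confusion)) / nrow(iris.test)  # same figure computed from the confusion matrix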
8: kNN
library(class)
library(printr)
train.indeces <- sample(1:nrow(iris), 100)
iris.train <- iris[train.indeces, ]
iris.test <- iris[-train.indeces, ]
Train the model
results <- knn(train = subset(iris.train, select = -Species),
               test = subset(iris.test, select = -Species),
               cl = iris.train$Species)
Classification results
table(results, iris.test$Species)

results       setosa  versicolor  virginica
  setosa          22           0          0
  versicolor       0          10          0
  virginica        0           1         17
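Note that class::knn defaults to k = 1 when k is not given. A small sketch with an explicitly chosen k (the value 5 is an arbitrary illustration, not from the post):
results.k5 <- knn(train = subset(iris.train, select = -Species),
                  test = subset(iris.test, select = -Species),
                  cl = iris.train$Species, k = 5)
table(results.k5, iris.test$Species)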
9: Naive Bayes
library(e1071)
library(printr)
train.indeces <- sample(1:nrow(iris), 100)
iris.train <- iris[train.indeces, ]
iris.test <- iris[-train.indeces, ]
Train a model on the training set
model <- naiveBayes(x = subset(iris.train, select = -Species), y = iris.train$Species)
Predict on the test set
results <- predict(object = model, newdata = iris.test, type = "class")
table(results, iris.test$Species)

results       setosa  versicolor  virginica
  setosa          18           0          0
  versicolor       0          17          0
  virginica        0           4         11
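predict.naiveBayes can also return class posterior probabilities with type = "raw" (a standard e1071 option); a short supplementary example:
posterior <- predict(object = model, newdata = iris.test, type = "raw")
head(round(posterior, 3))   # posterior probabilities for the first few test rows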
10: CART

library(rpart) 

library(printr) 

train.indeces <- sample(1:nrow(iris), 100) 

iris.train <- iris[train.indeces, ] 

iris.test <- iris[-train.indeces, ] 

Train the model

model <- rpart(Species ~ ., data = iris.train) 

Test the model

results <- predict(object = model, newdata = iris.test, type = "class") 

table(results, iris.test$Species) 

results       setosa  versicolor  virginica
  setosa          15           0          0
  versicolor       0          16          6
  virginica        0           1         12
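To round off the section, a short supplementary sketch (not from the original post) that draws the fitted tree with rpart's plot/text methods and computes the error rate the same way as in the earlier sections:
plot(model, margin = 0.1)   # draw the tree structure
text(model, use.n = TRUE)   # label the nodes, showing class counts
confusion_matrix <- table(results, iris.test$Species)
1 - sum(diag(confusion_matrix)) / nrow(iris.test)   # test-set error rate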
Reposted from: http://blog.csdn.net/cmddds11235/article/details/47724871