While I have some time, I'm organizing the model code from my graduate thesis. The main algorithms used are XGBoost, SVM, ensemble learning, LR (logistic regression), RF (random forest), NN (neural network), and k-nearest neighbors.

  1. The initial credit-risk evaluation indicator system contained too many indicators; the redundant information makes overfitting likely, so the indicator set is pruned here using XGBoost's feature-importance ranking. XGBoost was chosen because its regularization term controls model complexity, it trains quickly, and it keeps the overall workflow efficient.
library(caret)
library(readxl)  # read_excel() below comes from readxl
input39 <- read_excel("C:/Users/Libby/OneDrive/Data for thesis/定稿/input39.xlsx")
sum(is.na(input39))
input39[is.na(input39)]<-0  # fill missing values with 0
scainput<-scale(input39[,2:41])  # standardize the indicator columns
input<-data.frame(input39[,1],scainput)
# Balancing: an imbalanced class distribution in binary data strongly affects classification results, so rebalance the sample with SMOTE
library(DMwR)
table(input$Y)
Y<-as.factor(input$Y)
finput<-data.frame(Y,input[,2:41])
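# Note on DMwR::SMOTE semantics (as documented by the package): perc.over = 200 generates
# two synthetic minority cases per original one, tripling the minority class, and
# perc.under = 160 then samples 160% of the newly created minority count from the majority class.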
binput<-SMOTE(Y~.,finput,perc.over = 200,perc.under = 160)
table(binput$Y)
#xgboost
library(xgboost)
library(Matrix)
library(Ckmeans.1d.dp)
set.seed(100)
sam<-sample(nrow(binput),nrow(binput)*0.7)  # split into training and test sets
xtr<-binput[sam,]
xte<-binput[-sam,]
xdtr<-data.matrix(xtr[,-1])  # training-set explanatory-variable matrix
ydtr<-as.numeric(as.character(xtr[,1]))  # response variable; convert the factor back to numeric 0/1 for xgboost
dtr<-xgb.DMatrix(data = xdtr,label=ydtr)
xdte<-data.matrix(xte[,-1])
ydte<-as.numeric(as.character(xte[,1]))
dte<-xgb.DMatrix(data = xdte,label=ydte)
xgb<-xgboost(data = dtr,nrounds = 5,objective = "binary:logistic")  # binary classification objective
importance<-xgb.importance(model = xgb)
xgb.plot.importance(importance)
inpnames<-inputchinese[,-1]  # Chinese display names for the plot; `inputchinese` is assumed to be a separately loaded table of names
inp<-xgb.importance(names(inpnames),model = xgb)
xgb.plot.importance(inp)
pre_xgb<-round(predict(xgb,newdata = dte))
table(pre_xgb,xte$Y)
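# The drop list in the next step is hardcoded by column index; as a hedged alternative
# sketch (the 0.01 gain cutoff is illustrative, not the thesis's actual rule), the same
# list could be derived directly from the importance table:
keepnames<-importance$Feature[importance$Gain>=0.01]
dropidx<-which(!(names(xtr)[-1] %in% keepnames))+1  # +1 offsets the Y column in xtr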
xdtr1<-data.matrix(xtr[,-c(1,6,26,35,17,7,14,13,29,3,5,19,40,10,27,32,30,31,34,4,9,16,18,23,33,38)])  # drop Y and the unimportant variables
dtr1<-xgb.DMatrix(data = xdtr1,label=ydtr)
xdte1<-data.matrix(xte[,-c(1,6,26,35,17,7,14,13,29,3,5,19,40,10,27,32,30,31,34,4,9,16,18,23,33,38)])
dte1<-xgb.DMatrix(data = xdte1,label=ydte)
xgb1<-xgboost(data = dtr1,nrounds = 5)
importance1<-xgb.importance(model = xgb1)
xgb.plot.importance(importance1)
pre_xgb1<-round(predict(xgb1,newdata = dte1))
table(pre_xgb1,xte$Y)  # compare the classification results after dropping the variables (see the accuracy check below)
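# A quick hedged check to make the before/after comparison concrete: overall
# accuracy computed from each confusion table (objects as defined above).
sum(diag(table(pre_xgb,ydte)))/length(ydte)   # accuracy with all variables
sum(diag(table(pre_xgb1,ydte)))/length(ydte)  # accuracy after variable removal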
# SVM kernel selection
library(e1071)
library(pROC)
library(ggplot2)
library(foreach)
library(iterators)
library(parallel)
library(doParallel)
library(rpart)
library(rpart.plot)
library(DMwR)
# new data set after dropping the variables
svminput<-binput[,-c(6,26,35,17,7,14,13,29,3,5,19,40,10,27,32,30,31,34,4,9,16,18,23,33,38)]
names(svminput)[2:16]<-c("X1","X2","X3","X4","X5","X6","X7","X8","X9","X10","X11","X12","X13","X14","X15")
set.seed(100)
sam<-sample(nrow(svminput),nrow(svminput)*0.7)
traindata<-svminput[sam,]
testdata<-svminput[-sam,]
# linear kernel
tsline<-tune.svm(Y~.,data = traindata,kernel="linear",cost = 2^(0:4))
summary(tsline)
svmline<-svm(Y~.,data = traindata,kernel="linear",cost=1)
preline<-predict(svmline,testdata[,-1])
tableline<-table(preline,testdata$Y)
confusionMatrix(tableline)
lineroc<-roc(testdata$Y,factor(preline,ordered = T))
plot(lineroc,print.auc=T,auc.polygon=T,grid=c(0.2,0.2),grid.col=c("grey","grey"),max.auc.polygon=T,auc.polygon.col="lightgrey",print.thres=T,main="Linear Kernel ROC Curve")
# polynomial kernel
tspoly<-tune.svm(Y~.,data = traindata,kernel="polynomial",gamma = 2^(-2:2),cost = 2^(-1:4),degree = 2^(0:3))
summary(tspoly)
svmpoly<-svm(Y~.,data = traindata,kernel="polynomial",gamma = 0.5,cost = 4,degree =4)
prepoly<-predict(svmpoly,testdata[,-1])
tablepoly<-table(prepoly,testdata$Y)
confusionMatrix(tablepoly)
polyroc<-roc(testdata$Y,factor(prepoly,ordered = T))
plot(polyroc,print.auc=T,auc.polygon=T,grid=c(0.2,0.2),grid.col=c("grey","grey"),max.auc.polygon=T,auc.polygon.col="lightgrey",print.thres=T,main="Polynomial Kernel ROC Curve")
# radial (RBF) kernel
tsbf<-tune.svm(Y~.,data = traindata,kernel="radial",gamma=2^(-2:3),cost = 2^(-2:5))
summary(tsbf)
svmbf<-svm(Y~.,data = traindata,kernel="radial",gamma=0.5,cost = 8)
prebf<-predict(svmbf,testdata[,-1])
tablebf<-table(prebf,testdata$Y)
confusionMatrix(tablebf)
bfroc<-roc(testdata$Y,factor(prebf,ordered = T))
plot(bfroc,print.auc=T,auc.polygon=T,grid=c(0.2,0.2),grid.col=c("grey","grey"),max.auc.polygon=T,auc.polygon.col="lightgrey",print.thres=T,main="RBF Kernel ROC Curve")
# export the tuned training/test sets for the Python ensemble step
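# The original post does not show the export itself; a minimal sketch, assuming the
# writexl package and the D:/ paths that the Python script below reads from:
library(writexl)
write_xlsx(traindata,"D:/svmtrain.xlsx")
write_xlsx(testdata,"D:/svmtest.xlsx")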
  2. R did not have a ready-made package for this kind of SVM ensemble, so the ensemble portion is implemented in Python (scikit-learn plus mlxtend).
# Imports trimmed to the ones this script actually uses
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import model_selection
from sklearn.metrics import roc_curve, auc
from sklearn.metrics import classification_report
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.multiclass import OneVsRestClassifier
from mlxtend.classifier import StackingClassifier
svmtrain=pd.read_excel("D:/svmtrain.xlsx")
print(svmtrain.info())
print(svmtrain)
train=pd.DataFrame(svmtrain)
trainy=train.loc[:,"Y"]
trainx=train.drop(["Y"],axis=1)
svmtest=pd.read_excel("D:/svmtest.xlsx")
print(svmtest.info())
test=pd.DataFrame(svmtest)
testy=test.loc[:,"Y"]
testx=test.drop(["Y"],axis=1)
from sklearn import svm
from sklearn.metrics import roc_curve,auc
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.model_selection import GridSearchCV
svc=svm.SVC(gamma="auto",random_state=30)  # named svc so the sklearn.svm module is not shadowed
yscore1=svc.fit(trainx,trainy).decision_function(testx)
fpr,tpr,threshold=roc_curve(testy,yscore1)
roc_auc=auc(fpr,tpr)
print(auc(fpr,tpr))
print(svc.score(testx,testy))
ypred1=svc.predict(testx)
print(classification_report(testy,ypred1,target_names=["class0","class1"]))
confusion_matrix(testy,ypred1)
scores=["roc_auc","precision"]
# Successive tuning rounds; each grid refines the previous one, and the final round is used below
parameters={"gamma":[1e-3,1e-4],"C":[1,10,100]}
parameters={"gamma":[1,0.1,0.01],"C":[1,100,1000,3000]}
parameters={"gamma":[100,10,1],"C":range(1000,5001,1000)}
parameters={"gamma":[0.5,1,3],"C":range(1000,2701,500)}
for score in scores:
    print("# Tuning hyper-parameters for %s" % score)
    grid=GridSearchCV(svm.SVC(kernel="rbf",random_state=30),parameters,cv=5,scoring=score)
    grid.fit(trainx,trainy)
    print(grid.best_params_,grid.best_score_)
svmrbf=svm.SVC(kernel="rbf",C=1000,gamma=0.5,random_state=30)
svmrbf.fit(trainx,trainy)
yscore2=svmrbf.fit(trainx,trainy).decision_function(testx)
fpr,tpr,threshold=roc_curve(testy,yscore2)
roc_auc=auc(fpr,tpr)
print(auc(fpr,tpr))
print(svmrbf.score(testx,testy))
ypred2=svmrbf.predict(testx)
print(classification_report(testy,ypred2,target_names=["class0","class1"]))
confusion_matrix(testy,ypred2)
lw=2
plt.figure(figsize=(10,10))
plt.plot(fpr,tpr,lw=lw,label="ROC curve(area=%0.4f)"%roc_auc)
plt.plot([0,1],[0,1],color="navy",lw=lw,linestyle="--")
plt.xlim([0.0,1.0])
plt.ylim([0.0,1.0])
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.title("SVM ROC Curve")
plt.legend(loc="lower right")
plt.show()
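# The same ROC plotting block recurs for every model below; a small helper like this
# (a sketch, not part of the original post) would avoid the repetition:
def plot_roc(fpr,tpr,roc_auc,title,lw=2):
    plt.figure(figsize=(10,10))
    plt.plot(fpr,tpr,lw=lw,label="ROC curve(area=%0.4f)"%roc_auc)
    plt.plot([0,1],[0,1],color="navy",lw=lw,linestyle="--")
    plt.xlim([0.0,1.0])
    plt.ylim([0.0,1.0])
    plt.xlabel("False Positive Rate")
    plt.ylabel("True Positive Rate")
    plt.title(title)
    plt.legend(loc="lower right")
    plt.show()
# usage: plot_roc(fpr,tpr,roc_auc,"SVM ROC Curve")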
# Bagging ensemble
from sklearn.ensemble import BaggingClassifier
svmbag= BaggingClassifier(svmrbf,random_state=30)
svmbag.fit(trainx,trainy)
yscore5=svmbag.predict_proba(testx)[:,1]
fpr,tpr,threshold=roc_curve(testy,yscore5)
roc_auc=auc(fpr,tpr)
print(auc(fpr,tpr))
print(svmbag.score(testx,testy))
ypred5=svmbag.predict(testx)
print(classification_report(testy,ypred5,target_names=["class0","class1"]))
confusion_matrix(testy,ypred5)
lw=2
plt.figure(figsize=(10,10))
plt.plot(fpr,tpr,lw=lw,label="ROC curve(area=%0.4f)"%roc_auc)
plt.plot([0,1],[0,1],color="navy",lw=lw,linestyle="--")
plt.xlim([0.0,1.0])
plt.ylim([0.0,1.0])
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.title("Bagging ROC Curve")
plt.legend(loc="lower right")
plt.show()
# AdaBoost-SVM ensemble
from sklearn.ensemble import AdaBoostClassifier
Adasvm=AdaBoostClassifier(svmrbf,algorithm="SAMME")
yscore3=Adasvm.fit(trainx,trainy).decision_function(testx)
fpr,tpr,threshold=roc_curve(testy,yscore3)
roc_auc=auc(fpr,tpr)
print(auc(fpr,tpr))
print(Adasvm.score(testx,testy))
ypred3=Adasvm.predict(testx)
print(classification_report(testy,ypred3,target_names=["class0","class1"]))
confusion_matrix(testy,ypred3)
scores=["roc_auc","precision"]
# Two tuning rounds; the second, finer grid is the one used below
parameters={"n_estimators":[10,50,100],"learning_rate":[0.3,0.5]}
parameters={"n_estimators":[200,400,600],"learning_rate":[0.7,0.8]}
for score in scores:
    print("# Tuning hyper-parameters for %s" % score)
    grid=GridSearchCV(AdaBoostClassifier(svmrbf,algorithm="SAMME"),parameters,cv=5,scoring=score)
    grid.fit(trainx,trainy)
    print(grid.best_params_,grid.best_score_)
Adasvm=AdaBoostClassifier(svmrbf,algorithm="SAMME",n_estimators=200,learning_rate=0.7)
yscore4=Adasvm.fit(trainx,trainy).decision_function(testx)
fpr,tpr,threshold=roc_curve(testy,yscore4)
roc_auc=auc(fpr,tpr)
print(auc(fpr,tpr))
print(Adasvm.score(testx,testy))
ypred4=Adasvm.predict(testx)
print(classification_report(testy,ypred4,target_names=["class0","class1"]))
confusion_matrix(testy,ypred4)
lw=2
plt.figure(figsize=(10,10))
plt.plot(fpr,tpr,lw=lw,label="ROC curve(area=%0.4f)"%roc_auc)
plt.plot([0,1],[0,1],color="navy",lw=lw,linestyle="--")
plt.xlim([0.0,1.0])
plt.ylim([0.0,1.0])
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.title("AdaBoost ROC Curve")
plt.legend(loc="lower right")
plt.show()
#Stacking
svc=svm.SVC(gamma="auto",random_state=30)  # baseline SVC again, without shadowing the svm module
yscore6=svc.fit(trainx,trainy).decision_function(testx)
fpr,tpr,threshold=roc_curve(testy,yscore6)
print(auc(fpr,tpr))
print(svc.score(testx,testy))
scores=["roc_auc","precision"]
# linear kernel; two tuning rounds, the second refining the C range
parameters={"C":[0.1,1,10,100,1000]}
parameters={"C":range(1000,5001,1000)}
for score in scores:
    print("# Tuning hyper-parameters for %s" % score)
    grid=GridSearchCV(svm.SVC(kernel="linear",random_state=30),parameters,cv=5,scoring=score)
    grid.fit(trainx,trainy)
    print(grid.best_params_,grid.best_score_)
svmlinear=svm.SVC(kernel="linear",C=5000,random_state=30)
yscore7=svmlinear.fit(trainx,trainy).decision_function(testx)
fpr,tpr,threshold=roc_curve(testy,yscore7)
print(auc(fpr,tpr))
print(svmlinear.score(testx,testy))
# polynomial kernel
# Successive tuning rounds; the final grid over gamma and degree is the one used below
parameters={"gamma":[1e-3,1e-4],"coef0":[1,10,1000]}
parameters={"gamma":[0.1,0.001],"coef0":[100,1000,3000]}
parameters={"gamma":[0.001,0.1,1],"coef0":[50,100,1000]}
parameters={"gamma":[1e-3,1e-4,0.1],"degree":[2,3,4]}
for score in scores:
    print("# Tuning hyper-parameters for %s" % score)
    grid=GridSearchCV(svm.SVC(kernel="poly",coef0=100,random_state=30),parameters,cv=5,scoring=score)
    grid.fit(trainx,trainy)
    print(grid.best_params_,grid.best_score_)
svmpoly=svm.SVC(kernel="poly",gamma=0.1,degree=3,coef0=100,random_state=30)
yscore8=svmpoly.fit(trainx,trainy).decision_function(testx)
fpr,tpr,threshold=roc_curve(testy,yscore8)
print(auc(fpr,tpr))
print(svmpoly.score(testx,testy))
ker=svm.SVC(kernel="linear",C=5000,random_state=30,probability=True)
poly=svm.SVC(kernel="poly",gamma=0.1,degree=3,coef0=100,random_state=30,probability=True)
rbf=svm.SVC(kernel="rbf",C=1000,gamma=3,random_state=30,probability=True)
svmstacking=StackingClassifier(classifiers=[rbf,poly],meta_classifier=ker,use_probas=True,verbose=3)
svmstacking.fit(trainx,trainy)
yscore9=svmstacking.predict_proba(testx)[:,1]
fpr,tpr,threshold=roc_curve(testy,yscore9)
roc_auc=auc(fpr,tpr)
print(auc(fpr,tpr))
print(svmstacking.score(testx,testy))
ypred6=svmstacking.predict(testx)
print(classification_report(testy,ypred6,target_names=["class0","class1"]))
confusion_matrix(testy,ypred6)
lw=2
plt.figure(figsize=(10,10))
plt.plot(fpr,tpr,lw=lw,label="ROC curve(area=%0.4f)"%roc_auc)
plt.plot([0,1],[0,1],color="navy",lw=lw,linestyle="--")
plt.xlim([0.0,1.0])
plt.ylim([0.0,1.0])
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.title("Stacking ROC Curve")
plt.legend(loc="lower right")
plt.show()
# Logistic regression (back in R; svmtrain/svmtest are read back in below)
library(caret)
library(pROC)
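# svmtrain/svmtest are not created in the R code above; a minimal sketch, assuming
# they are the train/test sets exported to Excel for the Python step:
library(readxl)
svmtrain<-data.frame(read_excel("D:/svmtrain.xlsx"))
svmtest<-data.frame(read_excel("D:/svmtest.xlsx"))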
lg<-glm(Y~.,data=svmtrain,family = binomial(link = "logit"))
summary(lg)
testy<-svmtest$Y
prelg<-predict.glm(lg,type = "response",newdata = svmtest)
prelg<-ifelse(prelg>0.5,1,0)
testy<-c(t(testy))
tablelg<-table(testy,prelg)
confusionMatrix(tablelg)
lgroc<-roc(testy,prelg)
plot(lgroc,print.auc=T,auc.polygon=T,grid=c(0.2,0.2),grid.col=c("grey","grey"),max.auc.polygon=T,auc.polygon.col="lightgrey",print.thres=T,main="Logistic Regression ROC Curve")
#KNN
library(kknn)
library(pROC)
library(caret)
trainy<-factor(svmtrain$Y)
testy<-factor(svmtest$Y)
svmtrainkn<-data.frame(trainy,svmtrain[,-1])
names(svmtrainkn)[1]<-"Y"
svmtestkn<-data.frame(testy,svmtest[,-1])
names(svmtestkn)[1]<-"Y"
kn<-kknn(Y~.,svmtrainkn,svmtestkn,k=5,distance = 2)
prekn<-fitted(kn)
tablekn<-table(testy,prekn)
confusionMatrix(tablekn)
knn_roc<-roc(testy,factor(prekn,ordered = T))
plot(knn_roc,print.auc=T,auc.polygon=T,grid=c(0.2,0.2),grid.col=c("grey","grey"),max.auc.polygon=T,auc.polygon.col="lightgrey",print.thres=T,main="KNN ROC Curve")
# Random forest
library(randomForest)
rate<-numeric(0)  # error rate for each candidate mtry, filled in the loop below
trainy<-svmtrain$Y
trainy<-factor(trainy)
testy<-svmtest$Y
testy<-factor(testy)
n<-ncol(svmtrain)
for(i in 1:(n-1)){
  set.seed(123)
  rf<-randomForest(trainy~.,data=svmtrain[,-1],mtry=i,ntree=500)
  rate[i]<-mean(rf$err.rate)
  print(rf)
}
plot(rate,type = "o",col="grey",xlab = "mtry",ylab = "error rate",main = "RF error rate")
set.seed(123)
rf1<-randomForest(trainy~.,data = svmtrain[,-1],mtry=5,ntree=500)
plot(rf1,main = "num of tree")
rfmodel<-randomForest(trainy~.,data = svmtrain[,-1],mtry=5,ntree=200,importance=T,proximity=T)
impor<-importance(rfmodel)
barplot(rfmodel$importance[,1],main = "Variable Importance")
box()
importance(rfmodel,type=1)
prerf<-predict(rfmodel,newdata = svmtest[,-1])  # class predictions; randomForest has no probability= argument (use type="prob" for probabilities)
tablerf<-table(testy,prerf)
confusionMatrix(tablerf)
rocrf<-roc(testy,factor(prerf,ordered=T))
plot(rocrf,print.auc=T,auc.polygon=T,grid=c(0.2,0.2),grid.col=c("grey","grey"),max.auc.polygon=T,auc.polygon.col="lightgrey",print.thres=T,main="Random Forest ROC Curve")
# Neural network
install.packages("neuralnet")
library(neuralnet)
netw<-neuralnet(trainy~.,svmtrain[,-1],hidden = 3)
netw
plot(netw)
prenet<-compute(netw,svmtest[,-1])$net.result
pren<-c("0","1")[apply(prenet,1,which.max)]  # pick the class whose output node is larger
tablenetw<-table(testy,pren)
rocnet<-roc(testy,factor(pren,ordered=T))
plot(rocnet,print.auc=T,auc.polygon=T,grid=c(0.2,0.2),grid.col=c("grey","grey"),max.auc.polygon=T,auc.polygon.col="lightgrey",print.thres=T,main="Neural Network ROC Curve")
#New Stacking
svmtrain=pd.read_excel("D:/svmtrain.xlsx")
train=pd.DataFrame(svmtrain)
svmtest=pd.read_excel("D:/svmtest.xlsx")
test=pd.DataFrame(svmtest)
trainy=train.loc[:,"Y"]
trainx=train.drop(["Y"],axis=1)
testy=test.loc[:,"Y"]
testx=test.drop(["Y"],axis=1)
clf1=RandomForestClassifier(n_estimators=200,random_state=30)
clf2=GaussianNB()
clf3=KNeighborsClassifier()
lr=LogisticRegression()
sclf=StackingClassifier(classifiers=[clf1,clf2,clf3],meta_classifier=lr)
print("3-fold cross validation:\n")
for clf,label in zip([clf1,clf2,clf3,sclf],["Random Forest","Naive Bayes","KNN","StackingClassifier"]):
    scores=model_selection.cross_val_score(clf,trainx,trainy,cv=3,scoring="accuracy")
    print("Accuracy:%0.2f(+/-%0.2f)[%s]"%(scores.mean(),scores.std(),label))
classifier=OneVsRestClassifier(sclf)
y_score1=classifier.fit(trainx,trainy).predict_proba(testx)[:,1]  # P(class 1); for binary data there is no [:,2] column
fpr,tpr,threshold=roc_curve(testy,y_score1)
roc_auc=auc(fpr,tpr)
pre=classifier.predict(testx)
print(classification_report(testy,pre,target_names=["class0","class1"]))
#ROC
lw=2
plt.figure(figsize=(10,10))
plt.plot(fpr,tpr,lw=lw,label="ROC curve(area=%0.4f)"%roc_auc)
plt.plot([0,1],[0,1],color="navy",lw=lw,linestyle="--")
plt.xlim([0.0,1.0])
plt.ylim([0.0,1.0])
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.title("Stacking ROC Curve")
plt.legend(loc="lower right")
plt.show()
