Create the four dimension tables in HBase

create table gmall_base_category3 (id varchar primary key, info.name varchar, info.category2_id varchar) SALT_BUCKETS = 3;

create table gmall_base_trademark (id varchar primary key, info.tm_name varchar) SALT_BUCKETS = 3;

create table gmall_sku_info (id varchar primary key, info.spu_id varchar, info.price varchar, info.sku_name varchar, info.tm_id varchar, info.category3_id varchar, info.create_time varchar, info.category3_name varchar, info.spu_name varchar, info.tm_name varchar) SALT_BUCKETS = 3;

create table gmall_spu_info (id varchar primary key, info.spu_name varchar) SALT_BUCKETS = 3;
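
These statements are run in the Phoenix client (for example sqlline); SALT_BUCKETS = 3 pre-splits each table into three salted buckets so that writes do not hotspot on a single region. If you would rather create the tables from code, a minimal sketch over the Phoenix JDBC thick client is shown below. Only the ZooKeeper quorum hadoop102,hadoop103,hadoop104:2181 is taken from the apps later in this post; the helper object itself is illustrative and not part of the project.

import java.sql.DriverManager

// Illustrative only: run the dimension-table DDL statements over Phoenix JDBC.
object CreateDimTables {
  def main(args: Array[String]): Unit = {
    Class.forName("org.apache.phoenix.jdbc.PhoenixDriver")
    // Phoenix thick-client JDBC URL: jdbc:phoenix:<zookeeper quorum>
    val conn = DriverManager.getConnection("jdbc:phoenix:hadoop102,hadoop103,hadoop104:2181")
    val stmt = conn.createStatement()
    val ddls = Seq(
      "create table if not exists gmall_base_trademark (id varchar primary key, info.tm_name varchar) SALT_BUCKETS = 3",
      "create table if not exists gmall_spu_info (id varchar primary key, info.spu_name varchar) SALT_BUCKETS = 3"
      // the other two tables follow the same pattern
    )
    ddls.foreach(stmt.executeUpdate)  // Phoenix DDL runs like any other statement
    conn.commit()
    conn.close()
  }
}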

Overall bean structure

Create OrderDetail

scala\com\atguigu\gmall\realtime\bean\OrderDetail.scala

case class OrderDetail(
  id: Long,
  order_id: Long,
  sku_id: Long,
  order_price: Double,
  sku_num: Long,
  sku_name: String,
  create_time: String,
  var spu_id: Long,
  var tm_id: Long,
  var category3_id: Long,
  var spu_name: String,
  var tm_name: String,
  var category3_name: String
)

Create the sku, spu, trademark, and category3 beans

scala\com\atguigu\gmall\realtime\bean\dim\BaseCategory3.scala

case class BaseCategory3(id: String, name: String, category2_id: String)

scala\com\atguigu\gmall\realtime\bean\dim\BaseTrademark.scala

case class BaseTrademark(tm_id: String, tm_name: String)

scala\com\atguigu\gmall\realtime\bean\dim\SkuInfo.scala

case class SkuInfo(
  id: String,
  spu_id: String,
  price: String,
  sku_name: String,
  tm_id: String,
  category3_id: String,
  create_time: String,
  var category3_name: String,
  var spu_name: String,
  var tm_name: String
)

scala\com\atguigu\gmall\realtime\bean\dim\SpuInfo.scala

case class SpuInfo(id: String, spu_name: String)

BaseCategory3App

scala\com\atguigu\gmall\realtime\app\dim\BaseCategory3App.scala

import com.alibaba.fastjson.JSON
import com.atguigu.gmall.realtime.bean.dim.BaseCategory3
import com.atguigu.gmall.realtime.utils.{MyKafkaUtil, OffsetManagerUtil}
import org.apache.hadoop.conf.Configuration
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.TopicPartition
import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka010.{HasOffsetRanges, OffsetRange}
import org.apache.spark.streaming.{Seconds, StreamingContext}

object BaseCategory3App {
  def main(args: Array[String]): Unit = {
    val sparkConf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("dim_base_category3_app")
    val ssc = new StreamingContext(sparkConf, Seconds(5))
    val topic = "ODS_T_BASE_CATEGORY3"
    val groupId = "base_category3_group"

    //////////////////// Offset handling ////////////////////
    val offset: Map[TopicPartition, Long] = OffsetManagerUtil.getOffset(groupId, topic)
    var inputDstream: InputDStream[ConsumerRecord[String, String]] = null
    // If an offset was found in Redis, resume consuming Kafka from it; otherwise start from Kafka's default (latest) offsets
    if (offset != null && offset.size > 0) {
      inputDstream = MyKafkaUtil.getKafkaStream(topic, ssc, offset, groupId)
    } else {
      inputDstream = MyKafkaUtil.getKafkaStream(topic, ssc, groupId)
    }

    // Capture the offset ranges of each batch
    var offsetRanges: Array[OffsetRange] = null
    val inputGetOffsetDstream: DStream[ConsumerRecord[String, String]] = inputDstream.transform { rdd =>
      offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
      rdd
    }

    // Convert each record into a BaseCategory3 bean
    val objectDstream: DStream[BaseCategory3] = inputGetOffsetDstream.map { record =>
      val jsonStr: String = record.value()
      val obj: BaseCategory3 = JSON.parseObject(jsonStr, classOf[BaseCategory3])
      obj
    }

    // Save to HBase via Phoenix, then commit the offsets
    objectDstream.foreachRDD { rdd =>
      import org.apache.phoenix.spark._
      rdd.saveToPhoenix("GMALL_BASE_CATEGORY3", Seq("ID", "NAME", "CATEGORY2_ID"), new Configuration, Some("hadoop102,hadoop103,hadoop104:2181"))
      OffsetManagerUtil.saveOffset(groupId, topic, offsetRanges)
    }

    ssc.start()
    ssc.awaitTermination()
  }
}
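
MyKafkaUtil and OffsetManagerUtil were written on earlier days of this project and are not reproduced in this post. For readers following along without that code, the sketch below shows roughly how the two getKafkaStream overloads used above can be built on the spark-streaming-kafka-0-10 API; the broker list and consumer settings are assumptions, not the project's actual configuration.

import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}

// Illustrative sketch of MyKafkaUtil (broker list and consumer settings are assumptions)
object MyKafkaUtil {
  private def kafkaParams(groupId: String): Map[String, Object] = Map(
    "bootstrap.servers" -> "hadoop102:9092,hadoop103:9092,hadoop104:9092",
    "key.deserializer" -> classOf[StringDeserializer],
    "value.deserializer" -> classOf[StringDeserializer],
    "group.id" -> groupId,
    "auto.offset.reset" -> "latest",
    "enable.auto.commit" -> (false: java.lang.Boolean) // offsets are committed manually to Redis
  )

  // Start from Kafka's default (latest) offsets
  def getKafkaStream(topic: String, ssc: StreamingContext, groupId: String): InputDStream[ConsumerRecord[String, String]] =
    KafkaUtils.createDirectStream[String, String](
      ssc,
      LocationStrategies.PreferConsistent,
      ConsumerStrategies.Subscribe[String, String](Seq(topic), kafkaParams(groupId))
    )

  // Start from offsets previously saved in Redis
  def getKafkaStream(topic: String, ssc: StreamingContext, offsets: Map[TopicPartition, Long], groupId: String): InputDStream[ConsumerRecord[String, String]] =
    KafkaUtils.createDirectStream[String, String](
      ssc,
      LocationStrategies.PreferConsistent,
      ConsumerStrategies.Subscribe[String, String](Seq(topic), kafkaParams(groupId), offsets)
    )
}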

BaseTrademarkApp

scala\com\atguigu\gmall\realtime\app\dim\BaseTrademarkApp.scala

import com.alibaba.fastjson.JSON
import com.atguigu.gmall.realtime.bean.dim.BaseTrademark
import com.atguigu.gmall.realtime.utils.{MyKafkaUtil, OffsetManagerUtil}
import org.apache.hadoop.conf.Configuration
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.TopicPartition
import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka010.{HasOffsetRanges, OffsetRange}
import org.apache.spark.streaming.{Seconds, StreamingContext}

object BaseTrademarkApp {
  def main(args: Array[String]): Unit = {
    val sparkConf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("dim_base_trademark_app")
    val ssc = new StreamingContext(sparkConf, Seconds(5))
    val topic = "ODS_T_BASE_TRADEMARK"
    val groupId = "dim_base_trademark_group"

    //////////////////// Offset handling ////////////////////
    val offset: Map[TopicPartition, Long] = OffsetManagerUtil.getOffset(groupId, topic)
    var inputDstream: InputDStream[ConsumerRecord[String, String]] = null
    // If an offset was found in Redis, resume consuming Kafka from it; otherwise start from Kafka's default (latest) offsets
    if (offset != null && offset.size > 0) {
      inputDstream = MyKafkaUtil.getKafkaStream(topic, ssc, offset, groupId)
    } else {
      inputDstream = MyKafkaUtil.getKafkaStream(topic, ssc, groupId)
    }

    // Capture the offset ranges of each batch
    var offsetRanges: Array[OffsetRange] = null
    val inputGetOffsetDstream: DStream[ConsumerRecord[String, String]] = inputDstream.transform { rdd =>
      offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
      rdd
    }

    // Convert each record into a BaseTrademark bean
    val objectDstream: DStream[BaseTrademark] = inputGetOffsetDstream.map { record =>
      val jsonStr: String = record.value()
      val obj: BaseTrademark = JSON.parseObject(jsonStr, classOf[BaseTrademark])
      obj
    }

    // Save to HBase via Phoenix, then commit the offsets
    objectDstream.foreachRDD { rdd =>
      import org.apache.phoenix.spark._
      rdd.saveToPhoenix("GMALL_BASE_TRADEMARK", Seq("ID", "TM_NAME"), new Configuration, Some("hadoop102,hadoop103,hadoop104:2181"))
      OffsetManagerUtil.saveOffset(groupId, topic, offsetRanges)
    }

    ssc.start()
    ssc.awaitTermination()
  }
}
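
The same caveat applies to OffsetManagerUtil: the apps above only rely on getOffset and saveOffset, backed by Redis. A minimal sketch of that interface is given below; the Redis host/port and the key layout (one hash per topic + group, partition -> offset) are assumptions, not the project's actual code.

import java.util

import org.apache.kafka.common.TopicPartition
import org.apache.spark.streaming.kafka010.OffsetRange
import redis.clients.jedis.Jedis

import scala.collection.JavaConverters._

// Illustrative sketch of a Redis-backed offset manager (host, port and key layout are assumptions)
object OffsetManagerUtil {

  // Read the last committed offsets for a consumer group + topic; returns null if nothing is stored yet
  def getOffset(groupId: String, topic: String): Map[TopicPartition, Long] = {
    val jedis = new Jedis("hadoop102", 6379)
    val offsetKey = "offset:" + topic + ":" + groupId
    val offsetMap: util.Map[String, String] = jedis.hgetAll(offsetKey)
    jedis.close()
    if (offsetMap == null || offsetMap.isEmpty) {
      null
    } else {
      offsetMap.asScala.map { case (partition, offset) =>
        new TopicPartition(topic, partition.toInt) -> offset.toLong
      }.toMap
    }
  }

  // Persist the end offsets of the batch that was just processed
  def saveOffset(groupId: String, topic: String, offsetRanges: Array[OffsetRange]): Unit = {
    if (offsetRanges != null && offsetRanges.nonEmpty) {
      val offsetKey = "offset:" + topic + ":" + groupId
      val offsetMap = new util.HashMap[String, String]()
      for (range <- offsetRanges) {
        offsetMap.put(range.partition.toString, range.untilOffset.toString)
      }
      val jedis = new Jedis("hadoop102", 6379)
      jedis.hmset(offsetKey, offsetMap)
      jedis.close()
    }
  }
}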

SkuInfoApp

scala\com\atguigu\gmall\realtime\app\dim\SkuInfoApp.scala

import com.alibaba.fastjson.{JSON, JSONObject}
import com.atguigu.gmall.realtime.bean.dim.SkuInfo
import com.atguigu.gmall.realtime.utils.{MyKafkaUtil, OffsetManagerUtil, PhoenixUtil}
import org.apache.hadoop.conf.Configuration
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.TopicPartition
import org.apache.spark.SparkConf
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka010.{HasOffsetRanges, OffsetRange}
import org.apache.spark.streaming.{Seconds, StreamingContext}

object SkuInfoApp {
  def main(args: Array[String]): Unit = {
    val sparkConf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("dim_sku_info_app")
    val ssc = new StreamingContext(sparkConf, Seconds(5))
    val topic = "ODS_T_SKU_INFO"
    val groupId = "dim_sku_info_group"

    //////////////////// Offset handling ////////////////////
    val offset: Map[TopicPartition, Long] = OffsetManagerUtil.getOffset(groupId, topic)
    var inputDstream: InputDStream[ConsumerRecord[String, String]] = null
    // If an offset was found in Redis, resume consuming Kafka from it; otherwise start from Kafka's default (latest) offsets
    if (offset != null && offset.size > 0) {
      inputDstream = MyKafkaUtil.getKafkaStream(topic, ssc, offset, groupId)
    } else {
      inputDstream = MyKafkaUtil.getKafkaStream(topic, ssc, groupId)
    }

    // Capture the offset ranges of each batch
    var offsetRanges: Array[OffsetRange] = null
    val inputGetOffsetDstream: DStream[ConsumerRecord[String, String]] = inputDstream.transform { rdd =>
      offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
      rdd
    }

    // Convert each record into a SkuInfo bean
    val objectDstream: DStream[SkuInfo] = inputGetOffsetDstream.map { record =>
      val jsonStr: String = record.value()
      val obj: SkuInfo = JSON.parseObject(jsonStr, classOf[SkuInfo])
      obj
    }

    // Join the sku stream with the other dimension tables (category3, trademark, spu)
    val skuInfoDstream: DStream[SkuInfo] = objectDstream.transform { rdd =>
      if (rdd.count() > 0) {
        // category3
        val category3Sql = "select id, name from gmall_base_category3" // executed on the driver, once per batch
        val category3List: List[JSONObject] = PhoenixUtil.queryList(category3Sql)
        val category3Map: Map[String, JSONObject] = category3List.map(jsonObj => (jsonObj.getString("ID"), jsonObj)).toMap

        // trademark
        val tmSql = "select id, tm_name from gmall_base_trademark"
        val tmList: List[JSONObject] = PhoenixUtil.queryList(tmSql)
        val tmMap: Map[String, JSONObject] = tmList.map(jsonObj => (jsonObj.getString("ID"), jsonObj)).toMap

        // spu
        val spuSql = "select id, spu_name from gmall_spu_info"
        val spuList: List[JSONObject] = PhoenixUtil.queryList(spuSql)
        val spuMap: Map[String, JSONObject] = spuList.map(jsonObj => (jsonObj.getString("ID"), jsonObj)).toMap

        // Gather the three maps into one list and broadcast it to the executors
        val dimList = List[Map[String, JSONObject]](category3Map, tmMap, spuMap)
        val dimBC: Broadcast[List[Map[String, JSONObject]]] = ssc.sparkContext.broadcast(dimList)

        val skuInfoRDD: RDD[SkuInfo] = rdd.mapPartitions { skuInfoItr => // runs on the executors
          val dimList: List[Map[String, JSONObject]] = dimBC.value // read the broadcast value
          val category3Map: Map[String, JSONObject] = dimList(0)
          val tmMap: Map[String, JSONObject] = dimList(1)
          val spuMap: Map[String, JSONObject] = dimList(2)

          val skuInfoList: List[SkuInfo] = skuInfoItr.toList
          for (skuInfo <- skuInfoList) {
            val category3JsonObj: JSONObject = category3Map.getOrElse(skuInfo.category3_id, null) // look up in the map
            if (category3JsonObj != null) {
              skuInfo.category3_name = category3JsonObj.getString("NAME")
            }
            val tmJsonObj: JSONObject = tmMap.getOrElse(skuInfo.tm_id, null)
            if (tmJsonObj != null) {
              skuInfo.tm_name = tmJsonObj.getString("TM_NAME")
            }
            val spuJsonObj: JSONObject = spuMap.getOrElse(skuInfo.spu_id, null)
            if (spuJsonObj != null) {
              skuInfo.spu_name = spuJsonObj.getString("SPU_NAME")
            }
          }
          skuInfoList.toIterator
        }
        skuInfoRDD
      } else {
        rdd
      }
    }

    // Save to HBase via Phoenix, then commit the offsets
    skuInfoDstream.foreachRDD { rdd =>
      import org.apache.phoenix.spark._
      rdd.saveToPhoenix("GMALL_SKU_INFO", Seq("ID", "SPU_ID", "PRICE", "SKU_NAME", "TM_ID", "CATEGORY3_ID", "CREATE_TIME", "CATEGORY3_NAME", "SPU_NAME", "TM_NAME"), new Configuration, Some("hadoop102,hadoop103,hadoop104:2181"))
      OffsetManagerUtil.saveOffset(groupId, topic, offsetRanges)
    }

    ssc.start()
    ssc.awaitTermination()
  }
}
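
PhoenixUtil.queryList is another helper from an earlier day: it runs a query on the driver and returns each row as a fastjson JSONObject keyed by the (upper-case) column label, which is why the lookups above use keys such as "ID", "NAME" and "TM_NAME". A minimal JDBC sketch of it is shown below; only the ZooKeeper quorum is taken from the code above, the rest is an assumption.

import java.sql.DriverManager

import com.alibaba.fastjson.JSONObject

import scala.collection.mutable.ListBuffer

// Illustrative sketch of PhoenixUtil: run a query on the driver, return one JSONObject per row
object PhoenixUtil {
  def queryList(sql: String): List[JSONObject] = {
    val resultList = new ListBuffer[JSONObject]()
    Class.forName("org.apache.phoenix.jdbc.PhoenixDriver")
    val conn = DriverManager.getConnection("jdbc:phoenix:hadoop102,hadoop103,hadoop104:2181")
    val stmt = conn.createStatement()
    val rs = stmt.executeQuery(sql)
    val md = rs.getMetaData
    while (rs.next()) {
      val rowJson = new JSONObject()
      // Column labels come back upper-cased by Phoenix, e.g. ID, TM_NAME, SPU_NAME
      for (i <- 1 to md.getColumnCount) {
        rowJson.put(md.getColumnLabel(i), rs.getObject(i))
      }
      resultList += rowJson
    }
    rs.close()
    stmt.close()
    conn.close()
    resultList.toList
  }
}

Because the three dimension tables are small, querying them once per batch on the driver and broadcasting the resulting maps is much cheaper than issuing a Phoenix lookup for every sku record.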

SpuInfoApp

scala\com\atguigu\gmall\realtime\app\dim\SpuInfoApp.scala

import com.alibaba.fastjson.JSON
import com.atguigu.gmall.realtime.bean.dim.SpuInfo
import com.atguigu.gmall.realtime.utils.{MyKafkaUtil, OffsetManagerUtil}
import org.apache.hadoop.conf.Configuration
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.TopicPartition
import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka010.{HasOffsetRanges, OffsetRange}
import org.apache.spark.streaming.{Seconds, StreamingContext}

object SpuInfoApp {
  def main(args: Array[String]): Unit = {
    val sparkConf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("dim_spu_info_app")
    val ssc = new StreamingContext(sparkConf, Seconds(5))
    val topic = "ODS_T_SPU_INFO"
    val groupId = "dim_spu_info_group"

    //////////////////// Offset handling ////////////////////
    val offset: Map[TopicPartition, Long] = OffsetManagerUtil.getOffset(groupId, topic)
    var inputDstream: InputDStream[ConsumerRecord[String, String]] = null
    // If an offset was found in Redis, resume consuming Kafka from it; otherwise start from Kafka's default (latest) offsets
    if (offset != null && offset.size > 0) {
      inputDstream = MyKafkaUtil.getKafkaStream(topic, ssc, offset, groupId)
      //startInputDstream.map(_.value).print(1000)
    } else {
      inputDstream = MyKafkaUtil.getKafkaStream(topic, ssc, groupId)
    }

    // Capture the offset ranges of each batch
    var offsetRanges: Array[OffsetRange] = null
    val inputGetOffsetDstream: DStream[ConsumerRecord[String, String]] = inputDstream.transform { rdd =>
      offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
      rdd
    }

    // Convert each record into a SpuInfo bean
    val objectDstream: DStream[SpuInfo] = inputGetOffsetDstream.map { record =>
      val jsonStr: String = record.value()
      val obj: SpuInfo = JSON.parseObject(jsonStr, classOf[SpuInfo])
      obj
    }

    // Save to HBase via Phoenix, then commit the offsets
    objectDstream.foreachRDD { rdd =>
      import org.apache.phoenix.spark._
      rdd.saveToPhoenix("GMALL_SPU_INFO", Seq("ID", "SPU_NAME"), new Configuration, Some("hadoop102,hadoop103,hadoop104:2181"))
      OffsetManagerUtil.saveOffset(groupId, topic, offsetRanges)
    }

    ssc.start()
    ssc.awaitTermination()
  }
}

OrderDetailApp

Nothing is written out here yet; as with orderInfo, the template has not been created, so the write-out code is only commented out for now.

scala\com\atguigu\gmall\realtime\app\dw\OrderDetailApp.scala

import java.text.SimpleDateFormat
import java.util.Date

import com.alibaba.fastjson.serializer.SerializeConfig
import com.alibaba.fastjson.{JSON, JSONObject}
import com.atguigu.gmall.realtime.bean.OrderDetail
import com.atguigu.gmall.realtime.utils.{MyKafkaSinkUtil, MyKafkaUtil, OffsetManagerUtil, PhoenixUtil}
import org.apache.hadoop.conf.Configuration
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.TopicPartition
import org.apache.spark.SparkConf
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka010.{HasOffsetRanges, OffsetRange}
import org.apache.spark.streaming.{Seconds, StreamingContext}

object OrderDetailApp {
  def main(args: Array[String]): Unit = {
    val sparkConf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("dw_order_detail_app")
    val ssc = new StreamingContext(sparkConf, Seconds(5))
    val topic = "ODS_T_ORDER_DETAIL"
    val groupId = "dw_order_detail_group"

    //////////////////// Offset handling ////////////////////
    val offset: Map[TopicPartition, Long] = OffsetManagerUtil.getOffset(groupId, topic)
    var inputDstream: InputDStream[ConsumerRecord[String, String]] = null
    // If an offset was found in Redis, resume consuming Kafka from it; otherwise start from Kafka's default (latest) offsets
    if (offset != null && offset.size > 0) {
      inputDstream = MyKafkaUtil.getKafkaStream(topic, ssc, offset, groupId)
      //startInputDstream.map(_.value).print(1000)
    } else {
      inputDstream = MyKafkaUtil.getKafkaStream(topic, ssc, groupId)
    }

    // Capture the offset ranges of each batch
    var offsetRanges: Array[OffsetRange] = null
    val inputGetOffsetDstream: DStream[ConsumerRecord[String, String]] = inputDstream.transform { rdd =>
      offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
      rdd
    }

    //////////////////// Business logic ////////////////////
    val orderDetailDstream: DStream[OrderDetail] = inputGetOffsetDstream.map { record =>
      val jsonString: String = record.value()
      val orderDetail: OrderDetail = JSON.parseObject(jsonString, classOf[OrderDetail])
      orderDetail
    }

    // Merge in product (sku) information
    val orderDetailWithSkuDstream: DStream[OrderDetail] = orderDetailDstream.mapPartitions { orderDetailItr =>
      val orderDetailList: List[OrderDetail] = orderDetailItr.toList
      if (orderDetailList.size > 0) {
        val skuIdList: List[Long] = orderDetailList.map(_.sku_id)
        val sql = "select id, tm_id, spu_id, category3_id, tm_name, spu_name, category3_name from gmall_sku_info where id in ('" + skuIdList.mkString("','") + "')"
        val skuJsonObjList: List[JSONObject] = PhoenixUtil.queryList(sql)
        val skuJsonObjMap: Map[Long, JSONObject] = skuJsonObjList.map(skuJsonObj => (skuJsonObj.getLongValue("ID"), skuJsonObj)).toMap
        for (orderDetail <- orderDetailList) {
          val skuJsonObj: JSONObject = skuJsonObjMap.getOrElse(orderDetail.sku_id, null)
          orderDetail.spu_id = skuJsonObj.getLong("SPU_ID")
          orderDetail.spu_name = skuJsonObj.getString("SPU_NAME")
          orderDetail.tm_id = skuJsonObj.getLong("TM_ID")
          orderDetail.tm_name = skuJsonObj.getString("TM_NAME")
          orderDetail.category3_id = skuJsonObj.getLong("CATEGORY3_ID")
          orderDetail.category3_name = skuJsonObj.getString("CATEGORY3_NAME")
        }
      }
      orderDetailList.toIterator
    }

    orderDetailWithSkuDstream.cache()
    orderDetailWithSkuDstream.print(1000)

    /* // Write out (to ES) -- left commented out for now
    // println("order count: " + rdd.count())
    orderDetailWithSkuDstream.foreachRDD { rdd =>
      rdd.foreachPartition { orderDetailItr =>
        val orderDetailList: List[OrderDetail] = orderDetailItr.toList
        for (orderDetail <- orderDetailList) {
          MyKafkaSinkUtil.send("DW_ORDER_DETAIL", orderDetail.order_id.toString, JSON.toJSONString(orderDetail, new SerializeConfig(true)))
        }
      }
      OffsetManagerUtil.saveOffset(groupId, topic, offsetRanges)
    }
    */

    OffsetManagerUtil.saveOffset(groupId, topic, offsetRanges)

    ssc.start()
    ssc.awaitTermination()
  }
}
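
MyKafkaSinkUtil, referenced in the commented-out block, is the Kafka producer wrapper from an earlier day of the project. A minimal sketch of the send(topic, key, value) helper it exposes is shown below; the broker list and producer settings are assumptions. Sending with order_id as the key keeps all details of one order in the same partition.

import java.util.Properties

import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
import org.apache.kafka.common.serialization.StringSerializer

// Illustrative sketch of MyKafkaSinkUtil: a lazily created, reused String producer
object MyKafkaSinkUtil {
  private var producer: KafkaProducer[String, String] = null

  private def createProducer(): KafkaProducer[String, String] = {
    val props = new Properties()
    props.put("bootstrap.servers", "hadoop102:9092,hadoop103:9092,hadoop104:9092") // assumed broker list
    props.put("key.serializer", classOf[StringSerializer].getName)
    props.put("value.serializer", classOf[StringSerializer].getName)
    new KafkaProducer[String, String](props)
  }

  // Keyed send: records with the same key land in the same partition
  def send(topic: String, key: String, value: String): Unit = {
    if (producer == null) producer = createProducer()
    producer.send(new ProducerRecord[String, String](topic, key, value))
  }
}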

Import the dimension tables (Maxwell bootstrap)

bin/maxwell-bootstrap --user maxwell --password 123123 --host hadoop102 --database spark_gmall --table base_category3 --client_id maxwell_1
bin/maxwell-bootstrap --user maxwell --password 123123 --host hadoop102 --database spark_gmall --table base_trademark --client_id maxwell_1
bin/maxwell-bootstrap --user maxwell --password 123123 --host hadoop102 --database spark_gmall --table sku_info --client_id maxwell_1
bin/maxwell-bootstrap --user maxwell --password 123123 --host hadoop102 --database spark_gmall --table spu_info --client_id maxwell_1

Bootstrap results for each table (screenshots): base_category3, base_trademark, sku_info, spu_info

OrderDetailApp test

As the output shows, the third-to-last field was not extracted successfully; this will be fixed later.
