Features

Scrape product data from a site, then send the result by email on a schedule.

1. Data scraping

Create a new file, work.py:

#!/usr/bin/python3
# pip3 install requests pandas lxml xlsxwriter openpyxl -i https://pypi.tuna.tsinghua.edu.cn/simple
import re
import requests
import random,time
from lxml import html
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.header import Header
import threading
from queue import Queue
#from func_timeout import FunctionTimedOut, func_timeout
import pandas as pd

gQueue = Queue()

headers = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
    "Accept-Encoding": "gzip, deflate",
    "Accept-Language": "en-US,en;q=0.5",
    "Connection": "keep-alive",
    "Host": "xyk.cebbank.com",  # must match the site being scraped
    "Upgrade-Insecure-Requests": "1",
    "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0"
}


class MyLog:
    """File logger writing to /var/log/my_log/message.log."""

    def __init__(self):
        import logging, os
        self.logger = logging.getLogger()
        self.logger.setLevel(logging.DEBUG)  # master switch for the log level
        logdir = "/var/log/my_log"
        if not os.path.exists(logdir):
            os.mkdir(logdir)
        logfile = logdir + '/message.log'
        fh = logging.FileHandler(logfile, mode='a')
        fh.setLevel(logging.INFO)
        formatter = logging.Formatter("%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s")
        fh.setFormatter(formatter)
        self.logger.addHandler(fh)

    def info(self, mes):
        self.logger.info(mes)

    def debug(self, mes):
        self.logger.debug(mes)

    def warning(self, mes):
        self.logger.warning(mes)

    def error(self, mes):
        self.logger.error(mes)


log = MyLog()
log.info('started')


def get_page_count():
    url = "https://xyk.cebbank.com/jfmall/search?keywords="
    res = requests.get(url, headers=headers, timeout=10)  # headers must be passed as a keyword argument
    res.encoding = 'gbk'  # the Response attribute is .encoding, not .decoding
    log.info(res.text)
    # the parentheses in void(0) have to be escaped to match literally
    result = re.compile(r'<a href="javascript:void\(0\)" class="next" rel="next">(.*?)</a>', re.S)
    page = re.findall(result, res.text)
    log.info(page)


def get_urls(page):
    """Scrape one listing page; return the product URLs and record their points in dict_score."""
    global dict_score
    url0 = "https://xyk.cebbank.com/jfmall/search?keywords=&pageNo=" + str(page)
    req0 = requests.get(url0, headers=headers, timeout=60)
    req0.encoding = 'gbk'
    # re.S lets .*? span line breaks; without it the pattern only matches within one line
    result1 = re.compile('<div class="main-item-list-title">.*?<a href="(.*?)" class="text-hover-black js-filter-title"   rel="noopener noreferrer"', re.S)
    result2 = re.compile('<span class="text-color-red text-font-size-18 text-font-weight-bold">(.*?)</span>', re.S)
    url = re.findall(result1, req0.text)
    jifen = re.findall(result2, req0.text)
    urls = ["https://xyk.cebbank.com" + u1 for u1 in url]
    dict_score = {}
    for i in range(0, len(urls)):
        dict_score[urls[i]] = jifen[i]
    return urls


def deal_size_color(data):
    """Split the scraped spec list into [color, size]; strip commas/quotes that would break the CSV."""
    color = ''
    size = ''
    if len(data) == 0:
        color, size = '无', '无'
    if len(data) == 1:
        if '色' in data[0]:
            color = data[0]
            size = '无'
        else:
            size = data[0]
            color = '无'
    if len(data) == 2:
        if '色' in data[0]:
            color = data[0]
            size = data[1]
        else:
            size = data[0]
            if '色' in data[1]:
                color = data[1]
            else:
                color = '无'
    if ',' in color:
        color = color.replace(',', ';')
    if ',' in size:
        size = size.replace(',', ';')
    if '"' in size:
        size = size.replace('"', '')
    return [color, size]


def get_data(url):
    """Scrape one product page and return a CSV row, or None on failure."""
    try:
        global dict_score
        res = requests.get(url, headers=headers, timeout=60)
        res.encoding = 'utf-8'
        t1 = html.fromstring(res.text)
        name = t1.xpath('//div[@class="product-detail-content-title js-itemId"]/text()')[0].strip()
        duihuan = t1.xpath('//div[@class="text-color-red text-font-weight-bold"]/text()')[0].strip()
        score = dict_score[url]
        size_col = t1.xpath('//span[contains(@class,"meta-title")]/text()')
        sc = deal_size_color(data=size_col)
        size = sc[1].strip()
        color = sc[0].strip()
        get_style = t1.xpath('//span[@class="exchangeWay"]/text()')[0].strip()
        categorys = t1.xpath('//a[@class="js-category-select"]/text()')
        tt = [i.strip() for i in categorys if i.strip()]
        category = tt[3]
        gongying = t1.xpath('//div[@class="real-information"]/span/text()')
        shop = gongying[1]
        shop_call = gongying[3]
        shop_time = gongying[5]
        content = str(name) + ',' + str(score) + ',' + str(color) + ',' + str(size) + ',' \
            + str(get_style) + ',' + str(category) + ',' + str(duihuan) + ',' \
            + str(shop) + ',' + str(shop_call) + ',' + str(shop_time) + ',' + str(url) + '\n'
        return content
    except Exception as e:
        log.info(e)
        log.info("################## no usable response from: %s" % url)


def get_data_all(pages):
    """Producer: walk all listing pages and push one CSV row per product onto gQueue."""
    pages = int(pages)
    for page in range(1, pages + 1):
        log.info('Fetching page %s ...' % page)
        urls_one = get_urls(page)
        log.info("Product URLs on this page: %s" % urls_one)
        if not urls_one:
            continue
        try:
            for i in urls_one:
                content = get_data(i)
                gQueue.put(content)
                time.sleep(3.4)  # throttle between product pages
        except Exception as e:
            log.info(e)
            continue
    log.info('All data fetched.')


def save_data():
    """Consumer: drain gQueue and append each row to today's CSV file."""
    count = 0
    title = '商品名,兑换积分,商品规格,商品颜色,购买方式,分类,兑换,供货商名,供货商电话,供货商工作时间,商品链接\n'
    today = time.strftime("%Y-%m-%d", time.localtime(time.time()))
    file_name = '/root/py/work-' + today + '.csv'
    with open(file_name, 'w', encoding='gbk') as file:
        file.write(title)
    while True:  # runs as a daemon thread and dies with the main thread
        time.sleep(0.1)
        if not gQueue.empty():
            msg = gQueue.get()
            if msg is None:  # get_data() returns None on failure
                continue
            count = count + 1
            today_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
            log.info('%s### row %s ###%s' % (today_time, count, msg))
            with open(file_name, 'a', encoding='gbk') as file:
                file.write(msg)


def send_mail():
    # Sender address and QQ-mail authorisation code. The code is generated on the
    # QQ mail site: Settings -> Account -> enable the POP3/SMTP service.
    from_addr = '......@qq.com'
    password = '......'
    # recipients
    to_addr_qq = '......@qq.com'
    to_addr = '......@nooce.cn'
    smtp_server = 'smtp.qq.com'
    html_msg = """<p>csv文件</p>"""
    # build a multipart message carrying the spreadsheet as an attachment
    msg = MIMEMultipart()
    msg['From'] = Header('Q')
    msg['To'] = Header('珍')
    local_time = time.localtime(time.time())
    today = time.strftime("%Y-%m-%d", local_time)
    today_title = time.strftime("%Y年%m月%d日,", local_time)
    subject = today_title + '来自Q sir的邮件'
    msg['Subject'] = Header(subject, 'utf-8')
    msg.attach(MIMEText(html_msg, 'html', 'utf-8'))
    # attach today's xlsx; the filename below is what the recipient sees
    context = '/root/py/work-' + today + '.xlsx'
    att1 = MIMEText(open(context, 'rb').read(), 'base64', 'utf-8')
    att1["Content-Type"] = 'application/octet-stream'
    att1["Content-Disposition"] = 'attachment; filename="zhuzhu-2022.xlsx"'
    msg.attach(att1)
    try:
        smtpobj = smtplib.SMTP_SSL(smtp_server)
        smtpobj.connect(smtp_server, 465)  # QQ mail SMTP over SSL
        smtpobj.login(from_addr, password)
        smtpobj.sendmail(from_addr, to_addr, msg.as_string())
        smtpobj.sendmail(from_addr, to_addr_qq, msg.as_string())
        log.info("Mail sent successfully!")
    except smtplib.SMTPException:
        log.info("Failed to send the mail")
    finally:
        smtpobj.quit()


def csv_excel():
    """Convert today's CSV into an xlsx file with auto-sized columns."""
    try:
        today = time.strftime("%Y-%m-%d", time.localtime(time.time()))
        xlsFilepath = '/root/py/work-' + today + '.xlsx'
        csv_path = '/root/py/work-' + today + '.csv'
        my_dataframe = pd.read_csv(csv_path, low_memory=False, encoding='gbk')
        writer = pd.ExcelWriter(xlsFilepath, engine='xlsxwriter')
        my_dataframe.to_excel(writer, startrow=1, sheet_name='Sheet1', index=False)
        worksheet = writer.sheets['Sheet1']
        # widen each column to max(longest cell, header length) + 2 padding
        for i, col in enumerate(my_dataframe.columns):
            column_len = my_dataframe[col].astype(str).str.len().max()
            column_len = max(column_len, len(col)) + 2
            worksheet.set_column(i, i, column_len)
        writer.close()  # pandas 2.x removed writer.save(); close() writes the file
        log.info("csv to excel success")
        return 0
    except Exception as e:
        log.info('csv to excel failed, reason: %s' % e)
        return -1


def main():
    try:
        pages = 15
        #pages = func_timeout(10, lambda: input('Enter the total number of pages (an integer): '))
    except ValueError:
        log.info('Invalid input, quitting.')
        return
    #except FunctionTimedOut:
    #    pages = 10
    #    log.info('Input timed out, fetching 10 pages by default.')
    t1 = threading.Thread(target=get_data_all, args=(pages,))
    t2 = threading.Thread(target=save_data)
    t1.daemon = True  # setDaemon(True) is the deprecated spelling
    t2.daemon = True
    t1.start()
    t2.start()
    while True:
        time.sleep(10)
        if t1.is_alive():  # isAlive() was removed in Python 3.9
            continue
        exc = csv_excel()
        if exc != 0:
            return
        log.info("#### about to send the mail ####")
        time.sleep(25)  # brief pause before mailing
        send_mail()
        log.info("#### job done ####")
        break


if __name__ == '__main__':
    main()
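The script pairs a producer thread (get_data_all puts one CSV row per product onto gQueue) with a consumer daemon thread (save_data drains the queue to disk), while the main thread polls t1.is_alive() to know when scraping is done. Here is a stripped-down sketch of that pattern; producer/consumer are illustrative names, not part of work.py:

#!/usr/bin/python3
# Minimal sketch of the thread + queue pattern used by work.py.
import threading
import time
from queue import Queue

q = Queue()

def producer():
    for i in range(5):
        q.put("row %d\n" % i)   # stands in for get_data() building a CSV line
        time.sleep(0.2)

def consumer():
    while True:                 # daemon thread: dies when the main thread exits
        row = q.get()
        print(row, end="")      # stands in for appending to the CSV file

t1 = threading.Thread(target=producer)
t2 = threading.Thread(target=consumer, daemon=True)
t1.start(); t2.start()
t1.join()                       # wait for the producer, like the is_alive() poll
time.sleep(0.5)                 # give the consumer time to drain the queue

Marking the consumer as a daemon means it never has to be stopped explicitly: it dies with the main thread once the producer has finished and the queue is drained.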

2. Schedule the job with Linux crontab

crontab -e

05 09 * * * /usr/bin/python3 /root/py/work.py &
30 13 * * 1 /usr/bin/python3 /root/py/work.py &
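The five fields are minute, hour, day of month, month, and day of week: the first entry runs the scraper every day at 09:05, the second every Monday at 13:30. The trailing & is unnecessary, since cron already runs jobs detached from a terminal. Cron normally mails job output to the local user; a variant that redirects it to a file instead is handier for debugging (the log path below is only an example):

05 09 * * * /usr/bin/python3 /root/py/work.py >> /var/log/work-cron.log 2>&1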

3. Running the task asynchronously with httpx

#!/usr/bin/python3
import re
import httpx
import asyncio
import aiohttp  # used only by the commented-out aiohttp variant in get_data()
import time
from lxml import html
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.header import Header
#import threading
from queue import Queue
import os
import pandas as pd
from func_timeout import FunctionTimedOut, func_timeout

gQueue = Queue()
headers = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
    "Accept-Encoding": "gzip, deflate",
    "Accept-Language": "en-US,en;q=0.5",
    "Connection": "keep-alive",
    "Host": "xyk.cebbank.com",
    "Upgrade-Insecure-Requests": "1",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36"
}
dict_score = {}
url_res_dict = {}
# one shared semaphore actually throttles concurrency; creating a fresh
# Semaphore inside every coroutine (as the original did) limits nothing
gSem = asyncio.Semaphore(500)


def send_mail():
    # sender address and QQ-mail authorisation code
    from_addr = '@qq.com'
    password = ''
    # recipients
    to_addr_qq = '@qq.com'
    to_addr = '@nooce.cn'
    smtp_server = 'smtp.qq.com'
    html_msg = """<p>csv文件</p>"""
    # build a multipart message carrying the spreadsheet as an attachment
    msg = MIMEMultipart()
    msg['From'] = Header('Q')
    msg['To'] = Header('珍')
    local_time = time.localtime(time.time())
    today = time.strftime("%Y-%m-%d", local_time)
    today_title = time.strftime("%Y-%m-%d:", local_time)
    subject = today_title + '来自Q sir的邮件'
    msg['Subject'] = Header(subject, 'utf-8')
    msg.attach(MIMEText(html_msg, 'html', 'utf-8'))
    # attach today's xlsx; the filename below is what the recipient sees
    context = 'work-' + today + '.xlsx'
    att1 = MIMEText(open(context, 'rb').read(), 'base64', 'utf-8')
    att1["Content-Type"] = 'application/octet-stream'
    att1["Content-Disposition"] = 'attachment; filename="zhuzhu-2022.xlsx"'
    msg.attach(att1)
    try:
        smtpobj = smtplib.SMTP_SSL(smtp_server)
        smtpobj.connect(smtp_server, 465)  # QQ mail SMTP over SSL
        smtpobj.login(from_addr, password)
        smtpobj.sendmail(from_addr, to_addr, msg.as_string())
        smtpobj.sendmail(from_addr, to_addr_qq, msg.as_string())
        print("Mail sent successfully!")
    except smtplib.SMTPException:
        print("Failed to send the mail")
    finally:
        smtpobj.quit()


def get_urls(page):
    """Scrape one listing page; return the product URLs and record their points in dict_score."""
    global dict_score
    url0 = "https://xyk.cebbank.com/jfmall/search?keywords=&pageNo=" + str(page)
    with httpx.Client() as client:
        req0 = client.get(url0, headers=headers, timeout=30)
    req0.encoding = 'gbk'  # the Response attribute is .encoding, not .decoding
    # re.S lets .*? span line breaks; without it the pattern only matches within one line
    result1 = re.compile('<div class="main-item-list-title">.*?<a href="(.*?)" class="text-hover-black js-filter-title"   rel="noopener noreferrer"', re.S)
    result2 = re.compile('<span class="text-color-red text-font-size-18 text-font-weight-bold">(.*?)</span>', re.S)
    url = re.findall(result1, req0.text)
    jifen = re.findall(result2, req0.text)
    urls = ["https://xyk.cebbank.com" + u1 for u1 in url]
    if 'This is 403 error page' in req0.text:
        print('Got a 403 error page, skipping.')
        return []  # an empty list keeps the caller's extend() working
    for i in range(0, len(urls)):
        dict_score[urls[i]] = jifen[i]
    return urls


def deal_size_color(data):
    """Split the scraped spec list into [color, size]; strip commas/quotes that would break the CSV."""
    color = ''
    size = ''
    if len(data) == 0:
        color, size = '无', '无'
    if len(data) == 1:
        if '色' in data[0]:
            color = data[0]
            size = '无'
        else:
            size = data[0]
            color = '无'
    if len(data) == 2:
        if '色' in data[0]:
            color = data[0]
            size = data[1]
        else:
            size = data[0]
            if '色' in data[1]:
                color = data[1]
            else:
                color = '无'
    if ',' in color:
        color = color.replace(',', ';')
    if ',' in size:
        size = size.replace(',', ';')
    if '"' in size:
        size = size.replace('"', '')
    return [color, size]


async def get_data(url):
    """Fetch one product page and queue the response for save_csv()."""
    global count
    count += 1
    try:
        async with gSem:  # the shared semaphore caps in-flight requests
            async with httpx.AsyncClient() as client:
                res = await client.get(url, headers=headers, timeout=20)
                url_res_dict[res] = url
                gQueue.put(res)
    except Exception:
        print('Request timed out, skipping this item.')
    # aiohttp variant, kept from the original for reference:
    #async with aiohttp.ClientSession() as session:
    #    async with session.get(url, headers=headers, timeout=30) as response:
    #        res = await response.read()
    #        url_res_dict[res] = url
    #        gQueue.put(res)


def save_csv():
    """Drain gQueue, parse each product page and append one CSV row per item."""
    global url_res_dict, dict_score
    while not gQueue.empty():
        try:
            res = gQueue.get()
            url = url_res_dict[res]
            # aiohttp variant: res is bytes, use html.fromstring(res.decode('UTF-8'))
            res.encoding = 'utf-8'
            t1 = html.fromstring(res.text)
            name = t1.xpath('//div[@class="product-detail-content-title js-itemId"]/text()')[0].strip()
            duihuan = t1.xpath('//div[@class="text-color-red text-font-weight-bold"]/text()')[0].strip()
            score = dict_score[url]
            size_col = t1.xpath('//span[contains(@class,"meta-title")]/text()')
            sc = deal_size_color(data=size_col)
            size = sc[1].strip()
            color = sc[0].strip()
            get_style = t1.xpath('//span[@class="exchangeWay"]/text()')[0].strip()
            categorys = t1.xpath('//a[@class="js-category-select"]/text()')
            tt = [i.strip() for i in categorys if i.strip()]
            category = tt[3]
            gongying = t1.xpath('//div[@class="real-information"]/span/text()')
            shop = gongying[1]
            shop_call = gongying[3]
            shop_time = gongying[5]
            content = str(name) + ',' + str(score) + ',' + str(color) + ',' + str(size) + ',' \
                + str(get_style) + ',' + str(category) + ',' + str(duihuan) + ',' \
                + str(shop) + ',' + str(shop_call) + ',' + str(shop_time) + ',' + str(url) + '\n'
            today = time.strftime("%Y-%m-%d", time.localtime(time.time()))
            file_name = 'work-' + today + '.csv'
            with open(file_name, 'a', encoding='gbk') as file:
                file.write(content)
        except Exception as e:
            print(e)


def csv_excel():
    """Convert today's CSV into an xlsx file, sorted ascending by points."""
    print("Converting to Excel ...")
    try:
        today = time.strftime("%Y-%m-%d", time.localtime(time.time()))
        xlsFilepath = 'work-' + today + '.xlsx'
        csv_path = 'work-' + today + '.csv'
        my_dataframe = pd.read_csv(csv_path, low_memory=False, encoding='gbk')
        if not len(my_dataframe):
            print("no data")
            return -1
        # sort by the points column of the CSV header (kept in Chinese to match the data)
        my_dataframe = my_dataframe.sort_values(by='兑换积分')
        writer = pd.ExcelWriter(xlsFilepath, engine='xlsxwriter')
        my_dataframe.to_excel(writer, startrow=1, sheet_name='Sheet1', index=False)
        worksheet = writer.sheets['Sheet1']
        # widen each column to max(longest cell, header length) + 2 padding
        for i, col in enumerate(my_dataframe.columns):
            column_len = my_dataframe[col].astype(str).str.len().max()
            column_len = max(column_len, len(col)) + 2
            worksheet.set_column(i, i, column_len)
        writer.close()  # pandas 2.x removed writer.save(); close() writes the file
        print("Conversion to Excel succeeded.")
    except Exception as e:
        print('Conversion to Excel failed, reason: %s' % e)
        return -1
    else:
        # the xlsx now holds the data, so remove the intermediate csv
        today = time.strftime("%Y-%m-%d", time.localtime(time.time()))
        path = 'work-' + today + '.csv'
        if os.path.exists(path):
            os.remove(path)
        return 0


def get_tasks():
    """Collect page URLs for all listing pages and ask whether to mail the result."""
    try:
        pages = func_timeout(15, lambda: input('How many pages (20 items per page, enter an integer): '))
        mail = func_timeout(15, lambda: input('Send the result by mail? Enter "yes" or "no": '))
    except FunctionTimedOut:
        pages = 10
        mail = 'no'
        print('Input timed out: fetching 10 pages by default, no mail.')
    print("Starting; results are sorted ascending by points.")
    global count
    count = 0
    title = '商品名,兑换积分,商品规格,商品颜色,购买方式,分类,兑换,供货商名,供货商电话,供货商工作时间,商品链接\n'
    today1 = time.strftime("%Y-%m-%d", time.localtime(time.time()))
    file_name = 'work-' + today1 + '.csv'
    with open(file_name, 'w', encoding='gbk') as file:
        file.write(title)
    pages = int(pages)
    urls_all = []
    for page in range(1, pages + 1):
        print('Fetching page %s ...' % page)
        time.sleep(0.5)
        urls_all.extend(get_urls(page))
    print("Total product URLs: %s" % len(urls_all))
    return (urls_all, mail)


if __name__ == '__main__':
    result = get_tasks()
    # Windows caps select() at about 509 sockets, so split the work into chunks of 500
    if len(result[0]) > 500:
        a, b = len(result[0]), 500
        chunk = lambda a, b: [(i * b, i * b + b) for i in range(0, int(a / b))]
        L1 = chunk(a, b)
        L1.append((L1[-1][-1], a))
        print(f"{L1} -- {len(result[0])} items, running in {len(L1)} batches")
    start = time.time()
    loop = asyncio.get_event_loop()  # deprecated on Python 3.10+, see the note below
    print("Issuing concurrent requests ...")
    # decide whether the loop has to process the tasks in several batches
    if len(result[0]) > 500:
        for seg in L1:
            tasks = [loop.create_task(get_data(u)) for u in result[0][seg[0]:seg[1]]]
            loop.run_until_complete(asyncio.wait(tasks))
    else:
        tasks = [loop.create_task(get_data(u)) for u in result[0]]
        loop.run_until_complete(asyncio.wait(tasks))
    loop.close()
    save_csv()
    end = time.time()
    print(f"Done, total time: {end - start} seconds")
    csv_excel()
    if result[1] in ('yes', 'y'):
        send_mail()
        print("Mail sent.")
    else:
        print("Mail sending skipped by user.")
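A note on the event loop: calling asyncio.get_event_loop() outside a running loop has been deprecated since Python 3.10. A minimal sketch of the same bounded-concurrency fan-out using the modern asyncio.run() idiom and a single shared AsyncClient follows; fetch_one/fetch_all and the demo URL are illustrative names, not part of the script above:

#!/usr/bin/python3
# Minimal sketch: bounded-concurrency fan-out with httpx + asyncio.run().
import asyncio
import httpx

async def fetch_one(client, sem, url):
    async with sem:                      # the shared semaphore caps in-flight requests
        res = await client.get(url, timeout=20)
        return url, res.status_code

async def fetch_all(urls, limit=100):
    sem = asyncio.Semaphore(limit)       # created inside the running loop
    async with httpx.AsyncClient() as client:   # one client, pooled connections
        return await asyncio.gather(
            *(fetch_one(client, sem, u) for u in urls),
            return_exceptions=True,      # a failed URL does not cancel the rest
        )

if __name__ == '__main__':
    demo_urls = ["https://example.com"] * 5
    for item in asyncio.run(fetch_all(demo_urls)):
        print(item)

Reusing one AsyncClient keeps a connection pool alive across requests, instead of opening a fresh client per URL as the script above does.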
