import os
import re
import time
from threading import Thread, Lock

import requests
import pandas as pd
from bs4 import BeautifulSoup

def getHTMLText(url):
    """Fetch a page and return its text, or an empty string on failure."""
    try:
        r = requests.get(url, timeout=30)
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        return r.text
    except Exception:
        return ''

def getPages(infoList, url, pre_params, *args):
    """Build the full query URL and return the number of result pages."""
    # Pair each query value with its parameter prefix, e.g. 'r1%5B%5D=' + '08'.
    params = []
    count = -1
    for i in args:
        count += 1
        params.append(pre_params[count] + i)
    for param in params:
        url += param + '&'

    html = getHTMLText(url)
    soup = BeautifulSoup(html, 'html.parser')
    # The page count appears in the second 'header' cell as 'current/total'.
    try:
        pages_tag = soup.find_all('td', 'header')[1].string
        pages = int(re.split('/', pages_tag)[1])
    except Exception:
        pages = 0
    if pages == 0:
        pages += 1
    return pages, url

# Shared page counter, protected by a lock so each worker claims a unique page.
page = 0
lock = Lock()


def getDataInfo(infoList, pages, url):
    """Worker: repeatedly claim the next page number and scrape its rows."""
    global page
    while True:
        # Claim the next page number under the lock, then work on a local copy.
        lock.acquire()
        page += 1
        current = page
        lock.release()
        if current > pages:
            break
        # Build the per-page URL locally so the shared base URL is not mutated.
        page_url = url + '&page=' + str(current)
        time.sleep(1)
        html = getHTMLText(page_url)
        soup = BeautifulSoup(html, 'html.parser')
        tbody = soup.find_all('tbody', 'forum_body_manage')[0]
        trs = tbody.find_all('tr')
        for tr in trs:
            dicts = {}
            href = tr.find_all('a')[0].get('href')
            tds = tr.find_all('td')
            for i in range(len(tds)):
                if i == 0:
                    # The first cell holds the post title inside a link.
                    dicts[i] = tds[i].find('a').string
                else:
                    dicts[i] = tds[i].string
            dicts['href'] = href
            print(dicts)
            infoList.append(dicts)

def outputCSV(infoList, path):
    """Sort the scraped rows by post date and write them to a CSV file."""
    data = pd.DataFrame(infoList)
    try:
        # Columns: title, school, discipline/major, enrollment quota, post date, link.
        data.columns = ['标题', '学校', '门类/专业', '招生人数', '发布时间', '链接']
        data.sort_values(by='发布时间', ascending=False, inplace=True)
        data = data.reset_index(drop=True)
    except Exception:
        print('No adjustment listings found...')
        return
    try:
        if not os.path.exists(path):
            data.to_csv(path)
            print('Scrape finished, CSV saved')
        else:
            print('Output file already exists')
    except Exception:
        print('Failed to save the CSV file')

def parameters(pro_='', pro_1='', pro_2='', year=''):
    """Set the query parameters: discipline category, sub-major filters, and year."""
    return [pro_, pro_1, pro_2, year]

def threadingUp(count, infoList, pages, url):
    """Start `count` worker threads and wait for all of them to finish."""
    threadList = []
    for i in range(count):
        t = Thread(target=getDataInfo, args=(infoList, pages, url))
        t.start()
        threadList.append(t)
    for thread in threadList:
        thread.join()

def main():
    url = 'http://muchong.com/bbs/kaoyan.php?'
    path = './08.csv'
    # Query-string prefixes: discipline category, two sub-major filters, and year.
    pre_params = ['r1%5B%5D=', 'r2%5B%5D=', 'r3%5B%5D=', 'year=']
    params = parameters(pro_='08', year='2020')
    dataList = []
    count = 1000  # number of worker threads
    pages, url_ = getPages(dataList, url, pre_params, *params)
    start = time.time()
    threadingUp(count, dataList, pages, url_)
    outputCSV(dataList, path)
    end = time.time()
    print('Elapsed: ' + str(end - start))

if __name__ == "__main__":
    main()
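# Usage note (a sketch, not part of the original script): to query a different
# discipline or year, only the call in main() needs to change, e.g.
#     params = parameters(pro_='08', year='2021')   # '2021' is an assumed value
# The prefixes in pre_params map positionally onto these values, so the order
# passed to parameters() must match ['r1%5B%5D=', 'r2%5B%5D=', 'r3%5B%5D=', 'year='].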