1. Overview

With the national score line about to be released, I wrote a crawler that scrapes the transfer/adjustment (调剂) listings on the 小木虫 forum to make the information easier to browse. The code is for learning purposes only and must not be used commercially.

2. Code: Single-Threaded

Single-threaded example:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import requests
from bs4 import BeautifulSoup
import re
import pandas as pd
import os


# Fetch a page; return '' on any request failure
def getHTMLText(url):
    try:
        r = requests.get(url, timeout=30)
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        return r.text
    except Exception:
        return ''


# Scrape the listing data
def getDataInfo(infoList, url, pre_params, *args):
    # Pair each query-key prefix with its value
    params = []
    for count, arg in enumerate(args):
        params.append(pre_params[count] + arg)

    # Build the query URL from the parameters
    for param in params:
        url += param + '&'

    # print(url)
    html = getHTMLText(url)
    soup = BeautifulSoup(html, 'html.parser')

    # Read the page count; an empty result page has no header cell
    try:
        pages_tag = soup.find_all('td', 'header')[1].string
        pages = int(re.split('/', pages_tag)[1])
    except Exception:
        pages = 0

    # Treat an empty result as a single page
    if pages == 0:
        pages = 1

    for i in range(pages):  # walk through every page
        page = i + 1
        page_url = url + 'page=' + str(page)  # fresh URL each pass (url already ends with '&')
        html = getHTMLText(page_url)
        soup = BeautifulSoup(html, 'html.parser')
        tbody = soup.find_all('tbody', 'forum_body_manage')[0]
        trs = tbody.find_all('tr')  # each <tr> wraps one school's record
        for tr in trs:  # walk through the schools
            dicts = {}
            href = tr.find_all('a')[0].get('href')  # the <a> tag carries the post link
            tds = tr.find_all('td')  # the individual fields sit in <td> tags
            for j in range(len(tds)):  # collect the fields into the dict
                if j == 0:
                    dicts[j] = tds[j].find('a').string  # the title cell wraps its text in <a>
                else:
                    dicts[j] = tds[j].string
            dicts['href'] = href
            print(dicts)
            infoList.append(dicts)  # one dict per school


# Write the results to CSV
def outputCSV(infoList, path):
    data = pd.DataFrame(infoList)
    try:
        data.columns = ['标题', '学校', '门类/专业', '招生人数', '发布时间', '链接']
    except Exception:
        print('没有调剂信息...')
        return

    try:
        if not os.path.exists(path):
            data.to_csv(path)
            print('保存成功')
        else:
            print('路径存在')
    except Exception:
        print('保存失败')


# Set the query parameters -- discipline and year
def parameters(pro_='', pro_1='', pro_2='', year=''):
    paramsList = [pro_, pro_1, pro_2, year]
    return paramsList


def main():
    url = 'http://muchong.com/bbs/kaoyan.php?'
    path = './2020计算机调剂信息(截止4.09).csv'
    pre_params = ['r1%5B%5D=', 'r2%5B%5D=', 'r3%5B%5D=', 'year=']
    params = parameters(pro_='08', pro_1='0812', year='2020')
    dataList = []
    getDataInfo(dataList, url, pre_params, *params)
    outputCSV(dataList, path)


if __name__ == '__main__':
    main()
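
An aside on the query string: the entries in pre_params are just percent-encoded HTML form keys ('r1%5B%5D=' is 'r1[]=' with the brackets escaped). A minimal sketch, not part of the original script, showing that requests can build the same URL from a plain dict and do the encoding itself:

import requests

base = 'http://muchong.com/bbs/kaoyan.php'
query = {
    'r1[]': '08',    # discipline category, e.g. 工学
    'r2[]': '0812',  # first-level discipline, e.g. 计算机科学与技术
    'year': '2020',
}
r = requests.get(base, params=query, timeout=30)
print(r.url)  # requests percent-encodes the keys: ...?r1%5B%5D=08&r2%5B%5D=0812&year=2020

This avoids the manual string concatenation done in getDataInfo, assuming the site accepts the same parameters in any order.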

3. Code: Multi-Threaded

Multi-threaded example:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import requests
from bs4 import BeautifulSoup
import re
import pandas as pd
import os
import time
from threading import Thread
from threading import Lock


def getHTMLText(url):
    """Fetch a page; return '' on any request failure."""
    try:
        r = requests.get(url, timeout=30)
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        return r.text
    except Exception:
        return ''


def getPages(url, pre_params, *args):
    """Return the number of result pages and the fully built query URL."""
    params = []
    for count, arg in enumerate(args):
        params.append(pre_params[count] + arg)

    for param in params:
        url += param + '&'

    # print(url)
    html = getHTMLText(url)
    soup = BeautifulSoup(html, 'html.parser')

    # An empty result page has no header cell
    try:
        pages_tag = soup.find_all('td', 'header')[1].string
        pages = int(re.split('/', pages_tag)[1])
    except Exception:
        pages = 0

    # Treat an empty result as a single page
    if pages == 0:
        pages = 1

    return pages, url


page = 0  # shared page counter, guarded by lock
lock = Lock()


def getDataInfo(infoList, pages, url):
    """Worker: claim page numbers from the shared counter and scrape them."""
    global page
    while True:
        # Claim the next page number atomically; taking a local copy under
        # the lock keeps two threads from fetching the same page
        with lock:
            page += 1
            my_page = page
        if my_page > pages:
            break
        page_url = url + 'page=' + str(my_page)  # url already ends with '&'
        time.sleep(1)  # be gentle with the server
        html = getHTMLText(page_url)
        soup = BeautifulSoup(html, 'html.parser')
        tbody = soup.find_all('tbody', 'forum_body_manage')[0]
        trs = tbody.find_all('tr')  # each <tr> wraps one school's record
        for tr in trs:  # walk through the schools
            dicts = {}
            href = tr.find_all('a')[0].get('href')  # the <a> tag carries the post link
            tds = tr.find_all('td')  # the individual fields sit in <td> tags
            for i in range(len(tds)):
                if i == 0:
                    dicts[i] = tds[i].find('a').string  # the title cell wraps its text in <a>
                else:
                    dicts[i] = tds[i].string
            dicts['href'] = href
            print(dicts)
            infoList.append(dicts)


def outputCSV(infoList, path):
    """Write the results to CSV, newest postings first."""
    data = pd.DataFrame(infoList)
    try:
        data.columns = ['标题', '学校', '门类/专业', '招生人数', '发布时间', '链接']
        data.sort_values(by='发布时间', ascending=False, inplace=True)
        data = data.reset_index(drop=True)
    except Exception:
        print('没有调剂信息...')
        return

    try:
        if not os.path.exists(path):
            data.to_csv(path)
            print('爬取成功')
        else:
            print('路径存在')
    except Exception:
        print('保存失败')


def parameters(pro_='', pro_1='', pro_2='', year=''):
    """Set the query parameters -- discipline and year."""
    paramsList = [pro_, pro_1, pro_2, year]
    return paramsList


def threadingUp(count, infoList, pages, url):
    """Start count worker threads and wait for them all to finish."""
    threadList = []
    for _ in range(count):
        t = Thread(target=getDataInfo, args=(infoList, pages, url))
        t.start()
        threadList.append(t)
    for thread in threadList:
        thread.join()


def main():
    url = 'http://muchong.com/bbs/kaoyan.php?'
    path = './08.csv'
    pre_params = ['r1%5B%5D=', 'r2%5B%5D=', 'r3%5B%5D=', 'year=']
    params = parameters(pro_='08', year='2020')
    dataList = []
    count = 1000  # worker threads; surplus workers exit as soon as the counter passes pages
    pages, url_ = getPages(url, pre_params, *params)
    start = time.time()
    threadingUp(count, dataList, pages, url_)  # multi-threaded
    # getDataInfo(dataList, pages, url_)  # single-threaded fallback
    outputCSV(dataList, path)
    end = time.time()
    print('时间:' + str(end - start))


if __name__ == "__main__":
    main()
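
The shared page counter plus a Lock is one way to hand out work, though with count = 1000 most workers exit immediately once the counter passes pages. As an alternative sketch (my own suggestion, not the post's code), concurrent.futures.ThreadPoolExecutor can map one task per page with no global state, reusing getHTMLText from above:

from concurrent.futures import ThreadPoolExecutor
from bs4 import BeautifulSoup

def scrapePage(url, page):
    # Fetch and parse one listing page; returns a list of row dicts
    html = getHTMLText(url + 'page=' + str(page))  # url already ends with '&'
    soup = BeautifulSoup(html, 'html.parser')
    tbodies = soup.find_all('tbody', 'forum_body_manage')
    rows = []
    if not tbodies:
        return rows
    for tr in tbodies[0].find_all('tr'):
        record = {'href': tr.find_all('a')[0].get('href')}
        for j, td in enumerate(tr.find_all('td')):
            record[j] = td.find('a').string if j == 0 else td.string
        rows.append(record)
    return rows

def scrapeAll(url, pages, max_workers=8):
    # pool.map preserves page order; each task builds its own URL
    infoList = []
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        for rows in pool.map(lambda p: scrapePage(url, p), range(1, pages + 1)):
            infoList.extend(rows)
    return infoList

Swapping threadingUp(count, dataList, pages, url_) for dataList = scrapeAll(url_, pages) would leave the rest of main() unchanged.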

4. Parameter Reference

def parameters(pro_='', pro_1='', pro_2='', year=''):
    paramsList = [pro_, pro_1, pro_2, year]
    return paramsList


def main():
    url = 'http://muchong.com/bbs/kaoyan.php?'
    path = './data_info.csv'
    pre_params = ['r1%5B%5D=', 'r2%5B%5D=', 'r3%5B%5D=', 'year=']
    params = parameters(pro_='08', pro_1='0801')
    dataList = []
    getDataInfo(dataList, url, pre_params, *params)
    outputCSV(dataList, path)

The main body of the code is done; to use it, you only need to edit the arguments passed to parameters() inside main().

The parameters function simply returns the query parameters, all of which default to the empty string. If every field is left blank, the crawler fetches all of 小木虫's transfer postings, for every year and every discipline.

The params fields in detail:

  • pro_

    The discipline category to query (the categories are listed in the figure below).

    Fill in just the number shown in front of the category; for engineering (工学), that is pro_='08'.

    Note: the value is a string.

  • pro_1

    The first-level discipline code, as in the figure below.

    Taking 电子科学与技术 as an example, again only the leading code is needed: pro_1='0806'.

    If this field is left blank, the query covers the entire discipline category selected above.

  • pro_2

    The second-level discipline code, as in the figure.

    For example, to query 物理电子学 transfer postings, set pro_2='080901'. If left blank, the query defaults to everything under the discipline one level up; here, that would be all of 电子科学与技术.

  • year

    The year to query, e.g. year='2020'. Note: again a string. If left blank, all years are queried.

The save path path in main() can be changed to whatever you like.

In short: only params and the save path path need editing; a filled-in example follows.
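
Putting the codes quoted above together (the file name here is arbitrary), this would scrape the 2020 postings for 物理电子学 (080901), which sits under 电子科学与技术 (0806) in the engineering category (08):

url = 'http://muchong.com/bbs/kaoyan.php?'
pre_params = ['r1%5B%5D=', 'r2%5B%5D=', 'r3%5B%5D=', 'year=']
params = parameters(pro_='08', pro_1='0806', pro_2='080901', year='2020')
dataList = []
getDataInfo(dataList, url, pre_params, *params)
outputCSV(dataList, './2020_080901.csv')  # hypothetical save path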

5. Result Screenshots

The 小木虫 transfer-information page: http://muchong.com/bbs/kaoyan.php
