Python web scraping: learning notes

Crawler practice 1: Qiushibaike (糗事百科)

#coding:utf-8
# Runs on Python 2.7
# Scrapes Qiushibaike (糗事百科); no cookie required
# Press Enter to read one joke; shows its author, content and number of upvotes
import urllib2
import re

# Qiushibaike crawler
class QSBK:
    # Initialise some state
    def __init__(self):
        self.pageIndex = 1
        self.user_agent = 'Mozilla/5.0 (Windows NT 10.0; WOW64)'
        # Request headers
        self.headers = {'User-Agent': self.user_agent}
        # Buffer of stories; each element holds the stories of one page
        self.stories = []
        # Flag controlling whether the program keeps running
        self.enable = False

    # Fetch the HTML of the page with the given index
    def getPage(self, pageIndex):
        try:
            url = 'http://www.qiushibaike.com/hot/page/' + str(pageIndex)
            request = urllib2.Request(url, headers=self.headers)
            response = urllib2.urlopen(request)
            pageCode = response.read().decode('utf-8')
            return pageCode
        except urllib2.URLError, e:
            if hasattr(e, "reason"):
                print u"Failed to connect to Qiushibaike, reason:", e.reason
            return None

    # Parse one page of HTML and return its stories (text only, no images)
    def getPageItems(self, pageIndex):
        pageCode = self.getPage(pageIndex)
        if not pageCode:
            print "Failed to load the page..."
            return None
        pattern = re.compile('<div class="author clearfix">.*?<h2>(.*?)</h2>.*?"content">(.*?)</div>.*?number">(.*?)</.*?number">(.*?)</.', re.S)
        items = re.findall(pattern, pageCode)
        pageStories = []
        for item in items:
            replaceBR = re.compile('<br/>')
            text = re.sub(replaceBR, "\n", item[1])
            pageStories.append([item[0].strip(), text.strip(), item[2].strip(), item[3].strip()])
        return pageStories

    # Load one page and append its stories to the buffer
    def loadPage(self):
        # If fewer than two unread pages are buffered, fetch a new one
        if self.enable == True:
            if len(self.stories) < 2:
                pageStories = self.getPageItems(self.pageIndex)
                if pageStories:
                    self.stories.append(pageStories)
                    # Move the index forward so the next fetch reads the next page
                    self.pageIndex += 1

    # Print one story each time the user presses Enter
    def getOneStory(self, pageStories, page):
        # Iterate over the stories of one page
        for story in pageStories:
            # Wait for user input
            user_input = raw_input()
            # On every Enter, check whether a new page needs to be loaded
            self.loadPage()
            if user_input == "Q":
                self.enable = False
                return
            print u"Page %d\tAuthor: %s\tUpvotes: %s\tComments: %s\n%s" % (page, story[0], story[2], story[3], story[1])

    # Entry point
    def start(self):
        print u"Reading Qiushibaike. Press Enter for the next joke, Q to quit."
        self.enable = True
        # Pre-load the first page
        self.loadPage()
        # Local counter of the page currently being read
        nowPage = 0
        while self.enable:
            if len(self.stories) > 0:
                # Take one page of stories from the buffer
                pageStories = self.stories[0]
                nowPage += 1
                # Remove it from the buffer now that it has been taken
                del self.stories[0]
                # Print this page's stories
                self.getOneStory(pageStories, nowPage)

# Run the crawler
spider = QSBK()
spider.start()
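
Example 1 above targets Python 2.7 (urllib2, raw_input, print statements), while the remaining examples use Python 3. For comparison, a minimal Python 3 sketch of the same page fetch with urllib.request; the get_page name is my own, and the URL and User-Agent string are carried over from the class above:

from urllib import request, error

def get_page(page_index):
    # Fetch one page of the "hot" list; return the decoded HTML, or None on failure
    url = 'http://www.qiushibaike.com/hot/page/' + str(page_index)
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64)'}
    try:
        req = request.Request(url, headers=headers)
        with request.urlopen(req) as response:
            return response.read().decode('utf-8')
    except error.URLError as e:
        if hasattr(e, 'reason'):
            print('Failed to connect to Qiushibaike, reason:', e.reason)
        return None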

Crawler practice 2: scraping posts from a Baidu Tieba thread

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Scrape the text posted by the thread starter in a Baidu Tieba thread
# @Date    : 2018-07-04 14:21:14
# @Author  : mutoulazy (mutoulazy@gmail.com)
# @Link    : http://www.ccsu.com/
# @Version : 1.0
import re
from urllib import request, error

seelz = '?see_lz=' + '1'   # only show the thread starter's posts
pn = '&pn=' + '1'          # page number
base_url = 'https://tieba.baidu.com/p/4366865181'
arg_url = seelz + pn
url = base_url + arg_url

try:
    head = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0'}
    req = request.Request(url, headers=head)
    response = request.urlopen(req).read().decode('UTF-8')
    # print(response)

    # Build the matching patterns
    # Title: <h3 class="core_title_txt ..." title="...">...</h3>
    pattern1 = re.compile(r'<h3 class="core_title.*?">(.*?)</h3>', re.S)
    # Post body: <div id="post_content_84414011504" class="d_post_content j_d_post_content ">...</div>
    pattern2 = re.compile(r'<div id="post_content.*?">(.*?)</div>', re.S)

    # Run the regex matching
    result = re.search(pattern1, response)
    items = re.findall(pattern2, response)

    # Patterns for stripping tags from the post content
    removeImg = re.compile('<img.*?>')        # <img> tags
    removeBr = re.compile('<br><br>|<br>')    # <br> tags
    removeA = re.compile('<a.*?>|</a>')       # <a> tags
    removeOtherTag = re.compile('<.*?>')      # any remaining tags

    for item in items:
        x = re.sub(removeBr, '\n', item)
        x = re.sub(removeA, '', x)
        x = re.sub(removeImg, '', x)
        x = re.sub(removeOtherTag, '', x)
        print(x)

    # Print the thread title
    if result:
        print(result.group(1).strip())
    else:
        print(None)
except error.URLError as e:
    if hasattr(e, 'code'):
        print(e.code)
    elif hasattr(e, 'reason'):
        print(e.reason)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Scrape the text posted by the thread starter in a Baidu Tieba thread and save it to a local file
# @Date    : 2018-07-04 14:58:26
# @Author  : mutoulazy (mutoulazy@gmail.com)
# @Link    : http://www.ccsu.com/
# @Version : 2.0
import re
from urllib import request, error


class InitTool:
    '''
    Helper class that strips markup from the post content
    '''
    # <img> tags
    removeimg = re.compile('<img.*?>')
    # <br> tags
    replacebr = re.compile('<br><br>|<br>')
    # <a> tags
    removea = re.compile('<a.*?>|</a>')
    # any remaining tags
    removeothertag = re.compile('<.*?>')

    def replace(self, x):
        # Strip useless tags; replace <br> with newlines before the catch-all pattern removes them
        x = re.sub(self.removeimg, '', x)
        x = re.sub(self.removea, '', x)
        x = re.sub(self.replacebr, '\n', x)
        x = re.sub(self.removeothertag, '', x)
        # Trim leading/trailing whitespace
        return x.strip()


class BdTb:
    '''
    Fetches the thread title and post content
    '''
    def __init__(self, baseurl, seelz):
        '''
        Store the basic request parameters
        '''
        # Base thread URL
        self.baseurl = baseurl
        # Whether to show only the thread starter's posts
        self.seelz = '?see_lz=' + str(seelz)
        # Tag-stripping helper; note the parentheses after InitTool
        self.inittool = InitTool()
        # Default title
        # self.defaultTitle = '百度贴吧'

    def getpage(self, pn):
        '''
        Fetch the HTML of page pn of the thread
        '''
        try:
            # Build the URL
            url = self.baseurl + self.seelz + '&pn=' + str(pn)
            # Request headers
            head = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0'}
            # Send the request
            req = request.Request(url, headers=head)
            # Open the URL
            response = request.urlopen(req)
            # Return the body decoded as UTF-8
            return response.read().decode('utf-8')
        except error.URLError as e:
            # Print the reason if the request failed
            if hasattr(e, 'reason'):
                print('Connection failed, reason:', e.reason)
            return None

    def gettitle(self, page):
        '''
        Extract the thread title
        '''
        # re.S lets . match anything, including '\n'
        pattern = re.compile('<h3 class="core_title.*?">(.*?)</h3>', re.S)
        result = re.search(pattern, page)
        if result:
            # Return the title if found
            return result.group(1).strip()
        else:
            return None

    def getpagenum(self, page):
        '''
        Extract the total number of pages in the thread
        '''
        pattern = re.compile('<li class="l_reply_num.*?</span>.*?<span.*?>(.*?)</span>', re.S)
        result = re.search(pattern, page)
        if result:
            return result.group(1).strip()
        else:
            return None

    def getcontent(self, page):
        '''
        Extract the post content
        '''
        pattern = re.compile('<div id="post_content_.*?>(.*?)</div>', re.S)
        # All posts on this page
        items = re.findall(pattern, page)
        # Collect the cleaned-up posts
        contents = []
        for item in items:
            # Surround each post with blank lines
            content = "\n" + self.inittool.replace(item) + "\n"
            contents.append(content)
        return contents

    def start(self):
        '''
        Main entry point: fetch the title and every page of content and write them to test.txt
        '''
        # Fetch the first page
        indexpage = self.getpage(1)
        if indexpage is None:
            print('Failed to load the first page')
            return
        # Total number of pages
        allpn = self.getpagenum(indexpage)
        # Thread title
        title = self.gettitle(indexpage)
        if allpn is None:
            print('URL is no longer valid, please retry')
            return
        print('Total pages: %s' % allpn)
        # Write the title
        with open('test.txt', 'w', encoding='utf-8') as f:
            f.write(title)
        # Fetch pages 1..allpn and append each page's content to the file
        for i in range(1, int(allpn) + 1):
            page = self.getpage(i)
            contents = self.getcontent(page)
            for content in contents:
                # Append to the file
                with open('test.txt', 'a+', encoding='utf-8') as f:
                    f.write(str(content))


if __name__ == '__main__':
    baseurl = 'https://tieba.baidu.com/p/4366865181'
    seelz = 1
    bdtb = BdTb(baseurl, seelz)
    bdtb.start()

Crawler practice 3: scraping images from Jobbole (伯乐在线)

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Scrape images from Jobbole (伯乐在线)
# @Date    : 2018-07-05 11:17:46
# @Author  : mutoulazy (mutoulazy@gmail.com)
# @Link    : http://www.ccsu.com/
# @Version : $Id$
import os
import time
import re
from urllib import request, error

'''
Crawler class for images on Jobbole
'''
class BoLe:
    """Store the base URL"""
    def __init__(self, baseurl):
        self.baseurl = baseurl

    """Fetch the page for a given id (pn)"""
    def getpage(self, pn):
        try:
            url = self.baseurl + str(pn)
            head = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0'}
            time.sleep(1)
            req = request.Request(url, headers=head)
            # Open the prepared request (not the bare URL) so the headers are actually sent
            response = request.urlopen(req)
            return response.read().decode('UTF-8')
        except error.URLError as e:
            if hasattr(e, 'reason'):
                print(e.reason)
            if hasattr(e, 'code'):
                print(e.code)

    '''Extract and download the images on the page'''
    # <p.*?<img class="alignnone" src="(.*?)" width.*?></p>
    def getimg(self, page, pn):
        pattern = re.compile(r'<img src="(.*?)" .*?>', re.S)
        items = re.findall(pattern, page)
        for item in items:
            # Filter out site chrome and known placeholder images by substring
            if "images/" in item or \
               "208c046q3c" in item or \
               "208c03c3yt" in item:
                continue
            print(item)
            request.urlretrieve(str(item), 'E:\\' + str(pn) + '\\' + '%s' % time.time() + '.jpg')

    '''Create a folder named after pn'''
    def mkdir(self, pn):
        path = 'E:\\' + str(pn)
        # os.path.exists tells us whether the directory is already there; create it if not
        if os.path.exists(path):
            return False
        else:
            os.mkdir(path)
            return True

    '''
    Entry point.
    Treat every number between start and end as a user id
    and scrape the images on each id's profile page.
    '''
    def start(self, start, end):
        # pn iterates over the ids from start to end inclusive
        for pn in range(start, end + 1):
            page = self.getpage(pn)
            if page is None:
                print("No profile found for id %s" % pn)
                continue
            else:
                self.mkdir(pn)
                self.getimg(page, pn)


if __name__ == '__main__':
    baseurl = 'http://date.jobbole.com/'
    bole = BoLe(baseurl)
    bole.start(5358, 5368)

Crawler practice 4: scraping images from Douban

# Combined practice with BeautifulSoup and the requests module
import os
import requests
from urllib import request
from bs4 import BeautifulSoup

"""
Scrape images from Douban
"""
class DB:
    def __init__(self, baseurl):
        self.baseurl = baseurl

    def geturl(self, pn):
        # Collect the image URLs on page pn
        url = self.baseurl + "?pager_offset=" + str(pn)
        req = requests.get(url)
        soup = BeautifulSoup(req.text, 'lxml')
        imgurls = soup.find_all('img', class_='height_min')
        urllist = []
        for imgurl in imgurls:
            urllist.append(imgurl.get('src'))
        return urllist

    def getimage(self, url):
        # Download one image into E:\douban
        filename = url.split('/')[-1]
        basepath = 'E:\\' + 'douban'
        if not os.path.exists(basepath):
            os.mkdir(basepath)
        path = basepath + "\\" + filename
        print("Saving %s" % filename)
        request.urlretrieve(url, path)

    def start(self, pn):
        urls = self.geturl(pn)
        for url in urls:
            self.getimage(url)


def main():
    baseurl = 'https://www.dbmeinv.com/'
    db = DB(baseurl)
    # Simple loop over the first 10 pages
    for i in range(10):
        db.start(i)


if __name__ == '__main__':
    main()

Crawler practice 5: scraping job listings from Lagou (拉勾网)

import json
from urllib import request, error
from bs4 import BeautifulSoup


class Lagou:
    def __init__(self, baseurl):
        self.baseurl = baseurl

    def getresponse(self):
        # Fetch the listing page
        try:
            url = self.baseurl
            head = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0'}
            req = request.Request(url, headers=head)
            return request.urlopen(req).read().decode('utf-8')
        except error.URLError as e:
            if hasattr(e, 'reason'):
                print(e.reason)
            if hasattr(e, 'code'):
                print(e.code)

    def getcontent(self, response):
        # Parse job title, link, company and salary out of each listing
        soup = BeautifulSoup(response, 'lxml')
        divlist = soup.find_all('div', class_='list_item_top')
        content = []
        for div in divlist:
            job_name = div.find('h3').string
            link = div.find('a', class_="position_link").get('href')
            company = div.find('div', class_='company_name').find('a').string
            salary = div.find('span', class_='money').string
            print(job_name, company, salary, link)
            content.append({'job': job_name, 'company': company, 'salary': salary, 'link': link})
        return content

    def tojson(self, content):
        # Write the results to lagou.json; ensure_ascii=False keeps Chinese text readable
        with open('lagou.json', 'w', encoding='utf-8') as fp:
            json.dump(content, fp=fp, indent=4, ensure_ascii=False)

    def start(self):
        response = self.getresponse()
        content = self.getcontent(response)
        self.tojson(content)


if __name__ == '__main__':
    baseurl = 'https://www.lagou.com/zhaopin/Python/?labelWords=label'
    lagou = Lagou(baseurl)
    lagou.start()

Crawler practice 6: scraping Douban publisher data (stored in Excel)

from urllib import request
import re, xlwt, datetime

# Read the page source
html = request.urlopen("https://read.douban.com/provider/all").read()
# Regex capturing publisher URL, logo URL, publisher name and number of works on sale
wzgz = "<a href=\"(.*?)\" class=\"provider-item\"><div class=\"col-media\"><div class=\"cm-left avatar\"><div class=\"avatar\"><img src=\"(.*?)\"/></div></div><div class=\"cm-body\"><div class=\"name\">(.*?)</div><div class=\"works-num\">(.*?) 部作品在售</div></div></div></a>"
# Extract the wanted fields from the page source
xx = re.compile(wzgz).findall(str(html, "utf-8"))
# print(len(xx))

# Create the workbook and sheet objects
workbook = xlwt.Workbook()
sheet1 = workbook.add_sheet('sheet1', cell_overwrite_ok=True)

# Initialise the Excel cell style
style = xlwt.XFStyle()

# Create a font for the style
font = xlwt.Font()
font.name = 'Times New Roman'
font.bold = True

# Attach the font to the style
style.font = font

# Write the column headers into the first row of sheet1
sheet1.write(0, 0, "No.", style)
sheet1.write(0, 1, "Publisher URL", style)
sheet1.write(0, 2, "Logo URL", style)
sheet1.write(0, 3, "Publisher name", style)
sheet1.write(0, 4, "Works on sale", style)

a = 0  # row counter
h = 0  # running total of works on sale
for i in xx:
    # print(str(a+1), i[0])
    sheet1.write(a + 1, 0, a + 1, style)                                  # column 1: row number
    sheet1.write(a + 1, 1, "https://read.douban.com" + str(i[0]), style)  # column 2: publisher URL
    sheet1.write(a + 1, 2, i[1], style)                                   # column 3: logo URL
    sheet1.write(a + 1, 3, i[2], style)                                   # column 4: publisher name
    sheet1.write(a + 1, 4, int(i[3]), style)                              # column 5: works on sale
    h += int(i[3])                                                        # accumulate the total
    a += 1

# Once every match has been written, append the total and the capture time below the data
if len(xx) == a:
    t = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    t1 = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
    sheet1.write(a + 1, 3, "Total", style)
    sheet1.write(a + 1, 4, h, style)
    sheet1.write(a + 2, 3, "Captured at", style)
    sheet1.write(a + 2, 4, t, style)

# Save the workbook; saving fails if a file with the same name is already open
workbook.save("d:/豆瓣出版社汇总表" + str(t1) + ".xls")

print("Finished writing the data to the Excel file!")
print("Total works on sale: " + str(h))

Crawler practice 7: scraping Maoyan movie comments and persisting them

import requests
import json
import time
import random
from openpyxl import workbook  # used for writing the Excel file

# Data endpoint: http://m.maoyan.com/mmdb/comments/movie/248566.json?_v_=yes&offset=1

# Download one page of comments
def get_one_page(url):
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36'
    }
    response = requests.get(url, headers=headers)
    if response.status_code == 200:
        return response.text
    return None

# Parse one page of comments
def parse_one_page(html):
    data = json.loads(html)['cmts']
    for item in data:
        yield {
            'comment': item['content'],
            'date': item['time'].split(' ')[0],
            'rate': item['score'],
            'city': item['cityName'],
            'nickname': item['nickName']
        }

# Persist the data to a txt file
def save_to_txt():
    for i in range(1, 1001):
        url = 'http://m.maoyan.com/mmdb/comments/movie/1212592.json?_v_=yes&offset=' + str(i)
        html = get_one_page(url)
        if html is None:
            continue
        print('Saving page %d' % i)
        for item in parse_one_page(html):
            with open('ping_lun.txt', 'a', encoding='utf-8') as f:
                f.write(item['date'] + ',' + item['nickname'] + ',' + item['city'] + ',' + str(item['rate']) + ',' + item['comment'] + '\n')
        # Random delay to avoid hammering the server
        time.sleep(5 + float(random.randint(1, 100)) / 20)

# Persist the data to an Excel file
def save_to_csv():
    wb = workbook.Workbook()  # create the Excel workbook
    ws = wb.active            # the worksheet currently in use
    ws.append(['Date', 'Nickname', 'City', 'Rating', 'Comment'])
    for i in range(1, 5):
        j = random.randint(1, 1000)
        url = 'http://m.maoyan.com/mmdb/comments/movie/1212592.json?_v_=yes&offset=' + str(j)
        html = get_one_page(url)
        if html is None:
            continue
        print('Saving result of query %d' % i)
        for item in parse_one_page(html):
            ws.append([item['date'], item['nickname'], item['city'], item['rate'], item['comment']])
            print(item['date'] + ',' + item['nickname'] + ',' + item['city'] + ',' + str(item['rate']) + ',' + item['comment'])
        time.sleep(5 + float(random.randint(1, 100)) / 20)
    wb.save('ping_lun.xlsx')

if __name__ == '__main__':
    # Run either persistence path: save_to_txt() writes ping_lun.txt, save_to_csv() writes ping_lun.xlsx
    save_to_csv()

Crawler practice 8: scraping Douban movie reviews and generating a word cloud

# -*- coding: utf-8 -*-
import requests
import re
import jieba
import warnings
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
from bs4 import BeautifulSoup as bs
from requests.exceptions import RequestException
from wordcloud import WordCloud

matplotlib.rcParams["figure.figsize"] = (10.0, 5.0)
warnings.filterwarnings("ignore")

# Fetch one page of short reviews for a movie
def getCommentsById(movieId, pageNum):
    eachCommentList = []
    if pageNum > 0:
        start = (pageNum - 1) * 20
    else:
        return False
    requrl = (
        "https://movie.douban.com/subject/"
        + movieId
        + "/comments"
        + "?"
        + "start="
        + str(start)
        + "&limit=20"
    )
    try:
        resp = requests.get(requrl)
        html_data = resp.content.decode("utf-8")
        soup = bs(html_data, "html.parser")
        comment_div_list = soup.find_all("div", class_="comment")
        for item in comment_div_list:
            comment = item.find_all(name='span', attrs={"class": "short"})[0].string
            if comment is not None:
                eachCommentList.append(comment)
                # print(comment)
        if not eachCommentList:
            print("No comments retrieved")
            exit(1)
    except RequestException as e:
        print("error: %s" % e)
    return eachCommentList


def main(movieId, movieName):
    # Fetch the first 10 pages of comments
    commentList = []
    for i in range(1, 11):
        commentList_temp = getCommentsById(movieId, i)
        commentList.append(commentList_temp)
    # Join everything into a single string
    comments = ""
    for k in range(len(commentList)):
        for m in range(len(commentList[k])):
            comments = comments + str(commentList[k][m].strip())
    # Keep only Chinese characters (drops punctuation and other symbols)
    filtrate = re.compile(r"[^\u4E00-\u9FA5]")
    filtrated_str = filtrate.sub(r"", comments)
    # Segment the text with jieba
    segment = jieba.lcut(filtrated_str)
    # Put the words into a one-column DataFrame
    words_df = pd.DataFrame({"segment": segment})
    # Drop stopwords
    stopwords = pd.read_csv(
        "./stopword.txt",
        index_col=False,
        quoting=3,
        sep="\t",
        names=["stopword"],
        encoding="utf-8",
    )
    words_df = words_df[~words_df.segment.isin(stopwords.stopword)]
    # Count word frequencies, sorted in descending order
    words_stat = words_df["segment"].value_counts().reset_index()
    # Render the word cloud
    wordcloud = WordCloud(
        font_path="./simkai.ttf",
        background_color="white",
        max_font_size=80,
        width=1000,
        height=860,
        margin=2,
    )
    # Use the 1000 most frequent words
    word_frequence = {x[0]: x[1] for x in words_stat.head(1000).values}
    print(word_frequence)
    wordcloud = wordcloud.fit_words(word_frequence)
    plt.imshow(wordcloud)
    plt.axis("off")
    plt.show(block=False)
    img_name = "./" + movieName + ".jpg"
    wordcloud.to_file(img_name)


if __name__ == '__main__':
    main("27622447", "小偷家族")