1. Anonymous functions (lambda)
- Structure:
lambda x1, x2, ..., xn: expression
- Parameters: a lambda may take any number of parameters, but its body is a single expression
sum_sum = lambda x1, x2: x1 + x2
print(sum_sum(2, 3))
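A lambda is just shorthand for a small named function; for comparison, the same thing written with def (a minimal sketch, the name add is only illustrative):
# Equivalent named function; the lambda above is shorthand for this
def add(x1, x2):
    return x1 + x2
print(add(2, 3))  # 5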
name_info_list = [
    ('张三', 4500),
    ('李四', 9900),
    ('王五', 2000),
    ('赵六', 5500),
]
name_info_list.sort(key=lambda x: x[1], reverse=True)
print('排序后:', name_info_list)
stu_info = [
    {"name": 'zs', "age": '18'},
    {"name": 'ls', "age": '19'},
    {"name": 'ww', "age": '20'},
    {"name": 'tq', "age": '21'},
]
# the ages above are stored as strings, so convert to int for a numeric sort
stu_info.sort(key=lambda i: int(i['age']), reverse=True)
print('排序后:', stu_info)
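list.sort() sorts in place and returns None; the built-in sorted() returns a new sorted list and leaves the original untouched (a small sketch reusing stu_info from above):
# Youngest first; int() because the ages in this example are strings
youngest_first = sorted(stu_info, key=lambda i: int(i['age']))
print(youngest_first)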
2. List comprehensions, filtered list comprehensions, and dict comprehensions
- List comprehension:
[expression for item in iterable optional_condition]
li = []
for i in range(10):
    li.append(i)
print(li)
# The same list built with a comprehension
# [expression for item in iterable optional_condition]
print([i for i in range(10)])
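The expression part can also transform each item rather than just echo it (a small sketch):
# Squares of 0..9 built with a comprehension
print([i * i for i in range(10)])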
- List comprehension with a condition
# Select all even numbers with a plain loop
li1 = []
for i in range(10):
    if i % 2 == 0:
        li1.append(i)
print(li1)
# The same selection with a comprehension
print([i for i in range(10) if i % 2 == 0])
# Select the numbers greater than 0
from random import randint
num_list = [randint(-10, 10) for _ in range(10)]
print(num_list)
print([i for i in num_list if i > 0])
- Dict comprehension
# Generate grades for 4 students
from random import randint
stu_grades = {'stu{}'.format(i): randint(50, 90) for i in range(1, 5)}
print(stu_grades)
# Select students scoring above 60
print({k: v for k, v in stu_grades.items() if v > 60})
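The same brace syntax without a key: value pair builds a set, and swapping k and v inverts a dict (a sketch based on stu_grades above; the inversion assumes the grades are unique):
# Set of distinct grades (duplicates collapse)
print({v for v in stu_grades.values()})
# Invert the mapping: grade -> student name
print({v: k for k, v in stu_grades.items()})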
3. Plotting with matplotlib
- Imports
from matplotlib import pyplot as plt
plt.rcParams["font.sans-serif"] = ['SimHei']  # use a font that can display Chinese labels
plt.rcParams['axes.unicode_minus'] = False    # render the minus sign correctly with that font
import numpy as np
- Plot sine and cosine over [0, 2π] using 100 points
linspace returns an evenly spaced sequence over the closed interval [start, stop] (both endpoints included)
x = np.linspace(0, 2*np.pi, num=100)
print(x)
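A quick check of the endpoint behaviour (a small sketch; with endpoint=False the right end 2π is excluded and the spacing changes):
print(np.linspace(0, 2*np.pi, num=4))                  # last value is 2*pi
print(np.linspace(0, 2*np.pi, num=4, endpoint=False))  # 2*pi is excluded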
# Plot sine and cosine on the same axes
y = np.sin(x)
cosy = np.cos(x)
plt.plot(x, y, color='g', linestyle='--', label='sin(x)')
plt.plot(x, cosy, color='r', label='cos(x)')
plt.xlabel('时间(s)')
plt.ylabel('电压(v)')
plt.title('欢迎来到python世界')
# Show the legend
plt.legend()
plt.show()
- Bar chart
import string
from random import randint
print(string.ascii_uppercase[0:6])
# 'ABCDEF' -- a string slice, not a list
x = ['口红{}'.format(x) for x in string.ascii_uppercase[0:5]]
y = [randint(200, 500) for _ in range(5)]
print(x)
print(y)
plt.xlabel('口红品牌')
plt.ylabel('价格(元)')
plt.bar(x, y)
plt.show()
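Long brand names are often easier to read on a horizontal bar chart; plt.barh takes the category labels first and the bar lengths second (a sketch reusing x and y from above):
# Horizontal bar chart of the same data
plt.barh(x, y)
plt.xlabel('价格(元)')
plt.ylabel('口红品牌')
plt.show()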
- Pie chart
from random import randint
import string
counts = [randint(3500, 9000) for _ in range(9)]
labels = ['员工{}'.format(x) for x in string.ascii_lowercase[0:9]]
# How far each wedge is pulled out from the centre (only the first wedge is exploded)
explode = [0.1, 0, 0, 0, 0, 0, 0, 0, 0]
# Fewer colours than wedges is fine: matplotlib cycles through the list
colors = ['red', 'purple', 'blue', 'yellow', 'gray', 'green']
plt.pie(counts, explode=explode, shadow=True, labels=labels, autopct='%1.1f%%',
        colors=colors)
plt.legend(loc=2)
plt.axis('equal')
plt.show()
- Scatter plot
Normally distributed data with mean 0 and standard deviation 1
x = np.random.normal(0, 1, 1000000)
y = np.random.normal(0, 1, 1000000)
# alpha sets the marker transparency
plt.scatter(x, y, alpha=0.1)
plt.show()
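With a million points most markers overlap even at alpha=0.1; a 2-D histogram shows the same density pattern more directly (an alternative sketch):
# Bin the points on a 100x100 grid and colour each cell by its count
h, xedges, yedges, im = plt.hist2d(x, y, bins=100)
plt.colorbar(im)
plt.show()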
4. Top-10 character analysis of Romance of the Three Kingdoms
import jieba
from wordcloud import WordCloud
from matplotlib import pyplot as plt
plt.rcParams["font.sans-serif"] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
import string
# 1. Read the novel
with open('./novel/threekingdom.txt', 'r', encoding='utf-8') as f:
    words = f.read()
counts = {}
excludes = {"将军", "却说", "丞相", "二人", "不可", "荆州", "不能", "如此", "商议",
            "如何", "主公", "军士", "军马", "左右", "次日", "引兵", "大喜", "天下",
            "东吴", "于是", "今日", "不敢", "魏兵", "陛下", "都督", "人马", "不知",
            "孔明曰", "玄德曰", "刘备", "云长"}
# 2. Segment the text into words with jieba
words_list = jieba.lcut(words)
# print(words_list)
for word in words_list:
    if len(word) <= 1:
        continue
    else:
        # update this word's running count
        # counts[word] = counts[word] + 1 would raise KeyError on a new word
        # dict.get(k) returns None when the key is missing; get(k, 0) returns 0 instead
        counts[word] = counts.get(word, 0) + 1
print(len(counts))
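The counting loop above is essentially what collections.Counter provides out of the box (an equivalent sketch, not needed for the rest of the analysis):
from collections import Counter
# Same tally as the manual loop, skipping single-character tokens
word_counter = Counter(w for w in words_list if len(w) > 1)
print(word_counter.most_common(10))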
# 3. Filter words: merge aliases into a single entry, then drop irrelevant words
counts['孔明'] = counts['孔明'] + counts['孔明曰']
counts['玄德'] = counts['玄德'] + counts['玄德曰'] + counts['刘备']
counts['关公'] = counts['关公'] + counts['云长']
for word in excludes:
    del counts[word]
# 4. Sort by frequency
items = list(counts.items())
print(items)
items.sort(key=lambda x: x[1], reverse=True)
li = []      # each role repeated `count` times, e.g. ['孔明', ..., '曹操', ...]
count1 = []  # the top-10 role names
count2 = []  # the corresponding counts
for i in range(10):
    # sequence unpacking of the (role, count) tuple
    role, count = items[i]
    print(role, count)
    count1.append(role)
    count2.append(count)
    # _ signals that the loop variable itself is not used
    for _ in range(count):
        li.append(role)
# 5. Build the word cloud
text = ' '.join(li)
WordCloud(
    font_path='msyh.ttc',
    background_color='white',
    width=800,
    height=600,
    # disable collocations so repeated two-word pairs are not counted as extra words
    collocations=False
).generate(text).to_file('./TOP10.png')
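Repeating each name count times only serves to weight the cloud; WordCloud can also take a frequency mapping directly (an alternative sketch; the output filename here is illustrative):
# Feed the top-10 counts straight to the word cloud instead of building `text`
WordCloud(
    font_path='msyh.ttc',
    background_color='white',
    width=800,
    height=600,
    collocations=False
).generate_from_frequencies(dict(items[:10])).to_file('./TOP10_freq.png')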
# 6. Plot a pie chart of the Top-10 characters
plt.pie(count2, shadow=True, labels=count1, autopct='%1.1f%%')
plt.legend(loc=2)
plt.axis('equal')
plt.show()
5. Exercise: Top-10 character analysis of Dream of the Red Chamber
import jieba
from wordcloud import WordCloud
from matplotlib import pyplot as plt
plt.rcParams["font.sans-serif"] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
import string
# 1. Read the novel
with open('./novel/all.txt', 'r', encoding='utf-8') as f:
    words = f.read()
counts = {}
excludes = {"什么", "一个", "我们", "你们", "如今", "说道", "知道", "起来", "这里",
            "出来", "众人", "那里", "自己", "一面", "只见", "太太", "两个", "没有",
            "怎么", "不是", "不知", "这个", "听见", "这样", "进来", "咱们", "就是",
            "老太太", "东西", "告诉", "回来", "只是", "大家", "姑娘", "奶奶", "老爷",
            "凤姐儿", "只得", "丫头", "这些", "他们", "不敢", "出去", "所以",
            "王熙凤", "林黛玉", "贾宝玉"}
# 2. Segment the text into words with jieba
words_list = jieba.lcut(words)
# print(words_list)
for word in words_list:
    if len(word) <= 1:
        continue
    else:
        # update this word's running count
        # counts[word] = counts[word] + 1 would raise KeyError on a new word
        # dict.get(k) returns None when the key is missing; get(k, 0) returns 0 instead
        counts[word] = counts.get(word, 0) + 1
print(len(counts))
# 3. Filter words: merge aliases into a single entry, then drop irrelevant words
counts['凤姐'] = counts['凤姐'] + counts['凤姐儿'] + counts['王熙凤']
counts['贾母'] = counts['贾母'] + counts['老太太'] + counts['太太'] + counts['奶奶']
counts['黛玉'] = counts['黛玉'] + counts['姑娘'] + counts['林黛玉']
counts['宝玉'] = counts['宝玉'] + counts['贾宝玉']
for word in excludes:
del counts[word]
# 4. Sort by frequency
items = list(counts.items())
print(items)
# def sort_by_counts(x):
# return x[1]
# items.sort(key=sort_by_counts, reverse=True)
items.sort(key=lambda x: x[1], reverse=True)
# print(items)
li = []      # each role repeated `count` times
count1 = []  # the top-10 role names
count2 = []  # the corresponding counts
for i in range(10):
    # sequence unpacking of the (role, count) tuple
    role, count = items[i]
    print(role, count)
    count1.append(role)
    count2.append(count)
    # _ signals that the loop variable itself is not used
    for _ in range(count):
        li.append(role)
# 5. Build the word cloud
text = ' '.join(li)
WordCloud(
    font_path='msyh.ttc',
    background_color='bisque',
    width=800,
    height=600,
    # disable collocations so repeated two-word pairs are not counted as extra words
    collocations=False
).generate(text).to_file('./HTOP10.png')
# 6. Plot a pie chart of the Top-10 characters
plt.pie(count2, shadow=True, labels=count1, autopct='%1.1f%%')
plt.legend(loc=2)
plt.axis('equal')
plt.show()