Using Redis's optimistic locking (built on WATCH) to keep the inventory count consistent in a flash-sale (seckill) system.
Client one:
import redis

conn = redis.Redis(host='127.0.0.1', port=6379)
# conn.set('count', 1000)   # seed the stock once before testing

with conn.pipeline() as pipe:
    # Watch the key first: if anyone else changes it before EXEC,
    # the transaction is aborted with redis.WatchError.
    pipe.watch('count')
    # While watching, the pipeline executes commands immediately,
    # so we can read the current stock here.
    count = int(pipe.get('count'))
    input('Let me think about it...')   # pause so another client can modify count
    # Start the transactional block; commands queue up from here on.
    pipe.multi()
    if count > 0:  # still in stock
        pipe.set('count', count - 1)
    # EXEC: push all queued commands at once.
    ret = pipe.execute()
    print(type(ret))
    print(ret)
Client two:
import redis

conn = redis.Redis(host='127.0.0.1', port=6379)

with conn.pipeline() as pipe:
    # Watch the stock key before reading it.
    pipe.watch('count')
    count = int(pipe.get('count'))
    # Queue the update inside a MULTI/EXEC block.
    pipe.multi()
    if count > 0:  # still in stock
        pipe.set('count', count - 1)
    # Push all queued commands at once; raises WatchError if 'count' changed.
    ret = pipe.execute()
    print(type(ret))
Note: in my tests on Windows, no exception was raised when the watched value had been modified; execute() simply returned an empty result list. On macOS and Linux a WatchError was raised directly.
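Either way, the usual response to a conflict is to retry the whole read-check-write cycle. Below is a minimal sketch of that retry loop, assuming the same 'count' key as above; the buy_one helper and the max_retries limit are illustrative, not part of the original code:

import redis
from redis.exceptions import WatchError

conn = redis.Redis(host='127.0.0.1', port=6379)

def buy_one(conn, max_retries=5):
    # Retry the WATCH / read / MULTI / EXEC cycle until it commits,
    # the stock runs out, or we give up after max_retries conflicts.
    for _ in range(max_retries):
        with conn.pipeline() as pipe:
            try:
                pipe.watch('count')
                count = int(pipe.get('count'))
                if count <= 0:
                    return False              # sold out
                pipe.multi()
                pipe.set('count', count - 1)
                pipe.execute()                # raises WatchError on conflict
                return True
            except WatchError:
                continue                      # someone else won the race; retry
    return False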
Testing the core flash-sale logic: spin up 100 threads that try to buy concurrently.
import redis
from threading import Thread


def choose(name, conn):
    # conn.set('count', 10)   # seed the stock once before running
    with conn.pipeline() as pipe:
        try:
            # Watch the stock key: any concurrent change aborts our EXEC.
            pipe.watch('count')
            count = int(pipe.get('count'))
            # input('Let me think about it...')
            # time.sleep(random.randint(1, 2))
            pipe.multi()
            if count > 0:  # still in stock
                pipe.set('count', count - 1)
            # Push all queued commands at once.
            ret = pipe.execute()
            print(ret)
        except redis.WatchError:
            ret = []   # someone else changed 'count' first
        if ret:
            print('Buyer %s got one' % name)
        else:
            print('Buyer %s missed out' % name)


if __name__ == '__main__':
    conn = redis.Redis(host='127.0.0.1', port=6379)
    for i in range(100):
        t = Thread(target=choose, args=(i, conn))
        t.start()
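For reference, redis-py also provides a transaction() convenience helper on the client that wraps this WATCH/MULTI/EXEC cycle and retries automatically on WatchError. A rough sketch of the same stock decrement using it, assuming the same 'count' key (decrement_stock is just an illustrative name):

import redis

conn = redis.Redis(host='127.0.0.1', port=6379)

def decrement_stock(pipe):
    # Called by conn.transaction(); pipe is already watching 'count',
    # so this read happens under the watch.
    count = int(pipe.get('count'))
    pipe.multi()
    if count > 0:
        pipe.set('count', count - 1)

# Retries the callable automatically whenever a WatchError occurs and
# returns the EXEC results, e.g. [True] on success or [] when sold out.
result = conn.transaction(decrement_stock, 'count')
print(result)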