Start Ollama.
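If the Ollama server is not already running (for example via the desktop app), it can be started from a terminal; by default it listens on http://localhost:11434, which is the address the script below points at:

ollama serve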
Pull the model you need, for example Qwen:
ollama pull qwen:4b
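Once the pull finishes, you can confirm the model is available locally:

ollama list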
Create a Python file named chat.py:
import requests
import json

# Locally installed models; the first entry is the one used for chatting
model_list = ['qwen:4b', 'deepseek-coder:6.7b']
# Ollama's generate endpoint (the server listens on port 11434 by default)
url = 'http://localhost:11434/api/generate'

while True:
    prompt = input("Enter your prompt: ")
    if prompt == 'exit':
        break

    json_data = {
        "model": model_list[0],
        "prompt": prompt
    }
    response = requests.post(url, json=json_data)
    print(response.status_code)

    # The endpoint streams one JSON object per line; splitting on '\n'
    # leaves a trailing empty string, which arr[0:-1] drops
    arr = response.text.split('\n')
    result_arr = [json.loads(item)['response'] for item in arr[0:-1]]
    print(''.join(result_arr))
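The endpoint streams its output by default, which is why the script joins the per-line JSON fragments. If a single response object is preferable, the request can include "stream": False instead; a minimal sketch, reusing the same model and URL as above with a hypothetical one-off prompt:

import requests

url = 'http://localhost:11434/api/generate'

json_data = {
    "model": "qwen:4b",
    "prompt": "Why is the sky blue?",  # example prompt, replace with your own
    "stream": False                    # ask Ollama for one complete JSON object
}
response = requests.post(url, json=json_data)
# With streaming disabled, the whole completion is in the 'response' field
print(response.json()['response'])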
Run the script and chat with the model from the terminal (type exit to quit):

python chat.py