# Run Ollama in Docker.
# --restart=always brings the container back automatically after a daemon/host
# restart, so previously downloaded models are served again without manual
# intervention; the volume mount persists model data on the host.
docker run -d -t --name ollama --network=host --restart=always \
  -v /home/data/docker/web:/root/.ollama ollama/ollama
docker logs ollama                # inspect container output
docker start ollama               # restart the container if it was stopped
docker exec -ti ollama /bin/bash  # open a shell inside the container

# Inside the container: install systemd and configure the timezone.
apt update
apt install systemd systemd-sysv  # tzdata prompt: pick 6=Asia, then 70=Shanghai
date -R  # print current date/time to verify the timezone took effect
# Download / run a model.
ollama list            # list models already available locally
ollama run qwen3:0.6b  # pull the model if needed, then start an interactive session
# Chat API: streaming vs. non-streaming responses.
curl http://localhost:11434/api/chat -d '{"stream": true,"model": "qwen3:0.6b", "messages": [ { "role": "user", "content": "你好呀" } ]}'
# Appending "/no_think" to the prompt disables the model's thinking mode.
curl http://localhost:11434/api/chat -d '{"stream": false,"model": "qwen3:0.6b", "messages": [ { "role": "user", "content": "你好呀/no_think" } ]}'
# Call the text-generation endpoint (/api/generate) with curl.
# NOTE: JSON does not allow // comments — the original payload embedded
# "//是否是流输出" after "stream", making the request body invalid JSON.
# "stream" controls whether the response is streamed back incrementally.
curl http://localhost:11434/api/generate -d '{
"model": "qwen3:0.6b",
"prompt": "为什么天空是蓝色的?",
"stream": false
}'
# If you already have a conda Python environment, it can also be installed the
# Docker way — see: http://www.diyyq.com/kaifahuanjing/9.html
# Speech-to-text (ASR) demo using FunASR's Paraformer model.
#
# One-time setup:
#   pip install funasr -i https://pypi.tuna.tsinghua.edu.cn/simple
#   pip install "numpy<1.25" -i https://pypi.tuna.tsinghua.edu.cn/simple
from funasr import AutoModel

# Local directory holding the pretrained Paraformer checkpoint.
MODEL_DIR = "./llm/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"

asr_model = AutoModel(model=MODEL_DIR, model_revision="v2.0.4")
audio_path = MODEL_DIR + "/example/asr_example.wav"
transcript = asr_model.generate(input=audio_path)
print(transcript)
#文字生成声音 (text-to-speech — section not filled in)

# (removed web-page footer residue: "发表评论 取消回复" / "Leave a comment · Cancel reply")