[AI Embedchain] Streamlit Integration

In this example, we will learn how to use mistralai/Mixtral-8x7B-Instruct-v0.1 and Embedchain together with Streamlit to build a simple RAG chatbot.

Streamlit + Embedchain Demo

Setup

Install Embedchain and Streamlit:

pip install embedchain streamlit
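
If you want to confirm that both packages installed correctly before writing any app code, a quick version check is enough (a minimal sketch; your version numbers will differ):

from importlib.metadata import version

# Print the installed versions of the two packages we just installed.
print("embedchain:", version("embedchain"))
print("streamlit:", version("streamlit"))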

app.py

import os
from embedchain import App
import streamlit as st

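# Sidebar: collect the Hugging Face access token that the app will use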
with st.sidebar:
    huggingface_access_token = st.text_input("Hugging Face Token", key="chatbot_api_key", type="password")
    "[Get a Hugging Face access token](https://huggingface.co/settings/tokens)"
    "[View the source code](https://github.com/embedchain/examples/mistral-streamlit)"

st.title("💬 Chatbot")
st.caption("🚀 An Embedchain app powered by Mistral!")
if "messages" not in st.session_state:
    st.session_state.messages = [
        {
            "role": "assistant",
            "content": """
        Hi! I'm a chatbot. I can answer questions and learn new things!\n
        Ask me anything, and if you want me to learn something new, type `/add <source>`.\n
        I can learn almost anything. :)
        """,
        }
    ]

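# Replay the conversation history on every Streamlit rerun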
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

if prompt := st.chat_input("Ask me anything!"):
    if not st.session_state.chatbot_api_key:
        st.error("Please enter your Hugging Face access token.")
        st.stop()

    os.environ["HUGGINGFACE_ACCESS_TOKEN"] = st.session_state.chatbot_api_key
    app = App.from_config(config_path="config.yaml")

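    # The `/add <source>` command ingests a new source into the knowledge base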
    if prompt.startswith("/add"):
        with st.chat_message("user"):
            st.markdown(prompt)
            st.session_state.messages.append({"role": "user", "content": prompt})
        prompt = prompt.replace("/add", "").strip()
        with st.chat_message("assistant"):
            message_placeholder = st.empty()
            message_placeholder.markdown("Adding to the knowledge base...")
            app.add(prompt)
            message_placeholder.markdown(f"Added {prompt} to the knowledge base!")
            st.session_state.messages.append({"role": "assistant", "content": f"Added {prompt} to the knowledge base!"})
            st.stop()

    with st.chat_message("user"):
        st.markdown(prompt)
        st.session_state.messages.append({"role": "user", "content": prompt})

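    # Stream the model's answer chunk by chunk, then render the full response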
    with st.chat_message("assistant"):
        msg_placeholder = st.empty()
        msg_placeholder.markdown("Thinking...")
        full_response = ""

        for response in app.chat(prompt):
            msg_placeholder.empty()
            full_response += response

        msg_placeholder.markdown(full_response)
        st.session_state.messages.append({"role": "assistant", "content": full_response})

config.yaml

app:
    config:
        name: 'mistral-streamlit-app'

llm:
    provider: huggingface
    config:
        model: 'mistralai/Mixtral-8x7B-Instruct-v0.1'
        temperature: 0.1
        max_tokens: 250
        top_p: 0.1
        stream: true

embedder:
    provider: huggingface
    config:
        model: 'sentence-transformers/all-mpnet-base-v2'
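
The same config.yaml can also be exercised outside Streamlit, which is handy for checking that the token, the LLM, and the embedder are wired up correctly. The snippet below is a minimal sketch: the token value and the source URL are placeholders, and since config.yaml sets stream: true, app.chat() yields chunks that we join into one string.

import os
from embedchain import App

# Placeholder value; use your own Hugging Face access token here.
os.environ["HUGGINGFACE_ACCESS_TOKEN"] = "hf_xxx"

# Build the app from the same config file the Streamlit app uses.
app = App.from_config(config_path="config.yaml")

# Ingest any supported source (the URL below is just an example).
app.add("https://example.com/some-article")

# With `stream: true`, chat() yields response chunks, so join them.
answer = "".join(app.chat("What did you learn from the source I added?"))
print(answer)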

Run it locally

streamlit run app.py
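
Once the app is up, paste your Hugging Face access token into the sidebar and chat as usual. To teach the bot something new, send `/add <source>` in the chat box (for example, `/add https://example.com/article`); the source is added to the knowledge base and you can ask questions about it right away.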
