From de9636e157a9c09941e98f682e3d8aeccba5c474 Mon Sep 17 00:00:00 2001
From: Carson Sievert
Date: Fri, 20 Dec 2024 16:56:17 -0600
Subject: [PATCH] Update chat templates and examples (#1806)

---
 examples/chat/.gitignore                       |   1 -
 examples/chat/RAG/recipes/app.py               |  60 ----------
 examples/chat/RAG/recipes/requirements.txt     |   4 -
 examples/chat/RAG/recipes/utils.py             | 106 ------------------
 examples/chat/README.md                        |  21 ----
 examples/chat/playground/app.py                |  82 --------------
 examples/chat/playground/requirements.txt      |   4 -
 examples/chat/ui/clear/app.py                  |  49 --------
 examples/chat/ui/clear/requirements.txt        |   2 -
 examples/chat/ui/dark/app.py                   |  41 -------
 examples/chat/ui/dark/requirements.txt         |   2 -
 examples/chat/ui/dynamic/app.py                |  40 -------
 examples/chat/ui/dynamic/requirements.txt      |   2 -
 examples/chat/ui/sidebar/app.py                |  48 --------
 examples/chat/ui/sidebar/requirements.txt      |   2 -
 shiny/_main_create.py                          |  42 +++----
 shiny/api-examples/chat/app-core.py            |  31 -----
 shiny/api-examples/chat/app-express.py         |  35 ------
 .../chat/enterprise/azure-openai/app.py        |  58 ----------
 .../chat/hello-providers/anthropic/app.py      |  44 --------
 .../chat/hello-providers/gemini/app.py         |  42 -------
 .../chat/hello-providers/ollama/app.py         |  37 ------
 .../chat/hello-providers/openai/app.py         |  50 ---------
 .../aws-bedrock-anthropic/_template.json       |   0
 .../aws-bedrock-anthropic/app.py               |  18 +--
 .../aws-bedrock-anthropic/app_utils.py         |   0
 .../aws-bedrock-anthropic/requirements.txt     |   3 +-
 .../azure-openai/_template.json                |   0
 .../chat/llm-enterprise/azure-openai/app.py    |  41 +++++++
 .../azure-openai/app_utils.py                  |   0
 .../azure-openai/requirements.txt              |   1 +
 .../anthropic/_template.json                   |   0
 shiny/templates/chat/llms/anthropic/app.py     |  41 +++++++
 .../anthropic/app_utils.py                     |   0
 .../anthropic/requirements.txt                 |   1 +
 .../gemini => llms/google}/_template.json      |   0
 shiny/templates/chat/llms/google/app.py        |  37 ++++++
 .../gemini => llms/google}/app_utils.py        |   0
 .../gemini => llms/google}/requirements.txt    |   1 +
 .../langchain/_template.json                   |   0
 .../langchain/app.py                           |  20 ++--
 .../langchain/app_utils.py                     |   0
 .../langchain/requirements.txt                 |   0
 .../ollama/_template.json                      |   0
 shiny/templates/chat/llms/ollama/app.py        |  33 ++++++
 .../ollama/requirements.txt                    |   1 +
 .../openai/_template.json                      |   0
 shiny/templates/chat/llms/openai/app.py        |  41 +++++++
 .../openai/app_utils.py                        |   0
 .../openai/requirements.txt                    |   1 +
 .../chat/llms/playground/_template.json        |   5 +
 shiny/templates/chat/llms/playground/app.py    | 106 ++++++++++++++++++
 .../playground}/app_utils.py                   |   0
 .../chat/llms/playground/requirements.txt      |   6 +
 .../chat/production/anthropic/_template.json   |   5 -
 .../chat/production/anthropic/app.py           |  65 -----------
 .../production/anthropic/requirements.txt      |   4 -
 .../chat/production/openai/_template.json      |   5 -
 shiny/templates/chat/production/openai/app.py  |  56 ---------
 .../chat/production/openai/app_utils.py        |  26 -----
 .../chat/production/openai/requirements.txt    |   4 -
 .../chat/starters/hello/_template.json         |   5 +
 .../chat/starters/hello}/app-core.py           |   8 +-
 .../templates/chat/starters/hello}/app.py      |   8 +-
 .../chat/starters/hello}/requirements.txt      |   0
 .../chat/starters/sidebar-dark/_template.json  |   5 +
 .../chat/starters/sidebar-dark/app.py          |  30 +++++
 shiny/ui/_chat.py                              |   6 +-
 68 files changed, 403 insertions(+), 983 deletions(-)
 delete mode 100644 examples/chat/.gitignore
 delete mode 100644 examples/chat/RAG/recipes/app.py
 delete mode 100644 examples/chat/RAG/recipes/requirements.txt
 delete mode 100644 examples/chat/RAG/recipes/utils.py
 delete mode 100644 examples/chat/README.md
 delete mode 100644 examples/chat/playground/app.py
 delete mode 100644 examples/chat/playground/requirements.txt
 delete mode 100644 examples/chat/ui/clear/app.py
 delete mode 100644 examples/chat/ui/clear/requirements.txt
 delete mode 100644 examples/chat/ui/dark/app.py
 delete mode 100644 examples/chat/ui/dark/requirements.txt
 delete mode 100644 examples/chat/ui/dynamic/app.py
 delete mode 100644 examples/chat/ui/dynamic/requirements.txt
 delete mode 100644 examples/chat/ui/sidebar/app.py
 delete mode 100644 examples/chat/ui/sidebar/requirements.txt
 delete mode 100644 shiny/api-examples/chat/app-core.py
 delete mode 100644 shiny/api-examples/chat/app-express.py
 delete mode 100644 shiny/templates/chat/enterprise/azure-openai/app.py
 delete mode 100644 shiny/templates/chat/hello-providers/anthropic/app.py
 delete mode 100644 shiny/templates/chat/hello-providers/gemini/app.py
 delete mode 100644 shiny/templates/chat/hello-providers/ollama/app.py
 delete mode 100644 shiny/templates/chat/hello-providers/openai/app.py
 rename shiny/templates/chat/{enterprise => llm-enterprise}/aws-bedrock-anthropic/_template.json (100%)
 rename shiny/templates/chat/{enterprise => llm-enterprise}/aws-bedrock-anthropic/app.py (75%)
 rename shiny/templates/chat/{enterprise => llm-enterprise}/aws-bedrock-anthropic/app_utils.py (100%)
 rename shiny/templates/chat/{enterprise => llm-enterprise}/aws-bedrock-anthropic/requirements.txt (53%)
 rename shiny/templates/chat/{enterprise => llm-enterprise}/azure-openai/_template.json (100%)
 create mode 100644 shiny/templates/chat/llm-enterprise/azure-openai/app.py
 rename shiny/templates/chat/{enterprise => llm-enterprise}/azure-openai/app_utils.py (100%)
 rename shiny/templates/chat/{enterprise => llm-enterprise}/azure-openai/requirements.txt (82%)
 rename shiny/templates/chat/{hello-providers => llms}/anthropic/_template.json (100%)
 create mode 100644 shiny/templates/chat/llms/anthropic/app.py
 rename shiny/templates/chat/{hello-providers => llms}/anthropic/app_utils.py (100%)
 rename shiny/templates/chat/{hello-providers => llms}/anthropic/requirements.txt (83%)
 rename shiny/templates/chat/{hello-providers/gemini => llms/google}/_template.json (100%)
 create mode 100644 shiny/templates/chat/llms/google/app.py
 rename shiny/templates/chat/{hello-providers/gemini => llms/google}/app_utils.py (100%)
 rename shiny/templates/chat/{hello-providers/gemini => llms/google}/requirements.txt (86%)
 rename shiny/templates/chat/{hello-providers => llms}/langchain/_template.json (100%)
 rename shiny/templates/chat/{hello-providers => llms}/langchain/app.py (75%)
 rename shiny/templates/chat/{hello-providers => llms}/langchain/app_utils.py (100%)
 rename shiny/templates/chat/{hello-providers => llms}/langchain/requirements.txt (100%)
 rename shiny/templates/chat/{hello-providers => llms}/ollama/_template.json (100%)
 create mode 100644 shiny/templates/chat/llms/ollama/app.py
 rename shiny/templates/chat/{hello-providers => llms}/ollama/requirements.txt (75%)
 rename shiny/templates/chat/{hello-providers => llms}/openai/_template.json (100%)
 create mode 100644 shiny/templates/chat/llms/openai/app.py
 rename shiny/templates/chat/{hello-providers => llms}/openai/app_utils.py (100%)
 rename shiny/templates/chat/{hello-providers => llms}/openai/requirements.txt (82%)
 create mode 100644 shiny/templates/chat/llms/playground/_template.json
 create mode 100644 shiny/templates/chat/llms/playground/app.py
 rename shiny/templates/chat/{production/anthropic => llms/playground}/app_utils.py (100%)
 create mode 100644 shiny/templates/chat/llms/playground/requirements.txt
 delete mode 100644 shiny/templates/chat/production/anthropic/_template.json
 delete mode 100644 shiny/templates/chat/production/anthropic/app.py
 delete mode 100644 shiny/templates/chat/production/anthropic/requirements.txt
 delete mode 100644 shiny/templates/chat/production/openai/_template.json
 delete mode 100644 shiny/templates/chat/production/openai/app.py
 delete mode 100644 shiny/templates/chat/production/openai/app_utils.py
 delete mode 100644 shiny/templates/chat/production/openai/requirements.txt
 create mode 100644 shiny/templates/chat/starters/hello/_template.json
 rename {examples/chat/hello-world => shiny/templates/chat/starters/hello}/app-core.py (80%)
 rename {examples/chat/hello-world => shiny/templates/chat/starters/hello}/app.py (81%)
 rename {examples/chat/hello-world => shiny/templates/chat/starters/hello}/requirements.txt (100%)
 create mode 100644 shiny/templates/chat/starters/sidebar-dark/_template.json
 create mode 100644 shiny/templates/chat/starters/sidebar-dark/app.py

diff --git a/examples/chat/.gitignore b/examples/chat/.gitignore
deleted file mode 100644
index 4c49bd78f..000000000
--- a/examples/chat/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-.env
diff --git a/examples/chat/RAG/recipes/app.py b/examples/chat/RAG/recipes/app.py
deleted file mode 100644
index b9c8e7f2a..000000000
--- a/examples/chat/RAG/recipes/app.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# ------------------------------------------------------------------------------------
-# A simple recipe extractor chatbot that extracts recipes from URLs using the OpenAI API.
-# To run it, you'll need an OpenAI API key.
-# To get one, follow the instructions at https://platform.openai.com/docs/quickstart
-# ------------------------------------------------------------------------------------
-import os
-
-from openai import AsyncOpenAI
-from utils import recipe_prompt, scrape_page_with_url
-
-from shiny.express import ui
-
-# Provide your API key here (or set the environment variable)
-llm = AsyncOpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
-
-# Set some Shiny page options
-ui.page_opts(
-    title="Recipe Extractor Chat",
-    fillable=True,
-    fillable_mobile=True,
-)
-
-# Initialize the chat (with a system prompt and starting message)
-chat = ui.Chat(
-    id="chat",
-    messages=[
-        {"role": "system", "content": recipe_prompt},
-        {
-            "role": "assistant",
-            "content": "Hello! I'm a recipe extractor. Please enter a URL to a recipe page. For example, ",
-        },
-    ],
-)
-
-chat.ui(placeholder="Enter a recipe URL...")
-
-
-# A function to transform user input
-# Note that, if an exception occurs, the function will return a message to the user,
-# "short-circuiting" the conversation and asking the user to try again.
-@chat.transform_user_input
-async def try_scrape_page(input: str) -> str | None:
-    try:
-        return await scrape_page_with_url(input)
-    except Exception:
-        await chat.append_message(
-            "I'm sorry, I couldn't extract content from that URL. Please try again. "
-        )
" - ) - return None - - -@chat.on_user_submit -async def _(): - response = await llm.chat.completions.create( - model="gpt-4o", - messages=chat.messages(format="openai"), - temperature=0, - stream=True, - ) - await chat.append_message_stream(response) diff --git a/examples/chat/RAG/recipes/requirements.txt b/examples/chat/RAG/recipes/requirements.txt deleted file mode 100644 index 54b14a013..000000000 --- a/examples/chat/RAG/recipes/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -aiohttp -bs4 -openai -shiny diff --git a/examples/chat/RAG/recipes/utils.py b/examples/chat/RAG/recipes/utils.py deleted file mode 100644 index 9c522cf32..000000000 --- a/examples/chat/RAG/recipes/utils.py +++ /dev/null @@ -1,106 +0,0 @@ -import aiohttp -from bs4 import BeautifulSoup - -recipe_prompt = """ -You are RecipeExtractorGPT. -Your goal is to extract recipe content from text and return a JSON representation of the useful information. - -The JSON should be structured like this: - -``` -{ - "title": "Scrambled eggs", - "ingredients": { - "eggs": "2", - "butter": "1 tbsp", - "milk": "1 tbsp", - "salt": "1 pinch" - }, - "directions": [ - "Beat eggs, milk, and salt together in a bowl until thoroughly combined.", - "Heat butter in a large skillet over medium-high heat. Pour egg mixture into the hot skillet; cook and stir until eggs are set, 3 to 5 minutes." - ], - "servings": 2, - "prep_time": 5, - "cook_time": 5, - "total_time": 10, - "tags": [ - "breakfast", - "eggs", - "scrambled" - ], - "source": "https://recipes.com/scrambled-eggs/", -} -``` - -The user will provide text content from a web page. -It is not very well structured, but the recipe is in there. -Please look carefully for the useful information about the recipe. -IMPORTANT: Return the result as JSON in a Markdown code block surrounded with three backticks! -""" - - -async def scrape_page_with_url(url: str, max_length: int = 14000) -> str: - """ - Given a URL, scrapes the web page and return the contents. This also adds adds the - URL to the beginning of the text. - - Parameters - ---------- - url: - The URL to scrape - max_length: - Max length of recipe text to process. This is to prevent the model from running - out of tokens. 14000 bytes translates to approximately 3200 tokens. - """ - contents = await scrape_page(url) - # Trim the string so that the prompt and reply will fit in the token limit.. It - # would be better to trim by tokens, but that requires using the tiktoken package, - # which can be very slow to load when running on containerized servers, because it - # needs to download the model from the internet each time the container starts. - contents = contents[:max_length] - return f"From: {url}\n\n" + contents - - -async def scrape_page(url: str) -> str: - # Asynchronously send an HTTP request to the URL. 
-    async with aiohttp.ClientSession() as session:
-        async with session.get(url) as response:
-            if response.status != 200:
-                raise aiohttp.ClientError(f"An error occurred: {response.status}")
-            html = await response.text()
-
-    # Parse the HTML content using BeautifulSoup
-    soup = BeautifulSoup(html, "html.parser")
-
-    # Remove script and style elements
-    for script in soup(["script", "style"]):
-        script.decompose()
-
-    # List of element IDs or class names to remove
-    elements_to_remove = [
-        "header",
-        "footer",
-        "sidebar",
-        "nav",
-        "menu",
-        "ad",
-        "advertisement",
-        "cookie-banner",
-        "popup",
-        "social",
-        "breadcrumb",
-        "pagination",
-        "comment",
-        "comments",
-    ]
-
-    # Remove unwanted elements by ID or class name
-    for element in elements_to_remove:
-        for e in soup.find_all(id=element) + soup.find_all(class_=element):
-            e.decompose()
-
-    # Extract text from the remaining HTML tags
-    text = " ".join(soup.stripped_strings)
-
-    return text
diff --git a/examples/chat/README.md b/examples/chat/README.md
deleted file mode 100644
index b0c2f4907..000000000
--- a/examples/chat/README.md
+++ /dev/null
@@ -1,21 +0,0 @@
-# Shiny `Chat` examples
-
-
-This folder contains a collection of examples illustrating `shiny.ui.Chat` usage. Many of them require API keys from providers such as OpenAI, Anthropic, etc. In those cases, the example should have commentary explaining how to obtain keys as well as how to provide them to the app.
-
-To get started with an app that doesn't require an API key, see the `hello-world` example. This example has both a Shiny Core and Express app to illustrate how it's used in either mode.
-
-
------------------------
-
-## Apps
-
-* [hello-world](hello-world): A simple chat app that echoes back the user's input.
-* [playground](playground): A playground for testing out different chat models: `openai`, `claude`, and `google`.
-* RAG
-  * [recipes](RAG/recipes): A simple recipe extractor chatbot that extracts recipes from URLs using the OpenAI API.
-* UI
-  * [clear](ui/clear): This example demonstrates how to clear the chat when the model changes.
-  * [dark](ui/dark): This example demonstrates Shiny Chat's dark mode capability.
-  * [dynamic](ui/dynamic): A basic example of dynamically re-rendering a Shiny Chat instance with different models.
-  * [sidebar](ui/sidebar): An example of placing a Shiny Chat instance in a sidebar (and having it fill the sidebar).
diff --git a/examples/chat/playground/app.py b/examples/chat/playground/app.py
deleted file mode 100644
index af205824b..000000000
--- a/examples/chat/playground/app.py
+++ /dev/null
@@ -1,82 +0,0 @@
-# ------------------------------------------------------------------------------------
-# A Shiny Chat example showing how to use different language models via LangChain.
-# To run it with all the different providers/models, you'll need API keys for each.
-# Namely, OPENAI_API_KEY, ANTHROPIC_API_KEY, and GOOGLE_API_KEY.
-# To see how to get these keys, see the relevant basic examples.
-# (i.e., ../basic/openai/app.py, ../basic/anthropic/app.py, ../basic/gemini/app.py)
-# ------------------------------------------------------------------------------------
-
-from langchain_anthropic import ChatAnthropic
-from langchain_google_vertexai import VertexAI
-from langchain_openai import ChatOpenAI
-
-from shiny.express import input, render, ui
-
-models = {
-    "openai": ["gpt-4o", "gpt-3.5-turbo"],
-    "claude": [
-        "claude-3-opus-latest",
-        "claude-3-5-sonnet-latest",
-        "claude-3-haiku-20240307",
-    ],
-    "google": ["gemini-1.5-pro-latest"],
-}
-
-model_choices: dict[str, dict[str, str]] = {}
-for key, value in models.items():
-    model_choices[key] = dict(zip(value, value))
-
-ui.page_opts(
-    title="Shiny Chat Playground",
-    fillable=True,
-    fillable_mobile=True,
-)
-
-with ui.sidebar(position="right"):
-    ui.input_select("model", "Model", choices=model_choices)
-    ui.input_select(
-        "system_actor",
-        "Response style",
-        choices=["Chuck Norris", "Darth Vader", "Yoda", "Gandalf", "Sherlock Holmes"],
-    )
-    ui.input_switch("stream", "Stream", value=False)
-    ui.input_slider("temperature", "Temperature", min=0, max=2, step=0.1, value=1)
-    ui.input_slider("max_tokens", "Max Tokens", min=1, max=4096, step=1, value=100)
-
-
-@render.express(fill=True, fillable=True)
-def chat_ui():
-    system_message = {
-        "content": f"""
-        You are a helpful AI assistant. Provide answers in the style of {input.system_actor()}.
-        """,
-        "role": "system",
-    }
-
-    chat = ui.Chat(id="chat", messages=[system_message])
-
-    model_params = {
-        "model": input.model(),
-        "temperature": input.temperature(),
-        "max_tokens": input.max_tokens(),
-    }
-
-    if input.model() in models["openai"]:
-        llm = ChatOpenAI(**model_params)
-    elif input.model() in models["claude"]:
-        llm = ChatAnthropic(**model_params)
-    elif input.model() in models["google"]:
-        llm = VertexAI(**model_params)
-    else:
-        raise ValueError(f"Invalid model: {input.model()}")
-
-    @chat.on_user_submit
-    async def _():
-        messages = chat.messages(format="langchain")
-        if input.stream():
-            response = llm.astream(messages)
-            await chat.append_message_stream(response)
-        else:
-            response = await llm.ainvoke(messages)
-            await chat.append_message(response)
-
-    chat.ui()
diff --git a/examples/chat/playground/requirements.txt b/examples/chat/playground/requirements.txt
deleted file mode 100644
index 29bb799b8..000000000
--- a/examples/chat/playground/requirements.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-langchain_anthropic
-langchain_google_vertexai
-langchain_openai
-shiny
diff --git a/examples/chat/ui/clear/app.py b/examples/chat/ui/clear/app.py
deleted file mode 100644
index fb4ac8137..000000000
--- a/examples/chat/ui/clear/app.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# --------------------------------------------------------------------------------
-# This example demonstrates how to clear the chat when the model changes.
-# To run it, you'll need an OpenAI API key.
-# To get one, follow the instructions at https://platform.openai.com/docs/quickstart
-# --------------------------------------------------------------------------------
-import os
-
-from langchain_openai import ChatOpenAI
-
-from shiny import reactive
-from shiny.express import input, ui
-
-# Provide your API key here (or set the environment variable)
-llm = ChatOpenAI(
-    api_key=os.environ.get("OPENAI_API_KEY"),  # type: ignore
-)
-
-# Set some Shiny page options
-ui.page_opts(
-    title="Hello OpenAI Chat",
-    fillable=True,
-    fillable_mobile=True,
-)
-
-# Create a sidebar to select the model
-with ui.sidebar():
-    ui.input_select("model", "Model", ["gpt-4o", "gpt-3.5-turbo"])
-
-# Create and display an empty chat UI
-chat = ui.Chat(id="chat")
-chat.ui()
-
-
-# Define a callback to run when the user submits a message
-@chat.on_user_submit
-async def _():
-    # Get messages currently in the chat
-    messages = chat.messages(format="langchain")
-    # Create a response message stream
-    response = llm.astream(messages)
-    # Append the response stream into the chat
-    await chat.append_message_stream(response)
-
-
-# Clear the chat when the model changes
-@reactive.effect
-@reactive.event(input.model)
-async def _():
-    await chat.clear_messages()
diff --git a/examples/chat/ui/clear/requirements.txt b/examples/chat/ui/clear/requirements.txt
deleted file mode 100644
index 74bdc42cd..000000000
--- a/examples/chat/ui/clear/requirements.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-langchain_openai
-shiny
diff --git a/examples/chat/ui/dark/app.py b/examples/chat/ui/dark/app.py
deleted file mode 100644
index 6ee93f343..000000000
--- a/examples/chat/ui/dark/app.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# --------------------------------------------------------------------------------
-# This example demonstrates Shiny Chat's dark mode capability.
-# To run it, you'll need an OpenAI API key.
-# To get one, follow the instructions at https://platform.openai.com/docs/quickstart
-# --------------------------------------------------------------------------------
-import os
-
-from langchain_openai import ChatOpenAI
-
-from shiny.express import ui
-
-# Provide your API key here (or set the environment variable)
-llm = ChatOpenAI(
-    api_key=os.environ.get("OPENAI_API_KEY"),  # type: ignore
-)
-
-# Set some Shiny page options
-ui.page_opts(
-    title="Hello dark mode!",
-    fillable=True,
-    fillable_mobile=True,
-)
-
-# Create a sidebar to select the dark mode
-with ui.sidebar(open="closed", position="right", width="100px"):
-    ui.tags.label("Dark mode", ui.input_dark_mode(mode="dark"))
-
-# Create and display an empty chat UI
-chat = ui.Chat(id="chat")
-chat.ui()
-
-
-# Define a callback to run when the user submits a message
-@chat.on_user_submit
-async def _():
-    # Get messages currently in the chat
-    messages = chat.messages(format="langchain")
-    # Create a response message stream
-    stream = llm.astream(messages)
-    # Append the response stream into the chat
-    await chat.append_message_stream(stream)
diff --git a/examples/chat/ui/dark/requirements.txt b/examples/chat/ui/dark/requirements.txt
deleted file mode 100644
index 74bdc42cd..000000000
--- a/examples/chat/ui/dark/requirements.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-langchain_openai
-shiny
diff --git a/examples/chat/ui/dynamic/app.py b/examples/chat/ui/dynamic/app.py
deleted file mode 100644
index b3a1597ca..000000000
--- a/examples/chat/ui/dynamic/app.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# -----------------------------------------------------------------------------
-# A basic example of dynamically re-rendering a Shiny Chat instance with different models.
-# To run it, you'll need an OpenAI API key.
-# To get one, follow the instructions at https://platform.openai.com/docs/quickstart
-# -----------------------------------------------------------------------------
-import os
-
-from langchain_openai import ChatOpenAI
-
-from shiny.express import input, render, ui
-
-ui.input_select("model", "Model", choices=["gpt-4o", "gpt-3.5-turbo"])
-
-
-@render.express
-def chat_ui():
-
-    chat = ui.Chat(
-        id="chat",
-        messages=[
-            {
-                "content": f"Hi! I'm a {input.model()} model. How can I help you today?",
-                "role": "assistant",
-            }
-        ],
-    )
-
-    chat.ui()
-
-    llm = ChatOpenAI(
-        model=input.model(),
-        # Provide your API key here (or set the environment variable)
-        api_key=os.environ.get("OPENAI_API_KEY"),  # type: ignore
-    )
-
-    @chat.on_user_submit
-    async def _():
-        messages = chat.messages(format="langchain")
-        response = llm.astream(messages)
-        await chat.append_message_stream(response)
diff --git a/examples/chat/ui/dynamic/requirements.txt b/examples/chat/ui/dynamic/requirements.txt
deleted file mode 100644
index 74bdc42cd..000000000
--- a/examples/chat/ui/dynamic/requirements.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-langchain_openai
-shiny
diff --git a/examples/chat/ui/sidebar/app.py b/examples/chat/ui/sidebar/app.py
deleted file mode 100644
index 84eab2bc7..000000000
--- a/examples/chat/ui/sidebar/app.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# -----------------------------------------------------------------------------
-# An example of placing a Shiny Chat instance in a sidebar (and having it fill the sidebar).
-# To run it, you'll need an OpenAI API key.
-# To get one, follow the instructions at https://platform.openai.com/docs/quickstart
-# -----------------------------------------------------------------------------
-import os
-
-from langchain_openai import ChatOpenAI
-
-from shiny.express import ui
-
-# Provide your API key here (or set the environment variable)
-llm = ChatOpenAI(
-    api_key=os.environ.get("OPENAI_API_KEY"),  # type: ignore
-)
-
-# Set some Shiny page options
-ui.page_opts(
-    title="Hello Sidebar Chat",
-    fillable=True,
-    fillable_mobile=True,
-)
-
-# Create a chat instance, with an initial message
-chat = ui.Chat(
-    id="chat",
-    messages=[
-        {"content": "Hello! How can I help you today?", "role": "assistant"},
-    ],
-)
-
-# Display the chat in a sidebar
-with ui.sidebar(width=300, style="height:100%", position="right"):
-    chat.ui(height="100%")
-
-
-# Define a callback to run when the user submits a message
-@chat.on_user_submit
-async def _():
-    # Get messages currently in the chat
-    messages = chat.messages(format="langchain")
-    # Create a response message stream
-    response = llm.astream(messages)
-    # Append the response stream into the chat
-    await chat.append_message_stream(response)
-
-
-"Lorem ipsum dolor sit amet, consectetur adipiscing elit"
diff --git a/examples/chat/ui/sidebar/requirements.txt b/examples/chat/ui/sidebar/requirements.txt
deleted file mode 100644
index 74bdc42cd..000000000
--- a/examples/chat/ui/sidebar/requirements.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-langchain_openai
-shiny
diff --git a/shiny/_main_create.py b/shiny/_main_create.py
index 84c66c5ee..8156521df 100644
--- a/shiny/_main_create.py
+++ b/shiny/_main_create.py
@@ -224,16 +224,16 @@ def packages(self) -> list[ShinyTemplate]:
         return self._templates("templates/package")
 
     @property
-    def chat_hello_providers(self) -> list[ShinyTemplate]:
-        return self._templates("templates/chat/hello-providers")
+    def chat_starters(self) -> list[ShinyTemplate]:
+        return self._templates("templates/chat/starters")
 
     @property
-    def chat_enterprise(self) -> list[ShinyTemplate]:
-        return self._templates("templates/chat/enterprise")
+    def chat_llms(self) -> list[ShinyTemplate]:
+        return self._templates("templates/chat/llms")
 
     @property
-    def chat_production(self) -> list[ShinyTemplate]:
-        return self._templates("templates/chat/production")
+    def chat_enterprise(self) -> list[ShinyTemplate]:
+        return self._templates("templates/chat/llm-enterprise")
 
 
 shiny_internal_templates = ShinyInternalTemplates()
@@ -262,14 +262,14 @@ def use_internal_template(
     app_templates = shiny_internal_templates.apps
     pkg_templates = shiny_internal_templates.packages
     chat_templates = [
-        *shiny_internal_templates.chat_hello_providers,
+        *shiny_internal_templates.chat_starters,
+        *shiny_internal_templates.chat_llms,
         *shiny_internal_templates.chat_enterprise,
-        *shiny_internal_templates.chat_production,
     ]
 
     menu_choices = [
         Choice(title="Custom JavaScript component...", value="_js-component"),
-        Choice(title="Generative AI templates...", value="_chat-ai"),
+        Choice(title="Chat component templates...", value="_chat"),
         Choice(
            title="Choose from the Shiny Templates website", value="_external-gallery"
        ),
@@ -302,7 +302,7 @@ def use_internal_template(
         sys.exit(0)
     elif question_state == "_js-component":
         use_internal_package_template(dest_dir=dest_dir, package_name=package_name)
-    elif question_state == "_chat-ai":
+    elif question_state == "_chat":
         use_internal_chat_ai_template(dest_dir=dest_dir, package_name=package_name)
     else:
         valid_choices = [t.id for t in app_templates + pkg_templates]
@@ -352,11 +352,11 @@ def use_internal_chat_ai_template(
 ):
     if input is None:
         input = questionary.select(
-            "Which kind of generative AI template would you like to use?",
+            "Which kind of chat template would you like?",
             choices=[
-                Choice(title="By provider...", value="_chat-ai_hello-providers"),
-                Choice(title="Enterprise providers...", value="_chat-ai_enterprise"),
-                Choice(title="Production-ready chat AI", value="_chat-ai_production"),
+                Choice(title="Chat starters...", value="_chat-starters"),
+                Choice(title="LLM powered chat...", value="_chat-llms"),
+                Choice(title="Enterprise LLM...", value="_chat-llm_enterprise"),
                 back_choice,
                 cancel_choice,
             ],
@@ -375,12 +375,12 @@ def use_internal_chat_ai_template(
         )
         return
 
-    if input == "_chat-ai_production":
-        template_choices = shiny_internal_templates.chat_production
-    elif input == "_chat-ai_enterprise":
-        template_choices = shiny_internal_templates.chat_enterprise
+    if input == "_chat-starters":
+        template_choices = shiny_internal_templates.chat_starters
+    elif input == "_chat-llms":
+        template_choices = shiny_internal_templates.chat_llms
     else:
-        template_choices = shiny_internal_templates.chat_hello_providers
+        template_choices = shiny_internal_templates.chat_enterprise
 
     choice = question_choose_template(template_choices, back_choice)
@@ -390,9 +390,9 @@ def use_internal_chat_ai_template(
 
     template = template_by_name(
         [
-            *shiny_internal_templates.chat_hello_providers,
+            *shiny_internal_templates.chat_starters,
+            *shiny_internal_templates.chat_llms,
             *shiny_internal_templates.chat_enterprise,
-            *shiny_internal_templates.chat_production,
         ],
         choice,
     )
diff --git a/shiny/api-examples/chat/app-core.py b/shiny/api-examples/chat/app-core.py
deleted file mode 100644
index 17d8395fb..000000000
--- a/shiny/api-examples/chat/app-core.py
+++ /dev/null
@@ -1,31 +0,0 @@
-from shiny import App, ui
-
-app_ui = ui.page_fillable(
-    ui.panel_title("Hello Shiny Chat"),
-    ui.chat_ui("chat"),
-    fillable_mobile=True,
-)
-
-# Create a welcome message
-welcome = ui.markdown(
-    """
-    Hi! This is a simple Shiny `Chat` UI. Enter a message below and I will
-    simply repeat it back to you. For more examples, see this
-    [folder of examples](https://github.com/posit-dev/py-shiny/tree/main/examples/chat).
-    """
-)
-
-
-def server(input, output, session):
-    chat = ui.Chat(id="chat", messages=[welcome])
-
-    # Define a callback to run when the user submits a message
-    @chat.on_user_submit
-    async def _():
-        # Get the user's input
-        user = chat.user_input()
-        # Append a response to the chat
-        await chat.append_message(f"You said: {user}")
-
-
-app = App(app_ui, server)
diff --git a/shiny/api-examples/chat/app-express.py b/shiny/api-examples/chat/app-express.py
deleted file mode 100644
index 3eabe7c00..000000000
--- a/shiny/api-examples/chat/app-express.py
+++ /dev/null
@@ -1,35 +0,0 @@
-from shiny.express import ui
-
-# Set some Shiny page options
-ui.page_opts(
-    title="Hello Shiny Chat",
-    fillable=True,
-    fillable_mobile=True,
-)
-
-# Create a welcome message
-welcome = ui.markdown(
-    """
-    Hi! This is a simple Shiny `Chat` UI. Enter a message below and I will
-    simply repeat it back to you. For more examples, see this
-    [folder of examples](https://github.com/posit-dev/py-shiny/tree/main/examples/chat).
- """ -) - -# Create a chat instance -chat = ui.Chat( - id="chat", - messages=[welcome], -) - -# Display it -chat.ui() - - -# Define a callback to run when the user submits a message -@chat.on_user_submit -async def _(): - # Get the user's input - user = chat.user_input() - # Append a response to the chat - await chat.append_message(f"You said: {user}") diff --git a/shiny/templates/chat/enterprise/azure-openai/app.py b/shiny/templates/chat/enterprise/azure-openai/app.py deleted file mode 100644 index d5c9f3a19..000000000 --- a/shiny/templates/chat/enterprise/azure-openai/app.py +++ /dev/null @@ -1,58 +0,0 @@ -# ------------------------------------------------------------------------------------ -# A basic Shiny Chat example powered by OpenAI running on Azure. -# To run it, you'll need OpenAI API key. -# To get setup, follow the instructions at https://learn.microsoft.com/en-us/azure/ai-services/openai/quickstart?tabs=command-line%2Cpython-new&pivots=programming-language-python#create-a-new-python-application -# ------------------------------------------------------------------------------------ -import os - -from app_utils import load_dotenv -from openai import AzureOpenAI - -from shiny.express import ui - -# Either explicitly set the AZURE_OPENAI_API_KEY and AZURE_OPENAI_ENDPOINT environment -# variables before launching the app, or set them in a file named `.env`. The -# `python-dotenv` package will load `.env` as environment variables which can later be -# read by `os.getenv()`. -load_dotenv() - -llm = AzureOpenAI( - api_key=os.getenv("AZURE_OPENAI_API_KEY"), - api_version="2024-02-01", - azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"), # type: ignore -) - -deployment_name = "REPLACE_WITH_YOUR_DEPLOYMENT_NAME" - -# Set some Shiny page options -ui.page_opts( - title="Hello OpenAI Chat", - fillable=True, - fillable_mobile=True, -) - -# Create a chat instance, with an initial message -chat = ui.Chat( - id="chat", - messages=[ - {"content": "Hello! How can I help you today?", "role": "assistant"}, - ], -) - -# Display the chat -chat.ui() - - -# Define a callback to run when the user submits a message -@chat.on_user_submit -async def _(): - # Get messages currently in the chat - messages = chat.messages(format="openai") - # Create a response message stream - response = llm.chat.completions.create( - model=deployment_name, - messages=messages, - stream=True, - ) - # Append the response stream into the chat - await chat.append_message_stream(response) diff --git a/shiny/templates/chat/hello-providers/anthropic/app.py b/shiny/templates/chat/hello-providers/anthropic/app.py deleted file mode 100644 index e3bae5966..000000000 --- a/shiny/templates/chat/hello-providers/anthropic/app.py +++ /dev/null @@ -1,44 +0,0 @@ -# ------------------------------------------------------------------------------------ -# A basic Shiny Chat example powered by Anthropic's Claude model. -# To run it, you'll need an Anthropic API key. -# To get one, follow the instructions at https://docs.anthropic.com/en/api/getting-started -# ------------------------------------------------------------------------------------ -import os - -from anthropic import AsyncAnthropic -from app_utils import load_dotenv - -from shiny.express import ui - -# Either explicitly set the ANTHROPIC_API_KEY environment variable before launching the -# app, or set them in a file named `.env`. The `python-dotenv` package will load `.env` -# as environment variables which can later be read by `os.getenv()`. 
-load_dotenv()
-llm = AsyncAnthropic(api_key=os.environ.get("ANTHROPIC_API_KEY"))
-
-# Set some Shiny page options
-ui.page_opts(
-    title="Hello Anthropic Claude Chat",
-    fillable=True,
-    fillable_mobile=True,
-)
-
-# Create and display empty chat
-chat = ui.Chat(id="chat")
-chat.ui()
-
-
-# Define a callback to run when the user submits a message
-@chat.on_user_submit
-async def _():
-    # Get messages currently in the chat
-    messages = chat.messages(format="anthropic")
-    # Create a response message stream
-    response = await llm.messages.create(
-        model="claude-3-5-sonnet-latest",
-        messages=messages,
-        stream=True,
-        max_tokens=1000,
-    )
-    # Append the response stream into the chat
-    await chat.append_message_stream(response)
diff --git a/shiny/templates/chat/hello-providers/gemini/app.py b/shiny/templates/chat/hello-providers/gemini/app.py
deleted file mode 100644
index 2240c8f52..000000000
--- a/shiny/templates/chat/hello-providers/gemini/app.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# ------------------------------------------------------------------------------------
-# A basic Shiny Chat example powered by Google's Gemini model.
-# To run it, you'll need a Google API key.
-# To get one, follow the instructions at https://ai.google.dev/gemini-api/docs/get-started/tutorial?lang=python
-# ------------------------------------------------------------------------------------
-from app_utils import load_dotenv
-from google.generativeai import GenerativeModel
-
-from shiny.express import ui
-
-# Either explicitly set the GOOGLE_API_KEY environment variable before launching the
-# app, or set them in a file named `.env`. The `python-dotenv` package will load `.env`
-# as environment variables which can later be read by `os.getenv()`.
-load_dotenv()
-llm = GenerativeModel()
-
-# Set some Shiny page options
-ui.page_opts(
-    title="Hello Google Gemini Chat",
-    fillable=True,
-    fillable_mobile=True,
-)
-
-# Create and display empty chat
-chat = ui.Chat(id="chat")
-chat.ui()
-
-
-# Define a callback to run when the user submits a message
-@chat.on_user_submit
-async def _():
-    # Get messages currently in the chat
-    contents = chat.messages(format="google")
-
-    # Generate a response message stream
-    response = llm.generate_content(
-        contents=contents,
-        stream=True,
-    )
-
-    # Append the response stream into the chat
-    await chat.append_message_stream(response)
diff --git a/shiny/templates/chat/hello-providers/ollama/app.py b/shiny/templates/chat/hello-providers/ollama/app.py
deleted file mode 100644
index 25d1e37ff..000000000
--- a/shiny/templates/chat/hello-providers/ollama/app.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# ------------------------------------------------------------------------------------
-# A basic Shiny Chat example powered by Ollama.
-# To run it, you'll need an Ollama server running locally.
-# To download and run the server, see https://github.com/ollama/ollama
-# To install the Ollama Python client, see https://github.com/ollama/ollama-python
-# ------------------------------------------------------------------------------------
-
-import ollama
-
-from shiny.express import ui
-
-# Set some Shiny page options
-ui.page_opts(
-    title="Hello Ollama Chat",
-    fillable=True,
-    fillable_mobile=True,
-)
-
-# Create and display empty chat
-chat = ui.Chat(id="chat")
-chat.ui()
-
-
-# Define a callback to run when the user submits a message
-@chat.on_user_submit
-async def _():
-    # Get messages currently in the chat
-    messages = chat.messages(format="ollama")
-    # Create a response message stream
-    # Assumes you've run `ollama run llama3` to start the server
-    response = ollama.chat(
-        model="llama3.2",
-        messages=messages,
-        stream=True,
-    )
-    # Append the response stream into the chat
-    await chat.append_message_stream(response)
diff --git a/shiny/templates/chat/hello-providers/openai/app.py b/shiny/templates/chat/hello-providers/openai/app.py
deleted file mode 100644
index 2140fefdf..000000000
--- a/shiny/templates/chat/hello-providers/openai/app.py
+++ /dev/null
@@ -1,50 +0,0 @@
-# ------------------------------------------------------------------------------------
-# A basic Shiny Chat example powered by OpenAI's GPT-4o model.
-# To run it, you'll need an OpenAI API key.
-# To get set up, follow the instructions at https://platform.openai.com/docs/quickstart
-# ------------------------------------------------------------------------------------
-import os
-
-from app_utils import load_dotenv
-from openai import AsyncOpenAI
-
-from shiny.express import ui
-
-# Either explicitly set the OPENAI_API_KEY environment variable before launching the
-# app, or set them in a file named `.env`. The `python-dotenv` package will load `.env`
-# as environment variables which can later be read by `os.getenv()`.
-load_dotenv()
-llm = AsyncOpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
-
-# Set some Shiny page options
-ui.page_opts(
-    title="Hello OpenAI Chat",
-    fillable=True,
-    fillable_mobile=True,
-)
-
-# Create a chat instance, with an initial message
-chat = ui.Chat(
-    id="chat",
-    messages=[
-        {"content": "Hello! How can I help you today?", "role": "assistant"},
-    ],
-)
How can I help you today?", "role": "assistant"}, - ], -) - -# Display the chat -chat.ui() - - -# Define a callback to run when the user submits a message -@chat.on_user_submit -async def _(): - # Get messages currently in the chat - messages = chat.messages(format="openai") - # Create a response message stream - response = await llm.chat.completions.create( - model="gpt-4o", - messages=messages, - stream=True, - ) - # Append the response stream into the chat - await chat.append_message_stream(response) diff --git a/shiny/templates/chat/enterprise/aws-bedrock-anthropic/_template.json b/shiny/templates/chat/llm-enterprise/aws-bedrock-anthropic/_template.json similarity index 100% rename from shiny/templates/chat/enterprise/aws-bedrock-anthropic/_template.json rename to shiny/templates/chat/llm-enterprise/aws-bedrock-anthropic/_template.json diff --git a/shiny/templates/chat/enterprise/aws-bedrock-anthropic/app.py b/shiny/templates/chat/llm-enterprise/aws-bedrock-anthropic/app.py similarity index 75% rename from shiny/templates/chat/enterprise/aws-bedrock-anthropic/app.py rename to shiny/templates/chat/llm-enterprise/aws-bedrock-anthropic/app.py index fa9b7859e..d2916f85f 100644 --- a/shiny/templates/chat/enterprise/aws-bedrock-anthropic/app.py +++ b/shiny/templates/chat/llm-enterprise/aws-bedrock-anthropic/app.py @@ -4,8 +4,8 @@ # To get started, follow the instructions at https://aws.amazon.com/bedrock/claude/ # as well as https://github.com/anthropics/anthropic-sdk-python#aws-bedrock # ------------------------------------------------------------------------------------ -from anthropic import AnthropicBedrock from app_utils import load_dotenv +from chatlas import ChatBedrockAnthropic from shiny.express import ui @@ -13,7 +13,8 @@ # them in a file named `.env`. The `python-dotenv` package will load `.env` as # environment variables which can be read by `os.getenv()`. 
 load_dotenv()
-llm = AnthropicBedrock(
+chat_model = ChatBedrockAnthropic(
+    model="anthropic.claude-3-sonnet-20240229-v1:0",
     # aws_secret_key=os.getenv("AWS_SECRET_KEY"),
     # aws_access_key=os.getenv("AWS_ACCESS_KEY"),
     # aws_region=os.getenv("AWS_REGION"),
@@ -34,15 +35,6 @@
 # Define a callback to run when the user submits a message
 @chat.on_user_submit
-async def _():
-    # Get messages currently in the chat
-    messages = chat.messages(format="anthropic")
-    # Create a response message stream
-    response = llm.messages.create(
-        model="anthropic.claude-3-5-sonnet-20241022-v2:0",
-        messages=messages,
-        stream=True,
-        max_tokens=1000,
-    )
-    # Append the response stream into the chat
+async def handle_user_input(user_input: str):
+    response = chat_model.stream(user_input)
     await chat.append_message_stream(response)
diff --git a/shiny/templates/chat/enterprise/aws-bedrock-anthropic/app_utils.py b/shiny/templates/chat/llm-enterprise/aws-bedrock-anthropic/app_utils.py
similarity index 100%
rename from shiny/templates/chat/enterprise/aws-bedrock-anthropic/app_utils.py
rename to shiny/templates/chat/llm-enterprise/aws-bedrock-anthropic/app_utils.py
diff --git a/shiny/templates/chat/enterprise/aws-bedrock-anthropic/requirements.txt b/shiny/templates/chat/llm-enterprise/aws-bedrock-anthropic/requirements.txt
similarity index 53%
rename from shiny/templates/chat/enterprise/aws-bedrock-anthropic/requirements.txt
rename to shiny/templates/chat/llm-enterprise/aws-bedrock-anthropic/requirements.txt
index fb3b67026..a0d9e4048 100644
--- a/shiny/templates/chat/enterprise/aws-bedrock-anthropic/requirements.txt
+++ b/shiny/templates/chat/llm-enterprise/aws-bedrock-anthropic/requirements.txt
@@ -1,4 +1,5 @@
 shiny
 python-dotenv
 tokenizers
-anthropic
+chatlas
+anthropic[bedrock]
diff --git a/shiny/templates/chat/enterprise/azure-openai/_template.json b/shiny/templates/chat/llm-enterprise/azure-openai/_template.json
similarity index 100%
rename from shiny/templates/chat/enterprise/azure-openai/_template.json
rename to shiny/templates/chat/llm-enterprise/azure-openai/_template.json
diff --git a/shiny/templates/chat/llm-enterprise/azure-openai/app.py b/shiny/templates/chat/llm-enterprise/azure-openai/app.py
new file mode 100644
index 000000000..d6ba133cd
--- /dev/null
+++ b/shiny/templates/chat/llm-enterprise/azure-openai/app.py
@@ -0,0 +1,41 @@
+# ------------------------------------------------------------------------------------
+# A basic Shiny Chat example powered by OpenAI running on Azure.
+# ------------------------------------------------------------------------------------
+import os
+
+from app_utils import load_dotenv
+from chatlas import ChatAzureOpenAI
+
+from shiny.express import ui
+
+# ChatAzureOpenAI() requires an API key from Azure OpenAI.
+# See the docs for more information on how to obtain one.
+# https://posit-dev.github.io/chatlas/reference/ChatAzureOpenAI.html
+load_dotenv()
+chat_model = ChatAzureOpenAI(
+    api_key=os.getenv("AZURE_OPENAI_API_KEY"),
+    endpoint="https://my-endpoint.openai.azure.com",
+    deployment_id="gpt-4o-mini",
+    api_version="2024-08-01-preview",
+)
+
+# Set some Shiny page options
+ui.page_opts(
+    title="Hello Azure OpenAI Chat",
+    fillable=True,
+    fillable_mobile=True,
+)
+
+# Create a chat instance, with an initial message
+chat = ui.Chat(
+    id="chat",
+    messages=["Hello! How can I help you today?"],
+)
How can I help you today?"], +) +chat.ui() + + +# Define a callback to run when the user submits a message +@chat.on_user_submit +async def handle_user_input(user_input: str): + response = chat_model.stream(user_input) + await chat.append_message_stream(response) diff --git a/shiny/templates/chat/enterprise/azure-openai/app_utils.py b/shiny/templates/chat/llm-enterprise/azure-openai/app_utils.py similarity index 100% rename from shiny/templates/chat/enterprise/azure-openai/app_utils.py rename to shiny/templates/chat/llm-enterprise/azure-openai/app_utils.py diff --git a/shiny/templates/chat/enterprise/azure-openai/requirements.txt b/shiny/templates/chat/llm-enterprise/azure-openai/requirements.txt similarity index 82% rename from shiny/templates/chat/enterprise/azure-openai/requirements.txt rename to shiny/templates/chat/llm-enterprise/azure-openai/requirements.txt index 6e4a780cf..e7c42d64c 100644 --- a/shiny/templates/chat/enterprise/azure-openai/requirements.txt +++ b/shiny/templates/chat/llm-enterprise/azure-openai/requirements.txt @@ -1,4 +1,5 @@ shiny python-dotenv tokenizers +chatlas openai diff --git a/shiny/templates/chat/hello-providers/anthropic/_template.json b/shiny/templates/chat/llms/anthropic/_template.json similarity index 100% rename from shiny/templates/chat/hello-providers/anthropic/_template.json rename to shiny/templates/chat/llms/anthropic/_template.json diff --git a/shiny/templates/chat/llms/anthropic/app.py b/shiny/templates/chat/llms/anthropic/app.py new file mode 100644 index 000000000..585dd5a58 --- /dev/null +++ b/shiny/templates/chat/llms/anthropic/app.py @@ -0,0 +1,41 @@ +# ------------------------------------------------------------------------------------ +# A basic Shiny Chat example powered by Anthropic's Claude model. +# ------------------------------------------------------------------------------------ +import os + +from app_utils import load_dotenv +from chatlas import ChatAnthropic + +from shiny.express import ui + +# ChatAnthropic() requires an API key from Anthropic. +# See the docs for more information on how to obtain one. +# https://posit-dev.github.io/chatlas/reference/ChatAnthropic.html +load_dotenv() +chat_model = ChatAnthropic( + api_key=os.environ.get("ANTHROPIC_API_KEY"), + model="claude-3-5-sonnet-latest", + system_prompt="You are a helpful assistant.", +) + + +# Set some Shiny page options +ui.page_opts( + title="Hello Anthropic Claude Chat", + fillable=True, + fillable_mobile=True, +) + +# Create and display a Shiny chat component +chat = ui.Chat( + id="chat", + messages=["Hello! 
How can I help you today?"], +) +chat.ui() + + +# Generate a response when the user submits a message +@chat.on_user_submit +async def handle_user_input(user_input: str): + response = chat_model.stream(user_input) + await chat.append_message_stream(response) diff --git a/shiny/templates/chat/hello-providers/anthropic/app_utils.py b/shiny/templates/chat/llms/anthropic/app_utils.py similarity index 100% rename from shiny/templates/chat/hello-providers/anthropic/app_utils.py rename to shiny/templates/chat/llms/anthropic/app_utils.py diff --git a/shiny/templates/chat/hello-providers/anthropic/requirements.txt b/shiny/templates/chat/llms/anthropic/requirements.txt similarity index 83% rename from shiny/templates/chat/hello-providers/anthropic/requirements.txt rename to shiny/templates/chat/llms/anthropic/requirements.txt index fb3b67026..fc19951f1 100644 --- a/shiny/templates/chat/hello-providers/anthropic/requirements.txt +++ b/shiny/templates/chat/llms/anthropic/requirements.txt @@ -1,4 +1,5 @@ shiny python-dotenv tokenizers +chatlas anthropic diff --git a/shiny/templates/chat/hello-providers/gemini/_template.json b/shiny/templates/chat/llms/google/_template.json similarity index 100% rename from shiny/templates/chat/hello-providers/gemini/_template.json rename to shiny/templates/chat/llms/google/_template.json diff --git a/shiny/templates/chat/llms/google/app.py b/shiny/templates/chat/llms/google/app.py new file mode 100644 index 000000000..ed5b75248 --- /dev/null +++ b/shiny/templates/chat/llms/google/app.py @@ -0,0 +1,37 @@ +# ------------------------------------------------------------------------------------ +# A basic Shiny Chat example powered by Google's Gemini model. +# ------------------------------------------------------------------------------------ +import os + +from app_utils import load_dotenv +from chatlas import ChatGoogle + +from shiny.express import ui + +# ChatGoogle() requires an API key from Google. +# See the docs for more information on how to obtain one. 
+# https://posit-dev.github.io/chatlas/reference/ChatGoogle.html
+load_dotenv()
+chat_model = ChatGoogle(
+    api_key=os.environ.get("GOOGLE_API_KEY"),
+    system_prompt="You are a helpful assistant.",
+    model="gemini-1.5-flash",
+)
+
+# Set some Shiny page options
+ui.page_opts(
+    title="Hello Google Gemini Chat",
+    fillable=True,
+    fillable_mobile=True,
+)
+
+# Create and display empty chat
+chat = ui.Chat(id="chat")
+chat.ui()
+
+
+# Generate a response when the user submits a message
+@chat.on_user_submit
+async def handle_user_input(user_input: str):
+    response = chat_model.stream(user_input)
+    await chat.append_message_stream(response)
diff --git a/shiny/templates/chat/hello-providers/gemini/app_utils.py b/shiny/templates/chat/llms/google/app_utils.py
similarity index 100%
rename from shiny/templates/chat/hello-providers/gemini/app_utils.py
rename to shiny/templates/chat/llms/google/app_utils.py
diff --git a/shiny/templates/chat/hello-providers/gemini/requirements.txt b/shiny/templates/chat/llms/google/requirements.txt
similarity index 86%
rename from shiny/templates/chat/hello-providers/gemini/requirements.txt
rename to shiny/templates/chat/llms/google/requirements.txt
index f3e733a88..f51cd04e3 100644
--- a/shiny/templates/chat/hello-providers/gemini/requirements.txt
+++ b/shiny/templates/chat/llms/google/requirements.txt
@@ -1,4 +1,5 @@
 shiny
 python-dotenv
 tokenizers
+chatlas
 google-generativeai
diff --git a/shiny/templates/chat/hello-providers/langchain/_template.json b/shiny/templates/chat/llms/langchain/_template.json
similarity index 100%
rename from shiny/templates/chat/hello-providers/langchain/_template.json
rename to shiny/templates/chat/llms/langchain/_template.json
diff --git a/shiny/templates/chat/hello-providers/langchain/app.py b/shiny/templates/chat/llms/langchain/app.py
similarity index 75%
rename from shiny/templates/chat/hello-providers/langchain/app.py
rename to shiny/templates/chat/llms/langchain/app.py
index 73e13777f..1c62ce399 100644
--- a/shiny/templates/chat/hello-providers/langchain/app.py
+++ b/shiny/templates/chat/llms/langchain/app.py
@@ -15,7 +15,10 @@
 # app, or set them in a file named `.env`. The `python-dotenv` package will load `.env`
 # as environment variables which can later be read by `os.getenv()`.
 load_dotenv()
-llm = ChatOpenAI(api_key=os.environ.get("OPENAI_API_KEY"))  # type: ignore
+chat_model = ChatOpenAI(
+    api_key=os.environ.get("OPENAI_API_KEY"),
+    model="gpt-4o",
+)
 
 # Set some Shiny page options
 ui.page_opts(
@@ -24,17 +27,16 @@
     fillable_mobile=True,
 )
 
-# Create and display an empty chat UI
-chat = ui.Chat(id="chat")
+# Create and display a Shiny chat component
+chat = ui.Chat(
+    id="chat",
+    messages=["Hello! How can I help you today?"],
+)
How can I help you today?"], +) chat.ui() # Define a callback to run when the user submits a message @chat.on_user_submit -async def _(): - # Get messages currently in the chat - messages = chat.messages(format="langchain") - # Create a response message stream - response = llm.astream(messages) - # Append the response stream into the chat +async def handle_user_input(user_input: str): + response = chat_model.stream(user_input) await chat.append_message_stream(response) diff --git a/shiny/templates/chat/hello-providers/langchain/app_utils.py b/shiny/templates/chat/llms/langchain/app_utils.py similarity index 100% rename from shiny/templates/chat/hello-providers/langchain/app_utils.py rename to shiny/templates/chat/llms/langchain/app_utils.py diff --git a/shiny/templates/chat/hello-providers/langchain/requirements.txt b/shiny/templates/chat/llms/langchain/requirements.txt similarity index 100% rename from shiny/templates/chat/hello-providers/langchain/requirements.txt rename to shiny/templates/chat/llms/langchain/requirements.txt diff --git a/shiny/templates/chat/hello-providers/ollama/_template.json b/shiny/templates/chat/llms/ollama/_template.json similarity index 100% rename from shiny/templates/chat/hello-providers/ollama/_template.json rename to shiny/templates/chat/llms/ollama/_template.json diff --git a/shiny/templates/chat/llms/ollama/app.py b/shiny/templates/chat/llms/ollama/app.py new file mode 100644 index 000000000..581050a98 --- /dev/null +++ b/shiny/templates/chat/llms/ollama/app.py @@ -0,0 +1,33 @@ +# ------------------------------------------------------------------------------------ +# A basic Shiny Chat example powered by Ollama. +# ------------------------------------------------------------------------------------ + +from chatlas import ChatOllama + +from shiny.express import ui + +# ChatOllama() requires an Ollama model server to be running locally. +# See the docs for more information on how to set up a local Ollama server. +# https://posit-dev.github.io/chatlas/reference/ChatOllama.html +chat_model = ChatOllama(model="llama3.1") + +# Set some Shiny page options +ui.page_opts( + title="Hello Ollama Chat", + fillable=True, + fillable_mobile=True, +) + +# Create and display a Shiny chat component +chat = ui.Chat( + id="chat", + messages=["Hello! 
How can I help you today?"], +) +chat.ui() + + +# Generate a response when the user submits a message +@chat.on_user_submit +async def handle_user_input(user_input: str): + response = chat_model.stream(user_input) + await chat.append_message_stream(response) diff --git a/shiny/templates/chat/hello-providers/ollama/requirements.txt b/shiny/templates/chat/llms/ollama/requirements.txt similarity index 75% rename from shiny/templates/chat/hello-providers/ollama/requirements.txt rename to shiny/templates/chat/llms/ollama/requirements.txt index 223d69e4b..5901288be 100644 --- a/shiny/templates/chat/hello-providers/ollama/requirements.txt +++ b/shiny/templates/chat/llms/ollama/requirements.txt @@ -1,3 +1,4 @@ shiny tokenizers +chatlas ollama diff --git a/shiny/templates/chat/hello-providers/openai/_template.json b/shiny/templates/chat/llms/openai/_template.json similarity index 100% rename from shiny/templates/chat/hello-providers/openai/_template.json rename to shiny/templates/chat/llms/openai/_template.json diff --git a/shiny/templates/chat/llms/openai/app.py b/shiny/templates/chat/llms/openai/app.py new file mode 100644 index 000000000..07ff62bb2 --- /dev/null +++ b/shiny/templates/chat/llms/openai/app.py @@ -0,0 +1,41 @@ +# ------------------------------------------------------------------------------------ +# A basic Shiny Chat example powered by OpenAI. +# ------------------------------------------------------------------------------------ +import os + +from app_utils import load_dotenv +from chatlas import ChatOpenAI + +from shiny.express import ui + +# ChatOpenAI() requires an API key from OpenAI. +# See the docs for more information on how to obtain one. +# https://posit-dev.github.io/chatlas/reference/ChatOpenAI.html +load_dotenv() +chat_model = ChatOpenAI( + api_key=os.environ.get("OPENAI_API_KEY"), + model="gpt-4o", + system_prompt="You are a helpful assistant.", +) + + +# Set some Shiny page options +ui.page_opts( + title="Hello OpenAI Chat", + fillable=True, + fillable_mobile=True, +) + +# Create and display a Shiny chat component +chat = ui.Chat( + id="chat", + messages=["Hello! 
How can I help you today?"], +) +chat.ui() + + +# Generate a response when the user submits a message +@chat.on_user_submit +async def handle_user_input(user_input: str): + response = chat_model.stream(user_input) + await chat.append_message_stream(response) diff --git a/shiny/templates/chat/hello-providers/openai/app_utils.py b/shiny/templates/chat/llms/openai/app_utils.py similarity index 100% rename from shiny/templates/chat/hello-providers/openai/app_utils.py rename to shiny/templates/chat/llms/openai/app_utils.py diff --git a/shiny/templates/chat/hello-providers/openai/requirements.txt b/shiny/templates/chat/llms/openai/requirements.txt similarity index 82% rename from shiny/templates/chat/hello-providers/openai/requirements.txt rename to shiny/templates/chat/llms/openai/requirements.txt index 6e4a780cf..e7c42d64c 100644 --- a/shiny/templates/chat/hello-providers/openai/requirements.txt +++ b/shiny/templates/chat/llms/openai/requirements.txt @@ -1,4 +1,5 @@ shiny python-dotenv tokenizers +chatlas openai diff --git a/shiny/templates/chat/llms/playground/_template.json b/shiny/templates/chat/llms/playground/_template.json new file mode 100644 index 000000000..b753f492c --- /dev/null +++ b/shiny/templates/chat/llms/playground/_template.json @@ -0,0 +1,5 @@ +{ + "type": "app", + "id": "chat-ai-playground", + "title": "Chat Playground w/ OpenAI, Anthropic, and Google" +} diff --git a/shiny/templates/chat/llms/playground/app.py b/shiny/templates/chat/llms/playground/app.py new file mode 100644 index 000000000..a8c3b0ebb --- /dev/null +++ b/shiny/templates/chat/llms/playground/app.py @@ -0,0 +1,106 @@ +# ------------------------------------------------------------------------------------ +# A Shiny Chat example showing how to use different language models via chatlas. +# To run it with all the different providers/models, you'll need API keys for each. +# Namely, OPENAI_API_KEY, ANTHROPIC_API_KEY, and GOOGLE_API_KEY. +# To see how to get these keys, see chatlas' reference: +# https://posit-dev.github.io/chatlas/reference/ +# ------------------------------------------------------------------------------------ + +import chatlas as ctl +from app_utils import load_dotenv + +from shiny import reactive +from shiny.express import input, ui + +load_dotenv() + +models = { + "openai": ["gpt-4o-mini", "gpt-4o"], + "claude": [ + "claude-3-opus-latest", + "claude-3-5-sonnet-latest", + "claude-3-haiku-20240307", + ], + "google": ["gemini-1.5-pro-latest"], +} + +model_choices: dict[str, dict[str, str]] = {} +for key, value in models.items(): + model_choices[key] = dict(zip(value, value)) + +ui.page_opts( + title="Shiny Chat Playground", + fillable=True, + fillable_mobile=True, +) + +# Sidebar with input controls +with ui.sidebar(position="right"): + ui.input_select("model", "Model", choices=model_choices) + ui.input_select( + "system_actor", + "Response style", + choices=["Chuck Norris", "Darth Vader", "Yoda", "Gandalf", "Sherlock Holmes"], + ) + ui.input_switch("stream", "Stream", value=True) + ui.input_slider("temperature", "Temperature", min=0, max=2, step=0.1, value=1) + ui.input_slider("max_tokens", "Max Tokens", min=1, max=4096, step=1, value=100) + ui.input_action_button("clear", "Clear chat") + +# The chat component +chat = ui.Chat(id="chat") +chat.ui(width="100%") + + +@reactive.calc +def get_model(): + model_params = { + "system_prompt": ( + "You are a helpful AI assistant. " + f" Provide answers in the style of {input.system_actor()}." 
+        ),
+        "model": input.model(),
+    }
+
+    if input.model() in models["openai"]:
+        chat_model = ctl.ChatOpenAI(**model_params)
+    elif input.model() in models["claude"]:
+        chat_model = ctl.ChatAnthropic(**model_params)
+    elif input.model() in models["google"]:
+        chat_model = ctl.ChatGoogle(**model_params)
+    else:
+        raise ValueError(f"Invalid model: {input.model()}")
+
+    return chat_model
+
+
+@reactive.calc
+def chat_params():
+    # Google nests its generation parameters under `generation_config`, while
+    # the other providers accept them as top-level arguments.
+    if input.model() in models["google"]:
+        return {
+            "generation_config": {
+                "temperature": input.temperature(),
+                "max_output_tokens": input.max_tokens(),
+            }
+        }
+    else:
+        return {
+            "temperature": input.temperature(),
+            "max_tokens": input.max_tokens(),
+        }
+
+
+@chat.on_user_submit
+async def handle_user_input(user_input: str):
+    if input.stream():
+        response = get_model().stream(user_input, kwargs=chat_params())
+        await chat.append_message_stream(response)
+    else:
+        response = get_model().chat(user_input, echo="none", kwargs=chat_params())
+        await chat.append_message(response)
+
+
+# Clear the chat's messages when the user clicks the clear button
+@reactive.effect
+@reactive.event(input.clear)
+async def _():
+    await chat.clear_messages()
diff --git a/shiny/templates/chat/production/anthropic/app_utils.py b/shiny/templates/chat/llms/playground/app_utils.py
similarity index 100%
rename from shiny/templates/chat/production/anthropic/app_utils.py
rename to shiny/templates/chat/llms/playground/app_utils.py
diff --git a/shiny/templates/chat/llms/playground/requirements.txt b/shiny/templates/chat/llms/playground/requirements.txt
new file mode 100644
index 000000000..4cec5d5bb
--- /dev/null
+++ b/shiny/templates/chat/llms/playground/requirements.txt
@@ -0,0 +1,6 @@
+chatlas
+openai
+anthropic
+google-generativeai
+python-dotenv
+shiny
diff --git a/shiny/templates/chat/production/anthropic/_template.json b/shiny/templates/chat/production/anthropic/_template.json
deleted file mode 100644
index 271e2c040..000000000
--- a/shiny/templates/chat/production/anthropic/_template.json
+++ /dev/null
@@ -1,5 +0,0 @@
-{
-  "type": "app",
-  "id": "chat-ai-anthropic-prod",
-  "title": "Chat in production with Anthropic"
-}
diff --git a/shiny/templates/chat/production/anthropic/app.py b/shiny/templates/chat/production/anthropic/app.py
deleted file mode 100644
index 423d4337c..000000000
--- a/shiny/templates/chat/production/anthropic/app.py
+++ /dev/null
@@ -1,65 +0,0 @@
-# ------------------------------------------------------------------------------------
-# When putting a Chat into production, there are at least a couple of additional
-# considerations to keep in mind:
-#   - Token Limits: LLMs have (varying) limits on how many tokens can be included in
-#     a single request and response. To accurately respect these limits, you'll want
-#     to find the relevant limits and tokenizer for the model you're using, and inform
-#     Chat about them.
-#   - Reproducibility: Consider pinning a snapshot of the LLM model to ensure that the
-#     same model is used each time the app is run.
-#
-# See the MODEL_INFO dictionary below for an example of how to set these values for
-# Anthropic's Claude model.
-# https://docs.anthropic.com/en/docs/about-claude/models#model-comparison-table
-# ------------------------------------------------------------------------------------
-import os
-
-from anthropic import AsyncAnthropic
-from app_utils import load_dotenv
-
-from shiny.express import ui
-
-load_dotenv()
-llm = AsyncAnthropic(api_key=os.environ.get("ANTHROPIC_API_KEY"))
-
-
-MODEL_INFO = {
-    "name": "claude-3-5-sonnet-20241022",
-    # DISCLAIMER: Anthropic has not yet released a public tokenizer for Claude models,
-    # so this uses the generic default provided by Chat() (for now). That is probably
-    # ok though since the default tokenizer likely overestimates the token count.
-    "tokenizer": None,
-    "token_limits": (200000, 8192),
-}
-
-
-ui.page_opts(
-    title="Hello Anthropic Chat",
-    fillable=True,
-    fillable_mobile=True,
-)
-
-chat = ui.Chat(
-    id="chat",
-    messages=[
-        {"content": "Hello! How can I help you today?", "role": "assistant"},
-    ],
-    tokenizer=MODEL_INFO["tokenizer"],
-)
-
-chat.ui()
-
-
-@chat.on_user_submit
-async def _():
-    messages = chat.messages(
-        format="anthropic",
-        token_limits=MODEL_INFO["token_limits"],
-    )
-    response = await llm.messages.create(
-        model=MODEL_INFO["name"],
-        messages=messages,
-        stream=True,
-        max_tokens=MODEL_INFO["token_limits"][1],
-    )
-    await chat.append_message_stream(response)
diff --git a/shiny/templates/chat/production/anthropic/requirements.txt b/shiny/templates/chat/production/anthropic/requirements.txt
deleted file mode 100644
index fb3b67026..000000000
--- a/shiny/templates/chat/production/anthropic/requirements.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-shiny
-python-dotenv
-tokenizers
-anthropic
diff --git a/shiny/templates/chat/production/openai/_template.json b/shiny/templates/chat/production/openai/_template.json
deleted file mode 100644
index 1a64e5211..000000000
--- a/shiny/templates/chat/production/openai/_template.json
+++ /dev/null
@@ -1,5 +0,0 @@
-{
-  "type": "app",
-  "id": "chat-ai-openai-prod",
-  "title": "Chat in production with OpenAI"
-}
diff --git a/shiny/templates/chat/production/openai/app.py b/shiny/templates/chat/production/openai/app.py
deleted file mode 100644
index 7b1274c95..000000000
--- a/shiny/templates/chat/production/openai/app.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# ------------------------------------------------------------------------------------
-# When putting a Chat into production, there are at least a couple of additional
-# considerations to keep in mind:
-#   - Token Limits: LLMs have (varying) limits on how many tokens can be included in
-#     a single request and response. To accurately respect these limits, you'll want
-#     to find the relevant limits and tokenizer for the model you're using, and inform
-#     Chat about them.
-#   - Reproducibility: Consider pinning a snapshot of the LLM model to ensure that the
-#     same model is used each time the app is run.
-#
-# See the MODEL_INFO dictionary below for an example of how to set these values for
-# OpenAI's GPT-4o model.
-# ------------------------------------------------------------------------------------ -import os - -import tiktoken -from app_utils import load_dotenv -from openai import AsyncOpenAI - -from shiny.express import ui - -load_dotenv() -llm = AsyncOpenAI(api_key=os.environ.get("OPENAI_API_KEY")) - - -MODEL_INFO = { - "name": "gpt-4o-2024-08-06", - "tokenizer": tiktoken.encoding_for_model("gpt-4o-2024-08-06"), - "token_limits": (128000, 16000), -} - - -ui.page_opts( - title="Hello OpenAI Chat", - fillable=True, - fillable_mobile=True, -) - -chat = ui.Chat( - id="chat", - messages=[ - {"content": "Hello! How can I help you today?", "role": "assistant"}, - ], - tokenizer=MODEL_INFO["tokenizer"], -) - -chat.ui() - - -@chat.on_user_submit -async def _(): - messages = chat.messages(format="openai", token_limits=MODEL_INFO["token_limits"]) - response = await llm.chat.completions.create( - model=MODEL_INFO["name"], messages=messages, stream=True - ) - await chat.append_message_stream(response) diff --git a/shiny/templates/chat/production/openai/app_utils.py b/shiny/templates/chat/production/openai/app_utils.py deleted file mode 100644 index 404a13730..000000000 --- a/shiny/templates/chat/production/openai/app_utils.py +++ /dev/null @@ -1,26 +0,0 @@ -import os -from pathlib import Path -from typing import Any - -app_dir = Path(__file__).parent -env_file = app_dir / ".env" - - -def load_dotenv(dotenv_path: os.PathLike[str] = env_file, **kwargs: Any) -> None: - """ - A convenience wrapper around `dotenv.load_dotenv` that warns if `dotenv` is not installed. - It also returns `None` to make it easier to ignore the return value. - """ - try: - import dotenv - - dotenv.load_dotenv(dotenv_path=dotenv_path, **kwargs) - except ImportError: - import warnings - - warnings.warn( - "Could not import `dotenv`. If you want to use `.env` files to " - "load environment variables, please install it using " - "`pip install python-dotenv`.", - stacklevel=2, - ) diff --git a/shiny/templates/chat/production/openai/requirements.txt b/shiny/templates/chat/production/openai/requirements.txt deleted file mode 100644 index 4e5ab6b2a..000000000 --- a/shiny/templates/chat/production/openai/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -shiny -python-dotenv -tiktoken -openai diff --git a/shiny/templates/chat/starters/hello/_template.json b/shiny/templates/chat/starters/hello/_template.json new file mode 100644 index 000000000..6cdfd1a2f --- /dev/null +++ b/shiny/templates/chat/starters/hello/_template.json @@ -0,0 +1,5 @@ +{ + "type": "app", + "id": "chat-hello", + "title": "Hello Shiny Chat" +} diff --git a/examples/chat/hello-world/app-core.py b/shiny/templates/chat/starters/hello/app-core.py similarity index 80% rename from examples/chat/hello-world/app-core.py rename to shiny/templates/chat/starters/hello/app-core.py index 17d8395fb..5c088fcaf 100644 --- a/examples/chat/hello-world/app-core.py +++ b/shiny/templates/chat/starters/hello/app-core.py @@ -11,7 +11,7 @@ """ Hi! This is a simple Shiny `Chat` UI. Enter a message below and I will simply repeat it back to you. For more examples, see this - [folder of examples](https://github.com/posit-dev/py-shiny/tree/main/examples/chat). + [folder of examples](https://github.com/posit-dev/py-shiny/tree/main/shiny/templates/chat). 
""" ) @@ -21,11 +21,9 @@ def server(input, output, session): # Define a callback to run when the user submits a message @chat.on_user_submit - async def _(): - # Get the user's input - user = chat.user_input() + async def handle_user_input(user_input: str): # Append a response to the chat - await chat.append_message(f"You said: {user}") + await chat.append_message(f"You said: {user_input}") app = App(app_ui, server) diff --git a/examples/chat/hello-world/app.py b/shiny/templates/chat/starters/hello/app.py similarity index 81% rename from examples/chat/hello-world/app.py rename to shiny/templates/chat/starters/hello/app.py index 3eabe7c00..43fb202d5 100644 --- a/examples/chat/hello-world/app.py +++ b/shiny/templates/chat/starters/hello/app.py @@ -12,7 +12,7 @@ """ Hi! This is a simple Shiny `Chat` UI. Enter a message below and I will simply repeat it back to you. For more examples, see this - [folder of examples](https://github.com/posit-dev/py-shiny/tree/main/examples/chat). + [folder of examples](https://github.com/posit-dev/py-shiny/tree/main/shiny/templates/chat). """ ) @@ -28,8 +28,6 @@ # Define a callback to run when the user submits a message @chat.on_user_submit -async def _(): - # Get the user's input - user = chat.user_input() +async def handle_user_input(user_input: str): # Append a response to the chat - await chat.append_message(f"You said: {user}") + await chat.append_message(f"You said: {user_input}") diff --git a/examples/chat/hello-world/requirements.txt b/shiny/templates/chat/starters/hello/requirements.txt similarity index 100% rename from examples/chat/hello-world/requirements.txt rename to shiny/templates/chat/starters/hello/requirements.txt diff --git a/shiny/templates/chat/starters/sidebar-dark/_template.json b/shiny/templates/chat/starters/sidebar-dark/_template.json new file mode 100644 index 000000000..eb96af780 --- /dev/null +++ b/shiny/templates/chat/starters/sidebar-dark/_template.json @@ -0,0 +1,5 @@ +{ + "type": "app", + "id": "chat-sidebar-dark", + "title": "Chat in a sidebar with dark mode" +} diff --git a/shiny/templates/chat/starters/sidebar-dark/app.py b/shiny/templates/chat/starters/sidebar-dark/app.py new file mode 100644 index 000000000..57b29571c --- /dev/null +++ b/shiny/templates/chat/starters/sidebar-dark/app.py @@ -0,0 +1,30 @@ +# -------------------------------------------------------------------------------- +# This example demonstrates Shiny Chat's dark mode capability. 
+# --------------------------------------------------------------------------------
+
+from shiny.express import ui
+
+# Page options with a dark mode toggle
+ui.page_opts(
+    title=ui.tags.div(
+        "Hello Dark mode",
+        ui.input_dark_mode(mode="dark"),
+        class_="d-flex justify-content-between w-100",
+    ),
+    fillable=True,
+    fillable_mobile=True,
+)
+
+# Display the chat inside a right-hand sidebar
+with ui.sidebar(width=300, style="height:100%", position="right"):
+    chat = ui.Chat(id="chat", messages=["Welcome to the dark side!"])
+    chat.ui(height="100%")
+
+
+# Define a callback to run when the user submits a message
+@chat.on_user_submit
+async def handle_user_input(user_input: str):
+    await chat.append_message_stream(f"You said: {user_input}")
+
+
+"Lorem ipsum dolor sit amet, consectetur adipiscing elit"
diff --git a/shiny/ui/_chat.py b/shiny/ui/_chat.py
index 367b0e963..61476cd48 100644
--- a/shiny/ui/_chat.py
+++ b/shiny/ui/_chat.py
@@ -83,7 +83,7 @@
 PendingMessage = Tuple[Any, ChunkOption, Union[str, None]]
 
 
-@add_example(ex_dir="../api-examples/chat")
+@add_example(ex_dir="../templates/chat/starters/hello")
 class Chat:
     """
     Create a chat interface.
@@ -105,7 +105,7 @@ class Chat:
 
         # Define a callback to run when the user submits a message
         @chat.on_user_submit
-        async def handle_user_input(user_input):
+        async def handle_user_input(user_input: str):
             # Create a response message stream
             response = await my_model.generate_response(user_input, stream=True)
             # Append the response into the chat
@@ -1047,7 +1047,7 @@ async def _send_custom_message(self, handler: str, obj: ClientMessage | None):
 )
 
 
-@add_example(ex_dir="../api-examples/chat")
+@add_example(ex_dir="../templates/chat/starters/hello")
 def chat_ui(
     id: str,
     *,