diff --git a/.github/workflows/mac.yml b/.github/workflows/mac.yml
index 5cc8772ac5..a9af13bc66 100644
--- a/.github/workflows/mac.yml
+++ b/.github/workflows/mac.yml
@@ -225,7 +225,7 @@ jobs:
         run: |
           source ${OV_INSTALL_DIR}/setupvars.sh
           python -m pip install ./thirdparty/openvino_tokenizers/[transformers] -r ./tests/python_tests/requirements.txt --find-links ${OV_INSTALL_DIR}/wheels
-          python -m pytest -v ./tests/python_tests/test_chat_generate_api.py::test_set_chat_template
+          python -m pytest -v ./tests/python_tests/test_tokenizer.py::test_set_chat_template
         env:
           PYTHONPATH: "./build/:$PYTHONPATH"
diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml
index 7e1aacc715..f88bc4c6f3 100644
--- a/.github/workflows/windows.yml
+++ b/.github/workflows/windows.yml
@@ -236,7 +236,7 @@ jobs:
         run: |
           . "${{ env.OV_INSTALL_DIR }}/setupvars.ps1"
           python -m pip install ./thirdparty/openvino_tokenizers/[transformers] -r ./tests/python_tests/requirements.txt --find-links ${env:OV_INSTALL_DIR}/wheels
-          python -m pytest -v ./tests/python_tests/test_chat_generate_api.py::test_set_chat_template
+          python -m pytest -v ./tests/python_tests/test_tokenizer.py::test_set_chat_template
         env:
           PYTHONPATH: "./build/"  # cmd evaluates variables in a different way. Setting PYTHONPATH before setupvars.bat instead of doing that after solves that.
diff --git a/src/cpp/include/openvino/genai/generation_config.hpp b/src/cpp/include/openvino/genai/generation_config.hpp
index b8b222e347..4ea75e94c5 100644
--- a/src/cpp/include/openvino/genai/generation_config.hpp
+++ b/src/cpp/include/openvino/genai/generation_config.hpp
@@ -45,6 +45,10 @@ enum class StopCriteria { EARLY, HEURISTIC, NEVER };
 * @param logprobs number of top logprobs computed for each position, if set to 0, logprobs are not computed and value 0.0 is returned.
 *        Currently only single top logprob can be returned, so any logprobs > 1 is treated as logprobs == 1. (default: 0).
 *
+ * @param repetition_penalty the parameter for repetition penalty. 1.0 means no penalty.
+ * @param presence_penalty reduces absolute log prob if the token was generated at least once.
+ * @param frequency_penalty reduces absolute log prob as many times as the token was generated.
+ *
 * Beam search specific parameters:
 * @param num_beams number of beams for beam search. 1 disables beam search.
 * @param num_beam_groups number of groups to divide `num_beams` into in order to ensure diversity among different groups of beams.
@@ -61,15 +65,13 @@ enum class StopCriteria { EARLY, HEURISTIC, NEVER };
 *        "HEURISTIC" is applied and the generation stops when is it very unlikely to find better candidates;
 *        "NEVER", where the beam search procedure only stops when there cannot be better candidates (canonical beam search algorithm).
 *
- * Random sampling parameters:
+ * Random (or multinomial) sampling parameters:
+ * @param do_sample whether or not to use multinomial random sampling.
 * @param temperature the value used to modulate token probabilities for random sampling.
 * @param top_p - if set to float < 1, only the smallest set of most probable tokens with probabilities that add up to top_p or higher are kept for generation.
 * @param top_k the number of highest probability vocabulary tokens to keep for top-k-filtering.
- * @param do_sample whether or not to use multinomial random sampling that add up to `top_p` or higher are kept.
- * @param repetition_penalty the parameter for repetition penalty. 1.0 means no penalty.
- * @param presence_penalty reduces absolute log prob if the token was generated at least once.
- * @param frequency_penalty reduces absolute log prob as many times as the token was generated.
 * @param rng_seed initializes random generator.
+ * @param num_return_sequences the number of sequences to generate from a single prompt.
 *
 * Assisting generation parameters:
 * @param assistant_confidence_threshold the lower token probability of candidate to be validated by main model in case of dynamic strategy candidates number update.
@@ -90,7 +92,7 @@ class OPENVINO_GENAI_EXPORTS GenerationConfig {
     size_t min_new_tokens = 0;
     bool echo = false;
     size_t logprobs = 0;
-    
+
     std::set<std::string> stop_strings;
     // Default setting in vLLM (and OpenAI API) is not to include stop string in the output
     bool include_stop_str_in_output = false;
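The penalty and sampling parameters documented above map one-to-one onto `GenerationConfig` fields in the Python API. A minimal sketch of setting them, assuming a hypothetical model directory already exported for `LLMPipeline` (path and values illustrative, not from this patch):

```python
import openvino_genai as ov_genai

pipe = ov_genai.LLMPipeline("./model_dir", "CPU")  # hypothetical model directory

config = ov_genai.GenerationConfig()
config.max_new_tokens = 64
config.do_sample = True          # enable multinomial (random) sampling
config.temperature = 0.8         # modulates token probabilities
config.top_p = 0.9               # nucleus: keep the smallest token set summing to >= 0.9
config.top_k = 50                # keep the 50 most probable tokens
config.repetition_penalty = 1.1  # 1.0 means no penalty
config.presence_penalty = 0.5    # flat penalty once a token has appeared; validated to [-2, 2]
config.frequency_penalty = 0.5   # penalty scaled by how often the token appeared; same range

print(pipe.generate("Why is the Sun yellow?", config))
```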
diff --git a/src/cpp/include/openvino/genai/tokenizer.hpp b/src/cpp/include/openvino/genai/tokenizer.hpp
index 38fc0aaf8c..548e4dc332 100644
--- a/src/cpp/include/openvino/genai/tokenizer.hpp
+++ b/src/cpp/include/openvino/genai/tokenizer.hpp
@@ -36,9 +36,9 @@ class OPENVINO_GENAI_EXPORTS Tokenizer {
 
     /**
     * @brief ov::genai::Tokenizer constructor to initialize directly from model and weights
-    * 
-    * This constructor is used when tokenizer and detokenizer are separate models already loaded into memory. 
-    * When this constructor is used bos, eos, pad token ids are expected to be in IR. 
+    *
+    * This constructor is used when tokenizer and detokenizer are separate models already loaded into memory.
+    * When this constructor is used bos, eos, pad token ids are expected to be in IR.
     * If an IR is older (< 2024.3) then this tokens are default initialized to be ignored.
     * @param tokenizer_model_str tokenizer model string
     * @param tokenizer_weights_tensor ov::Tensor with tokenizer weights
@@ -55,9 +55,9 @@ class OPENVINO_GENAI_EXPORTS Tokenizer {
     );
 
     /**
-    * @brief ov::genai::Tokenizer constructor to initialize directly from model and weights. 
-    * 
-    * This constructor is used when tokenizer (or detokenizer) already loaded into memory. Whether it's 
+    * @brief ov::genai::Tokenizer constructor to initialize directly from model and weights.
+    *
+    * This constructor is used when tokenizer (or detokenizer) is already loaded into memory. Whether it's
     * tokenizer or detokenizer is defined from model input signature. When this constructor is used bos, eos, pad token ids
     * are expected to be in IR. If an IR is older (< 2024.3) then this tokens are default initialized to be ignored.
     * @param model_str model string
@@ -82,7 +82,7 @@ class OPENVINO_GENAI_EXPORTS Tokenizer {
         ov::Tensor& detokenizer_weights_tensor,
         Properties&&... properties
     ) : Tokenizer(tokenizer_model_str, tokenizer_weights_tensor, detokenizer_model_str, detokenizer_weights_tensor, ov::AnyMap{std::forward<Properties>(properties)...}) { }
-    
+
     /**
     * @brief ov::genai::Tokenizer constructor with variable number of properties
     * @param model_str model string
@@ -93,7 +93,7 @@ class OPENVINO_GENAI_EXPORTS Tokenizer {
     Tokenizer(const std::string& model_str, ov::Tensor& weights_tensor, Properties&&... properties)
         : Tokenizer(model_str, weights_tensor, ov::AnyMap{std::forward<Properties>(properties)...}) { }
-    
+
     /**
     * @brief ov::genai::Tokenizer constructor with variable number of properties
     * @param tokenizer_path openvino_tokenizer.xml and openvino_detokenizer.xml should be located in the tokenizer_path
@@ -111,7 +111,7 @@ class OPENVINO_GENAI_EXPORTS Tokenizer {
     * @return pair of [input_ids, attention_mask]
     */
     TokenizedInputs encode(const std::string prompt, const ov::AnyMap& tokenization_params = {});
-    
+
     /**
     * @brief encode batch of prompts. Left padding will be applied by default
     * @param prompts vector storing batch of prompts
@@ -127,7 +127,7 @@ class OPENVINO_GENAI_EXPORTS Tokenizer {
     * @param prompt std::string with input prompt
     * @param properties tokenization properties, e.g. ov::genai::add_special_tokens(false)
     * @return pair of [input_ids, attention_mask]
-    */ 
+    */
     template <typename... Properties>
     util::EnableIfAllStringAny<TokenizedInputs, Properties...> encode(std::string& prompt, Properties&&... properties) {
         return encode(prompt, AnyMap{std::forward<Properties>(properties)...});
     }
@@ -164,7 +164,7 @@ class OPENVINO_GENAI_EXPORTS Tokenizer {
     }
 
     /**
-    * @brief decode tokens. 
+    * @brief decode tokens.
     * @param tokens ov::Tensor with tokens with shape [batch_size, seq_len]
     * @param detokenization_params AnyMap with detokenization parameters, e.g. {"skip_special_tokens", false}
     * @return vector of std::string, with size = batch_size
@@ -183,7 +183,7 @@ class OPENVINO_GENAI_EXPORTS Tokenizer {
     }
 
     /**
-    * @brief batched decoding of tokens. 
+    * @brief batched decoding of tokens.
     * @param tokens vector of vectors with tokens, tokens.size() is equal to batch_size
     * @param detokenization_params AnyMap with detokenization parameters, e.g. {"skip_special_tokens", false}
     * @return vector of std::string, with size equal to batch_size
@@ -203,8 +203,8 @@ class OPENVINO_GENAI_EXPORTS Tokenizer {
 
     /**
     * @brief Embeds input prompts with special tags for a chat scenario.
-    * 
-    * For example, for Qwen family models, the prompt "1+1=" would be transformed into 
+    *
+    * For example, for Qwen family models, the prompt "1+1=" would be transformed into
     * <|im_start|>user\n1+1=<|im_end|>\n<|im_start|>assistant\n.
     *
     * @param history A vector of maps, with chat history, e.g. [{"role": "user", "content": "prompt"}, ...].
@@ -214,7 +214,7 @@ class OPENVINO_GENAI_EXPORTS Tokenizer {
     * @throws Exception if the chat template was unable to parse the input history.
     */
     std::string apply_chat_template(ChatHistory history,
-                                    bool add_generation_prompt, 
+                                    bool add_generation_prompt,
                                     const std::string& chat_template = {}) const;
 
     /// @brief Override a chat_template read from tokenizer_config.json.
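To make the chat-template API above concrete, here is a hedged Python sketch of `apply_chat_template` and the `set_chat_template` override, assuming a directory containing converted `openvino_tokenizer.xml`/`openvino_detokenizer.xml` (path illustrative):

```python
import openvino_genai as ov_genai

tokenizer = ov_genai.Tokenizer("./model_dir")  # hypothetical tokenizer directory

history = [{"role": "user", "content": "1+1="}]

# Uses the chat_template read from the IR (or tokenizer_config.json for older IRs).
# For Qwen family models this produces:
# <|im_start|>user\n1+1=<|im_end|>\n<|im_start|>assistant\n
prompt = tokenizer.apply_chat_template(history, add_generation_prompt=True)

# Override the template read from tokenizer_config.json with a pass-through one.
tokenizer.set_chat_template("{% for message in messages %}{{ message['content'] }}{% endfor %}")
print(tokenizer.apply_chat_template(history, add_generation_prompt=False))
```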
diff --git a/src/cpp/src/generation_config.cpp b/src/cpp/src/generation_config.cpp
index 35ae92d605..4ff184547e 100644
--- a/src/cpp/src/generation_config.cpp
+++ b/src/cpp/src/generation_config.cpp
@@ -185,6 +185,9 @@ void GenerationConfig::validate() const {
                     "Either 'eos_token_id', or 'max_new_tokens', or 'max_length' should be defined.");
     if (is_beam_search()) {
         OPENVINO_ASSERT(no_repeat_ngram_size > 0, "no_repeat_ngram_size must be positive");
+        if (num_beam_groups > 1) {
+            OPENVINO_ASSERT(diversity_penalty != 0.0f, "For grouped beam search 'diversity_penalty' should not be zero, otherwise it falls back to non-grouped beam search");
+        }
     } else {
         OPENVINO_ASSERT(frequency_penalty >= -2.0f && frequency_penalty <= 2.0f, "frequence_penalty penalty must be a [-2; +2]");
         OPENVINO_ASSERT(presence_penalty >= -2.0f && presence_penalty <= 2.0f, "presence_penalty penalty must be a [-2; +2]");
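The new assert refuses configurations where grouped beam search would silently degrade to the non-grouped algorithm. A sketch of a config that passes this validation, mirroring the values used in the tests later in this patch:

```python
import openvino_genai as ov_genai

config = ov_genai.GenerationConfig()
config.max_new_tokens = 10
config.num_beams = 15
config.num_beam_groups = 3       # grouped beam search
config.diversity_penalty = 1.0   # must be non-zero when num_beam_groups > 1
config.num_return_sequences = 1

# With num_beam_groups > 1 and diversity_penalty == 0.0, validate() now throws
# instead of silently running non-grouped beam search.
```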
diff --git a/src/cpp/src/tokenizer.cpp b/src/cpp/src/tokenizer.cpp
index b098f96fe6..82c0a17a55 100644
--- a/src/cpp/src/tokenizer.cpp
+++ b/src/cpp/src/tokenizer.cpp
@@ -89,15 +89,16 @@ class Tokenizer::TokenizerImpl {
 public:
     ov::CompiledModel m_tokenizer;
     ov::CompiledModel m_detokenizer;
-    
+
     std::unique_ptr<CircularBufferQueue<ov::InferRequest>> m_ireq_queue_tokenizer;
     std::unique_ptr<CircularBufferQueue<ov::InferRequest>> m_ireq_queue_detokenizer;
-    // To change the adding special tokens mode we use a statefull subgraph, 
+
+    // To change the adding special tokens mode we use a stateful subgraph,
     // this flag holds the current state value of the CompiledModel.
     bool m_add_special_tokens = true;
     bool m_skip_special_tokens = true;
     bool m_older_than_24_5 = false;
-    
+
     int64_t m_pad_token_id = -1;
     int64_t m_bos_token_id = -1;
     int64_t m_eos_token_id = -1;
@@ -111,6 +112,7 @@ class Tokenizer::TokenizerImpl {
     void set_state_if_necessary(CircularBufferQueueElementGuard<ov::InferRequest>& infer_request_guard, const ov::AnyMap& params) {
         bool add_special_tokens_flag = m_add_special_tokens;
         bool skip_special_tokens_flag = m_skip_special_tokens;
+
         ov::genai::utils::read_anymap_param(params, add_special_tokens.name(), add_special_tokens_flag);
         ov::genai::utils::read_anymap_param(params, skip_special_tokens.name(), skip_special_tokens_flag);
 
@@ -126,11 +128,11 @@ class Tokenizer::TokenizerImpl {
             // state but the effect is incorrect.
             return;
         }
-        
+
         // add_special_tokens is managed by Select op with a bool input.
         ov::Tensor add_special_tensor = ov::Tensor(ov::element::boolean, {});
         *add_special_tensor.data<bool>() = add_special_tokens_flag;
-        
+
         // skip_special_tokens is managed by multiplication with a number, therefore i32.
         ov::Tensor skip_special_tensor = ov::Tensor(ov::element::i32, {1});
         *skip_special_tensor.data<int>() = skip_special_tokens_flag;
@@ -148,19 +150,19 @@ class Tokenizer::TokenizerImpl {
 
     TokenizerImpl() = default;
 
-    TokenizerImpl(const std::filesystem::path& models_papth, const ov::AnyMap& properties) {
-        setupTokenizer(models_papth, properties);
+    TokenizerImpl(const std::filesystem::path& models_path, const ov::AnyMap& properties) {
+        setup_tokenizer(models_path, properties);
     }
 
     TokenizerImpl(const std::pair<std::shared_ptr<ov::Model>, std::shared_ptr<ov::Model>>& models, const ov::AnyMap& properties) {
-        setupTokenizer(models, properties);
+        setup_tokenizer(models, properties);
     }
 
-    void setupTokenizer(const std::filesystem::path& models_path, const ov::AnyMap& properties) {
+    void setup_tokenizer(const std::filesystem::path& models_path, const ov::AnyMap& properties) {
         ScopedVar env_manager(tokenizers_relative_to_genai().string());
         auto core = get_core_singleton();
 
-        OPENVINO_ASSERT(models_path.extension() != ".xml", "'models_papth' parameter should be a path to a dir not a xml file");
+        OPENVINO_ASSERT(models_path.extension() != ".xml", "'models_path' parameter should be a path to a dir not a xml file");
 
         std::shared_ptr<ov::Model> ov_tokenizer = nullptr;
         std::shared_ptr<ov::Model> ov_detokenizer = nullptr;
@@ -168,12 +170,12 @@ class Tokenizer::TokenizerImpl {
         if (std::filesystem::exists(models_path / "openvino_tokenizer.xml")) {
             ov_tokenizer = core.read_model(models_path / "openvino_tokenizer.xml");
         }
-        
+
         if (std::filesystem::exists(models_path / "openvino_detokenizer.xml")) {
             ov_detokenizer = core.read_model(models_path / "openvino_detokenizer.xml");
         }
 
-        setupTokenizer(std::make_pair(ov_tokenizer, ov_detokenizer), properties);
+        setup_tokenizer(std::make_pair(ov_tokenizer, ov_detokenizer), properties);
 
         // If special tokens were not found from IR, try to read them from config.
         // This will be triggered only for IRs older than 2024.3.
@@ -184,21 +186,20 @@ class Tokenizer::TokenizerImpl {
             // Try to read tokenizer_config if some token ids or token str are not defined.
             read_tokenizer_config_if_necessary(models_path);
         }
-        
+
         // If chat_template was not found in IR, try to read them from config.
         if (m_chat_template.empty()) {
             m_chat_template = chat_template_from_tokenizer_json_if_exists(models_path);
         }
     }
-    
-    void setupTokenizer(const std::pair<std::shared_ptr<ov::Model>, std::shared_ptr<ov::Model>>& models, const ov::AnyMap& properties) {
+
+    void setup_tokenizer(const std::pair<std::shared_ptr<ov::Model>, std::shared_ptr<ov::Model>>& models, const ov::AnyMap& properties) {
         auto [ov_tokenizer, ov_detokenizer] = models;
         OPENVINO_ASSERT(ov_tokenizer || ov_detokenizer, "Neither tokenizer nor detokenzier models were provided");
 
         auto core = get_core_singleton();
         std::string device = "CPU"; // only CPU is supported for now
-        
+
         std::string version_str;
         utils::read_rt_info(ov_tokenizer != nullptr ? ov_tokenizer: ov_detokenizer , "openvino_tokenizers_version", version_str);
         // Saving IR version was added only in 24.5, so if it's empty, then it's older than 24.5
@@ -231,7 +232,7 @@ class Tokenizer::TokenizerImpl {
             return std::move(this->m_detokenizer.create_infer_request());
         });
     }
-    
+
     // Initialize tokenizer's cache to save time later.
     if (m_tokenizer) {
         // TODO CVS-150630: Empty strings sporadically can fail, therefore use nonempty string for warmup.
@@ -286,10 +287,11 @@ class Tokenizer::TokenizerImpl {
 
         nlohmann::json data = nlohmann::json::parse(f);
 
-        using ov::genai::utils::read_json_param;
         // they are in the format {"bos_token": { "content": "<s>",... }}
-        auto read_token_content_str = [&data](std::string key_name, std::string& val) {
-            if (val == "" && data.contains(key_name)) { read_json_param(data[key_name], "content", val); }
+        auto read_token_content_str = [&data](const std::string& key_name, std::string& val) {
+            if (val.empty() && data.contains(key_name)) {
+                utils::read_json_param(data[key_name], "content", val);
+            }
         };
         read_token_content_str(pad_token_key_name, m_pad_token);
         read_token_content_str(bos_token_key_name, m_bos_token);
@@ -494,7 +496,7 @@ class Tokenizer::TokenizerImpl {
             {"is none", "is undefined"},
             {"= none", "= undefined"},
             // Jinja2Cpp does not support Python-style slicing, e.g. [1:].
-            // If chat template contains such slicing, we replace it with 
+            // If chat template contains such slicing, we replace it with
             // a placeholder at the moment.
             {"messages[1:]", "slice(messages, 1)"},
         };
@@ -537,7 +539,7 @@ class Tokenizer::TokenizerImpl {
         env.GetSettings().trimBlocks = true;
         jinja2::Template tpl(&env);
         tpl.Load(chat_tpl);
-        
+
         jinja2::UserCallable slice_callable = jinja2::MakeCallable(
             [](const jinja2::GenericList& messages, const size_t& start) {
                 jinja2::ValuesList result;
@@ -607,7 +609,7 @@ Tokenizer::Tokenizer(const std::string& model_str, ov::Tensor& weights_tensor, c
     ScopedVar env_manager(tokenizers_relative_to_genai().string());
     auto core = get_core_singleton();
     auto model = core.read_model(model_str, weights_tensor);
-    
+
     auto parameters = model->get_parameters();
     OPENVINO_ASSERT(!parameters.empty());
     if (parameters.front()->get_element_type() == ov::element::string) {
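Because adding/skipping special tokens is a state of the compiled tokenizer subgraph rather than a separate model, the Python API toggles it per call. A hedged sketch of what this buys the user (tokenizer directory illustrative):

```python
import openvino_genai as ov_genai

tokenizer = ov_genai.Tokenizer("./model_dir")  # hypothetical tokenizer directory

# Same compiled model, two states: with and without special tokens.
with_special = tokenizer.encode("1+1=")
without_special = tokenizer.encode("1+1=", add_special_tokens=False)
print(with_special.input_ids.shape, without_special.input_ids.shape)

# Detokenization goes through the same stateful mechanism (skip_special_tokens).
print(tokenizer.decode(with_special.input_ids))
```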
diff --git a/src/python/openvino_genai/py_openvino_genai.pyi b/src/python/openvino_genai/py_openvino_genai.pyi
index 3d27b23052..8510a8389f 100644
--- a/src/python/openvino_genai/py_openvino_genai.pyi
+++ b/src/python/openvino_genai/py_openvino_genai.pyi
@@ -361,10 +361,10 @@ class ContinuousBatchingPipeline:
     """
     This class is used for generation with LLMs with continuous batchig
     """
     @typing.overload
-    def __init__(self, models_path: str, scheduler_config: SchedulerConfig, device: str, properties: dict[str, typing.Any] = {}, tokenizer_properties: dict[str, typing.Any] = {}) -> None:
+    def __init__(self, models_path: os.PathLike, scheduler_config: SchedulerConfig, device: str, properties: dict[str, typing.Any] = {}, tokenizer_properties: dict[str, typing.Any] = {}) -> None:
         ...
     @typing.overload
-    def __init__(self, models_path: str, tokenizer: Tokenizer, scheduler_config: SchedulerConfig, device: str, properties: dict[str, typing.Any] = {}) -> None:
+    def __init__(self, models_path: os.PathLike, tokenizer: Tokenizer, scheduler_config: SchedulerConfig, device: str, properties: dict[str, typing.Any] = {}) -> None:
         ...
     @typing.overload
     def add_request(self, request_id: int, input_ids: openvino._pyopenvino.Tensor, sampling_params: GenerationConfig) -> GenerationHandle:
@@ -522,17 +522,17 @@ class FluxTransformer2DModel:
 
 class GenerationConfig:
     """
-    Structure to keep generation config parameters. For a selected method of decoding, only parameters from that group 
-    and generic parameters are used. For example, if do_sample is set to true, then only generic parameters and random sampling parameters will 
+    Structure to keep generation config parameters. For a selected method of decoding, only parameters from that group
+    and generic parameters are used. For example, if do_sample is set to true, then only generic parameters and random sampling parameters will
     be used while greedy and beam search parameters will not affect decoding at all.
 
-    Parameters: 
+    Parameters:
     max_length: the maximum length the generated tokens can have. Corresponds to the length of the input prompt + max_new_tokens. Its effect is overridden by `max_new_tokens`, if also set.
     max_new_tokens: the maximum numbers of tokens to generate, excluding the number of tokens in the prompt. max_new_tokens has priority over max_length.
+    min_new_tokens: set 0 probability for eos_token_id for the first eos_token_id generated tokens.
     ignore_eos: if set to true, then generation will not stop even if <eos> token is met.
     eos_token_id: token_id of <eos> (end of sentence)
-    min_new_tokens: set 0 probability for eos_token_id for the first eos_token_id generated tokens.
     stop_strings: a set of strings that will cause pipeline to stop generating further tokens.
     include_stop_str_in_output: if set to true stop string that matched generation will be included in generation output (default: false)
     stop_token_ids: a set of tokens that will cause pipeline to stop generating further tokens.
@@ -540,6 +540,10 @@ class GenerationConfig:
     logprobs: number of top logprobs computed for each position, if set to 0, logprobs are not computed and value 0.0 is returned.
         Currently only single top logprob can be returned, so any logprobs > 1 is treated as logprobs == 1. (default: 0).
 
+    repetition_penalty: the parameter for repetition penalty. 1.0 means no penalty.
+    presence_penalty: reduces absolute log prob if the token was generated at least once.
+    frequency_penalty: reduces absolute log prob as many times as the token was generated.
+
     Beam search specific parameters:
     num_beams: number of beams for beam search. 1 disables beam search.
     num_beam_groups: number of groups to divide `num_beams` into in order to ensure diversity among different groups of beams.
@@ -550,8 +554,8 @@ class GenerationConfig:
         length_penalty < 0.0 encourages shorter sequences.
     num_return_sequences: the number of sequences to return for grouped beam search decoding.
     no_repeat_ngram_size: if set to int > 0, all ngrams of that size can only occur once.
-    stop_criteria: controls the stopping condition for grouped beam search. It accepts the following values: 
-        "openvino_genai.StopCriteria.EARLY", where the generation stops as soon as there are `num_beams` complete candidates; 
+    stop_criteria: controls the stopping condition for grouped beam search. It accepts the following values:
+        "openvino_genai.StopCriteria.EARLY", where the generation stops as soon as there are `num_beams` complete candidates;
         "openvino_genai.StopCriteria.HEURISTIC" is applied and the generation stops when is it very unlikely to find better candidates;
         "openvino_genai.StopCriteria.NEVER", where the beam search procedure only stops when there cannot be better candidates (canonical beam search algorithm).
 
@@ -560,7 +564,7 @@ class GenerationConfig:
     top_p: if set to float < 1, only the smallest set of most probable tokens with probabilities that add up to top_p or higher are kept for generation.
     top_k: the number of highest probability vocabulary tokens to keep for top-k-filtering.
     do_sample: whether or not to use multinomial random sampling that add up to `top_p` or higher are kept.
-    repetition_penalty: the parameter for repetition penalty. 1.0 means no penalty.
+    num_return_sequences: the number of sequences to generate from a single prompt.
     """
     adapters: AdapterConfig | None
     assistant_confidence_threshold: float
@@ -951,17 +955,17 @@ class LLMPipeline:
         :rtype: DecodedResults, EncodedResults, str
 
-        Structure to keep generation config parameters. For a selected method of decoding, only parameters from that group 
-        and generic parameters are used. For example, if do_sample is set to true, then only generic parameters and random sampling parameters will 
+        Structure to keep generation config parameters. For a selected method of decoding, only parameters from that group
+        and generic parameters are used. For example, if do_sample is set to true, then only generic parameters and random sampling parameters will
         be used while greedy and beam search parameters will not affect decoding at all.
 
-        Parameters: 
+        Parameters:
         max_length: the maximum length the generated tokens can have. Corresponds to the length of the input prompt + max_new_tokens. Its effect is overridden by `max_new_tokens`, if also set.
         max_new_tokens: the maximum numbers of tokens to generate, excluding the number of tokens in the prompt. max_new_tokens has priority over max_length.
+        min_new_tokens: set 0 probability for eos_token_id for the first eos_token_id generated tokens.
         ignore_eos: if set to true, then generation will not stop even if <eos> token is met.
         eos_token_id: token_id of <eos> (end of sentence)
-        min_new_tokens: set 0 probability for eos_token_id for the first eos_token_id generated tokens.
         stop_strings: a set of strings that will cause pipeline to stop generating further tokens.
         include_stop_str_in_output: if set to true stop string that matched generation will be included in generation output (default: false)
         stop_token_ids: a set of tokens that will cause pipeline to stop generating further tokens.
@@ -969,6 +973,10 @@ class LLMPipeline:
         logprobs: number of top logprobs computed for each position, if set to 0, logprobs are not computed and value 0.0 is returned.
             Currently only single top logprob can be returned, so any logprobs > 1 is treated as logprobs == 1. (default: 0).
 
+        repetition_penalty: the parameter for repetition penalty. 1.0 means no penalty.
+        presence_penalty: reduces absolute log prob if the token was generated at least once.
+        frequency_penalty: reduces absolute log prob as many times as the token was generated.
+
         Beam search specific parameters:
         num_beams: number of beams for beam search. 1 disables beam search.
         num_beam_groups: number of groups to divide `num_beams` into in order to ensure diversity among different groups of beams.
@@ -979,8 +987,8 @@ class LLMPipeline:
             length_penalty < 0.0 encourages shorter sequences.
         num_return_sequences: the number of sequences to return for grouped beam search decoding.
         no_repeat_ngram_size: if set to int > 0, all ngrams of that size can only occur once.
-        stop_criteria: controls the stopping condition for grouped beam search. It accepts the following values: 
-            "openvino_genai.StopCriteria.EARLY", where the generation stops as soon as there are `num_beams` complete candidates; 
+        stop_criteria: controls the stopping condition for grouped beam search. It accepts the following values:
+            "openvino_genai.StopCriteria.EARLY", where the generation stops as soon as there are `num_beams` complete candidates;
            "openvino_genai.StopCriteria.HEURISTIC" is applied and the generation stops when is it very unlikely to find better candidates;
            "openvino_genai.StopCriteria.NEVER", where the beam search procedure only stops when there cannot be better candidates (canonical beam search algorithm).
 
@@ -989,7 +997,7 @@ class LLMPipeline:
         top_p: if set to float < 1, only the smallest set of most probable tokens with probabilities that add up to top_p or higher are kept for generation.
         top_k: the number of highest probability vocabulary tokens to keep for top-k-filtering.
         do_sample: whether or not to use multinomial random sampling that add up to `top_p` or higher are kept.
-        repetition_penalty: the parameter for repetition penalty. 1.0 means no penalty.
+        num_return_sequences: the number of sequences to generate from a single prompt.
         """
     @typing.overload
     def __init__(self, models_path: os.PathLike, tokenizer: Tokenizer, device: str, config: dict[str, typing.Any] = {}, **kwargs) -> None:
         ...
@@ -1032,17 +1040,17 @@ class LLMPipeline:
         :rtype: DecodedResults, EncodedResults, str
 
-        Structure to keep generation config parameters. For a selected method of decoding, only parameters from that group 
-        and generic parameters are used. For example, if do_sample is set to true, then only generic parameters and random sampling parameters will 
+        Structure to keep generation config parameters. For a selected method of decoding, only parameters from that group
+        and generic parameters are used. For example, if do_sample is set to true, then only generic parameters and random sampling parameters will
         be used while greedy and beam search parameters will not affect decoding at all.
 
-        Parameters: 
+        Parameters:
         max_length: the maximum length the generated tokens can have. Corresponds to the length of the input prompt + max_new_tokens. Its effect is overridden by `max_new_tokens`, if also set.
         max_new_tokens: the maximum numbers of tokens to generate, excluding the number of tokens in the prompt. max_new_tokens has priority over max_length.
+        min_new_tokens: set 0 probability for eos_token_id for the first eos_token_id generated tokens.
         ignore_eos: if set to true, then generation will not stop even if <eos> token is met.
         eos_token_id: token_id of <eos> (end of sentence)
-        min_new_tokens: set 0 probability for eos_token_id for the first eos_token_id generated tokens.
         stop_strings: a set of strings that will cause pipeline to stop generating further tokens.
         include_stop_str_in_output: if set to true stop string that matched generation will be included in generation output (default: false)
         stop_token_ids: a set of tokens that will cause pipeline to stop generating further tokens.
@@ -1050,6 +1058,10 @@ class LLMPipeline:
         logprobs: number of top logprobs computed for each position, if set to 0, logprobs are not computed and value 0.0 is returned.
             Currently only single top logprob can be returned, so any logprobs > 1 is treated as logprobs == 1. (default: 0).
 
+        repetition_penalty: the parameter for repetition penalty. 1.0 means no penalty.
+        presence_penalty: reduces absolute log prob if the token was generated at least once.
+        frequency_penalty: reduces absolute log prob as many times as the token was generated.
+
         Beam search specific parameters:
         num_beams: number of beams for beam search. 1 disables beam search.
         num_beam_groups: number of groups to divide `num_beams` into in order to ensure diversity among different groups of beams.
@@ -1060,8 +1072,8 @@ class LLMPipeline:
             length_penalty < 0.0 encourages shorter sequences.
         num_return_sequences: the number of sequences to return for grouped beam search decoding.
         no_repeat_ngram_size: if set to int > 0, all ngrams of that size can only occur once.
-        stop_criteria: controls the stopping condition for grouped beam search. It accepts the following values: 
-            "openvino_genai.StopCriteria.EARLY", where the generation stops as soon as there are `num_beams` complete candidates; 
+        stop_criteria: controls the stopping condition for grouped beam search. It accepts the following values:
+            "openvino_genai.StopCriteria.EARLY", where the generation stops as soon as there are `num_beams` complete candidates;
            "openvino_genai.StopCriteria.HEURISTIC" is applied and the generation stops when is it very unlikely to find better candidates;
            "openvino_genai.StopCriteria.NEVER", where the beam search procedure only stops when there cannot be better candidates (canonical beam search algorithm).
 
@@ -1070,7 +1082,7 @@ class LLMPipeline:
         top_p: if set to float < 1, only the smallest set of most probable tokens with probabilities that add up to top_p or higher are kept for generation.
         top_k: the number of highest probability vocabulary tokens to keep for top-k-filtering.
         do_sample: whether or not to use multinomial random sampling that add up to `top_p` or higher are kept.
-        repetition_penalty: the parameter for repetition penalty. 1.0 means no penalty.
+        num_return_sequences: the number of sequences to generate from a single prompt.
         """
     def get_generation_config(self) -> GenerationConfig:
         ...
@@ -1420,7 +1432,7 @@ class StopCriteria:
     """
     StopCriteria controls the stopping condition for grouped beam search.
-    
+
     The following values are possible:
     "openvino_genai.StopCriteria.EARLY" stops as soon as there are `num_beams` complete candidates.
     "openvino_genai.StopCriteria.HEURISTIC" stops when is it unlikely to find better candidates.
diff --git a/src/python/py_continuous_batching_pipeline.cpp b/src/python/py_continuous_batching_pipeline.cpp
index 772ba0af8a..be7a72481f 100644
--- a/src/python/py_continuous_batching_pipeline.cpp
+++ b/src/python/py_continuous_batching_pipeline.cpp
@@ -212,7 +212,7 @@ void init_continuous_batching_pipeline(py::module_& m) {
         .def_readonly("max_cache_usage", &PipelineMetrics::max_cache_usage);
 
     py::class_<ContinuousBatchingPipeline>(m, "ContinuousBatchingPipeline", "This class is used for generation with LLMs with continuous batchig")
-        .def(py::init([](const std::string& models_path, const SchedulerConfig& scheduler_config, const std::string& device, const std::map<std::string, py::object>& llm_plugin_config, const std::map<std::string, py::object>& tokenizer_plugin_config) {
+        .def(py::init([](const std::filesystem::path& models_path, const SchedulerConfig& scheduler_config, const std::string& device, const std::map<std::string, py::object>& llm_plugin_config, const std::map<std::string, py::object>& tokenizer_plugin_config) {
             ScopedVar env_manager(pyutils::ov_tokenizers_module_path());
             return std::make_unique<ContinuousBatchingPipeline>(models_path, scheduler_config, device, pyutils::properties_to_any_map(llm_plugin_config), pyutils::properties_to_any_map(tokenizer_plugin_config));
         }),
@@ -222,7 +222,7 @@ void init_continuous_batching_pipeline(py::module_& m) {
         py::arg("properties") = ov::AnyMap({}),
         py::arg("tokenizer_properties") = ov::AnyMap({}))
 
-        .def(py::init([](const std::string& models_path, const ov::genai::Tokenizer& tokenizer, const SchedulerConfig& scheduler_config, const std::string& device, const std::map<std::string, py::object>& plugin_config) {
+        .def(py::init([](const std::filesystem::path& models_path, const ov::genai::Tokenizer& tokenizer, const SchedulerConfig& scheduler_config, const std::string& device, const std::map<std::string, py::object>& plugin_config) {
             ScopedVar env_manager(pyutils::ov_tokenizers_module_path());
             return std::make_unique<ContinuousBatchingPipeline>(models_path, tokenizer, scheduler_config, device, pyutils::properties_to_any_map(plugin_config));
         }),
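Since the pybind signatures now take `std::filesystem::path`, a `pathlib.Path` can be handed straight to `ContinuousBatchingPipeline`; the `.absolute().as_posix()` conversions removed from the tests below are no longer needed. A sketch (directory illustrative):

```python
from pathlib import Path

import openvino_genai as ov_genai

scheduler_config = ov_genai.SchedulerConfig()

# models_path is now os.PathLike, so a Path object works without conversion.
models_path = Path("./converted_model_dir")  # hypothetical directory with converted model
pipe = ov_genai.ContinuousBatchingPipeline(models_path, scheduler_config, "CPU")
```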
R"( StopCriteria controls the stopping condition for grouped beam search. - + The following values are possible: "openvino_genai.StopCriteria.EARLY" stops as soon as there are `num_beams` complete candidates. "openvino_genai.StopCriteria.HEURISTIC" stops when is it unlikely to find better candidates. @@ -30,17 +30,17 @@ auto stop_criteria_docstring = R"( } // namespace char generation_config_docstring[] = R"( - Structure to keep generation config parameters. For a selected method of decoding, only parameters from that group - and generic parameters are used. For example, if do_sample is set to true, then only generic parameters and random sampling parameters will + Structure to keep generation config parameters. For a selected method of decoding, only parameters from that group + and generic parameters are used. For example, if do_sample is set to true, then only generic parameters and random sampling parameters will be used while greedy and beam search parameters will not affect decoding at all. - Parameters: + Parameters: max_length: the maximum length the generated tokens can have. Corresponds to the length of the input prompt + max_new_tokens. Its effect is overridden by `max_new_tokens`, if also set. max_new_tokens: the maximum numbers of tokens to generate, excluding the number of tokens in the prompt. max_new_tokens has priority over max_length. + min_new_tokens: set 0 probability for eos_token_id for the first eos_token_id generated tokens. ignore_eos: if set to true, then generation will not stop even if token is met. eos_token_id: token_id of (end of sentence) - min_new_tokens: set 0 probability for eos_token_id for the first eos_token_id generated tokens. stop_strings: a set of strings that will cause pipeline to stop generating further tokens. include_stop_str_in_output: if set to true stop string that matched generation will be included in generation output (default: false) stop_token_ids: a set of tokens that will cause pipeline to stop generating further tokens. @@ -48,6 +48,10 @@ char generation_config_docstring[] = R"( logprobs: number of top logprobs computed for each position, if set to 0, logprobs are not computed and value 0.0 is returned. Currently only single top logprob can be returned, so any logprobs > 1 is treated as logprobs == 1. (default: 0). + repetition_penalty: the parameter for repetition penalty. 1.0 means no penalty. + presence_penalty: reduces absolute log prob if the token was generated at least once. + frequency_penalty: reduces absolute log prob as many times as the token was generated. + Beam search specific parameters: num_beams: number of beams for beam search. 1 disables beam search. num_beam_groups: number of groups to divide `num_beams` into in order to ensure diversity among different groups of beams. @@ -58,8 +62,8 @@ char generation_config_docstring[] = R"( length_penalty < 0.0 encourages shorter sequences. num_return_sequences: the number of sequences to return for grouped beam search decoding. no_repeat_ngram_size: if set to int > 0, all ngrams of that size can only occur once. - stop_criteria: controls the stopping condition for grouped beam search. It accepts the following values: - "openvino_genai.StopCriteria.EARLY", where the generation stops as soon as there are `num_beams` complete candidates; + stop_criteria: controls the stopping condition for grouped beam search. 
+    "openvino_genai.StopCriteria.EARLY", where the generation stops as soon as there are `num_beams` complete candidates;
    "openvino_genai.StopCriteria.HEURISTIC" is applied and the generation stops when is it very unlikely to find better candidates;
    "openvino_genai.StopCriteria.NEVER", where the beam search procedure only stops when there cannot be better candidates (canonical beam search algorithm).
 
@@ -68,7 +72,7 @@ char generation_config_docstring[] = R"(
     top_p: if set to float < 1, only the smallest set of most probable tokens with probabilities that add up to top_p or higher are kept for generation.
     top_k: the number of highest probability vocabulary tokens to keep for top-k-filtering.
     do_sample: whether or not to use multinomial random sampling that add up to `top_p` or higher are kept.
-    repetition_penalty: the parameter for repetition penalty. 1.0 means no penalty.
+    num_return_sequences: the number of sequences to generate from a single prompt.
 )";
 
 void init_generation_config(py::module_& m) {
diff --git a/tests/python_tests/common.py b/tests/python_tests/common.py
index cf5fbb3403..7e3c075405 100644
--- a/tests/python_tests/common.py
+++ b/tests/python_tests/common.py
@@ -42,13 +42,6 @@ def get_greedy_with_penalties() -> GenerationConfig:
     generation_config.max_new_tokens = 30
     return generation_config
 
-def get_greedy_with_min_and_max_tokens() -> GenerationConfig:
-    generation_config = GenerationConfig()
-    generation_config.num_return_sequences = 1
-    generation_config.min_new_tokens = 15
-    generation_config.max_new_tokens = 30
-    return generation_config
-
 def get_greedy_with_single_stop_string() -> GenerationConfig:
     generation_config = GenerationConfig()
     generation_config.num_return_sequences = 1
@@ -296,10 +289,12 @@ def convert_to_hf(
         kwargs['max_length'] = generation_config.max_length
     # has higher priority than 'max_length'
     kwargs['max_new_tokens'] = generation_config.max_new_tokens
+    kwargs['min_new_tokens'] = generation_config.min_new_tokens
     if generation_config.stop_strings:
         kwargs['stop_strings'] = generation_config.stop_strings
 
     # copy default parameters
+    kwargs['bos_token_id'] = default_generation_config.bos_token_id
     kwargs['eos_token_id'] = default_generation_config.eos_token_id
     kwargs['pad_token_id'] = default_generation_config.pad_token_id
     kwargs['repetition_penalty'] = generation_config.repetition_penalty
@@ -308,11 +303,12 @@ def convert_to_hf(
         # beam search case
         kwargs['num_beam_groups'] = generation_config.num_beam_groups
         kwargs['num_beams'] = generation_config.num_beams
-        kwargs['diversity_penalty'] = generation_config.diversity_penalty
         kwargs['length_penalty'] = generation_config.length_penalty
         kwargs['no_repeat_ngram_size'] = generation_config.no_repeat_ngram_size
         kwargs['num_return_sequences'] = generation_config.num_return_sequences
         kwargs['output_scores'] = True
+        if generation_config.num_beam_groups > 1:
+            kwargs['diversity_penalty'] = generation_config.diversity_penalty
     elif generation_config.do_sample:
         # mulitinomial
         kwargs['temperature'] = generation_config.temperature
@@ -328,7 +324,7 @@ def convert_to_hf(
 
 def run_hugging_face(
-    model,
+    opt_model,
     hf_tokenizer,
     prompts: List[str],
     generation_configs: List[GenerationConfig],
@@ -337,8 +333,9 @@ def run_hugging_face(
     for prompt, generation_config in zip(prompts, generation_configs):
         inputs = hf_tokenizer(prompt, return_tensors="pt")
         prompt_len = inputs['input_ids'].numel()
-        generate_outputs = model.generate(input_ids=inputs['input_ids'], attention_mask=inputs['attention_mask'], generation_config=convert_to_hf(model.generation_config, generation_config),
-                                          return_dict_in_generate=True, tokenizer=hf_tokenizer)
+        generate_outputs = opt_model.generate(input_ids=inputs['input_ids'], attention_mask=inputs['attention_mask'],
+                                              generation_config=convert_to_hf(opt_model.generation_config, generation_config),
+                                              return_dict_in_generate=True, tokenizer=hf_tokenizer)
         all_text_batch = hf_tokenizer.batch_decode([generated_ids[prompt_len:] for generated_ids in generate_outputs.sequences], skip_special_tokens=True)
 
         generation_result = GenerationResult()
@@ -349,7 +346,7 @@ def run_hugging_face(
         generation_results.append(generation_result)
 
     del hf_tokenizer
-    del model
+    del opt_model
 
     return generation_results
 
@@ -360,14 +357,14 @@ def run_continuous_batching(
     prompts: List[str],
     generation_configs : List[GenerationConfig]
 ) -> List[GenerationResult]:
-    pipe = ContinuousBatchingPipeline(models_path.absolute().as_posix(), scheduler_config, "CPU", {}, {})
+    pipe = ContinuousBatchingPipeline(models_path, scheduler_config, "CPU")
     output = pipe.generate(prompts, generation_configs)
     del pipe
     shutil.rmtree(models_path)
     return output
 
-def get_models_list(file_name: str):
+def read_models_list(file_name: str):
     models = []
     with open(file_name) as f:
         for model_name in f:
@@ -395,6 +392,14 @@ def compare_results(hf_result: GenerationResult, ov_result: GenerationResult, ge
     for hf_text, ov_text in zip(hf_result.m_generation_ids, ov_result.m_generation_ids):
         assert hf_text == ov_text
 
+
+def get_hugging_face_model_and_tokenizer(model_id: str, use_optimum = True):
+    hf_tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
+    opt_model = OVModelForCausalLM.from_pretrained(model_id, export=True, trust_remote_code=True) if use_optimum else \
+        AutoModelForCausalLM.from_pretrained(model_id)
+    return opt_model, hf_tokenizer
+
+
 def save_ov_model_from_optimum(model, hf_tokenizer, models_path: Path):
     model.save_pretrained(models_path)
     # convert tokenizers as well
@@ -404,23 +409,6 @@ def save_ov_model_from_optimum(model, hf_tokenizer, models_path: Path):
     serialize(tokenizer, models_path / "openvino_tokenizer.xml")
     serialize(detokenizer, models_path / "openvino_detokenizer.xml")
 
-def get_model_and_tokenizer(model_id: str, use_optimum = True):
-    hf_tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
-    model = OVModelForCausalLM.from_pretrained(model_id, export=True, trust_remote_code=True) if use_optimum else \
-        AutoModelForCausalLM.from_pretrained(model_id)
-    return model, hf_tokenizer
-
-def generate_and_compare_with_hf(model_id: str, prompts: List[str], generation_configs: List[GenerationConfig], scheduler_config: SchedulerConfig, tmp_path: Path):
-    use_optimum = True
-    models_path : Path = tmp_path / model_id
-    model, hf_tokenizer = get_model_and_tokenizer(model_id, use_optimum)
-
-    if use_optimum:
-        save_ov_model_from_optimum(model, hf_tokenizer, models_path)
-
-    hf_results = run_hugging_face(model=model, hf_tokenizer=hf_tokenizer, prompts=prompts, generation_configs=generation_configs)
-    _generate_and_compare_with_reference_results(models_path, prompts, hf_results, generation_configs, scheduler_config)
-
 def _generate_and_compare_with_reference_results(models_path: Path, prompts: List[str], reference_results: List[GenerationResult], generation_configs: List[GenerationConfig], scheduler_config: SchedulerConfig):
     ov_results : List[GenerationResult] = run_continuous_batching(models_path, scheduler_config, prompts, generation_configs)
@@ -433,19 +421,32 @@ def _generate_and_compare_with_reference_results(models_path: Path, prompts: Lis
         compare_results(ref_result, ov_result, generation_config)
 
+def generate_and_compare_with_hf(model_id: str, prompts: List[str], generation_configs: List[GenerationConfig], scheduler_config: SchedulerConfig, tmp_path: Path):
+    use_optimum = True
+    models_path : Path = tmp_path / model_id
+    opt_model, hf_tokenizer = get_hugging_face_model_and_tokenizer(model_id, use_optimum)
+
+    if use_optimum:
+        save_ov_model_from_optimum(opt_model, hf_tokenizer, models_path)
+
+    hf_results = run_hugging_face(opt_model=opt_model, hf_tokenizer=hf_tokenizer, prompts=prompts, generation_configs=generation_configs)
+    _generate_and_compare_with_reference_results(models_path, prompts, hf_results, generation_configs, scheduler_config)
+
+
 def generate_and_compare_with_reference_text(models_path: Path, prompts: List[str], reference_texts_per_prompt: List[List[str]], generation_configs: List[GenerationConfig], scheduler_config: SchedulerConfig):
     ov_results : List[GenerationResult] = run_continuous_batching(models_path, scheduler_config, prompts, generation_configs)
 
     assert len(prompts) == len(reference_texts_per_prompt)
     assert len(prompts) == len(ov_results)
 
-    for prompt, ref_texts_for_this_prompt, ov_result, generation_config in zip(prompts, reference_texts_per_prompt, ov_results, generation_configs):
+    for prompt, ref_texts_for_this_prompt, ov_result in zip(prompts, reference_texts_per_prompt, ov_results):
         print(f"Prompt = {prompt}\nref text = {ref_texts_for_this_prompt}\nOV result = {ov_result.m_generation_ids}")
 
         assert len(ref_texts_for_this_prompt) == len(ov_result.m_generation_ids)
         for ref_text, ov_text in zip(ref_texts_for_this_prompt, ov_result.m_generation_ids):
             assert ref_text == ov_text
 
+
 def run_test_pipeline(tmp_path: str, model_id: str, scheduler_params: dict = None, generation_config = None):
     prompts, generation_configs = get_test_dataset()
     scheduler_config = get_scheduler_config(scheduler_params)
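Taken together, the renamed helpers give the HF-vs-continuous-batching comparison a single entry point. A condensed usage sketch; `get_greedy()`, the no-argument `get_scheduler_config()` call, and the model id are assumptions for illustration, not part of this patch:

```python
from pathlib import Path

from common import get_greedy, get_scheduler_config, generate_and_compare_with_hf

# Exports the model via optimum, runs the HF reference through convert_to_hf(),
# then checks ContinuousBatchingPipeline output against it.
generate_and_compare_with_hf(
    model_id="TinyLlama/TinyLlama-1.1B-Chat-v1.0",  # illustrative model id
    prompts=["1+1=", "Why is the Sun yellow?"],
    generation_configs=[get_greedy(), get_greedy()],
    scheduler_config=get_scheduler_config(),
    tmp_path=Path("./tmp"),
)
```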
diff --git a/tests/python_tests/ov_genai_test_utils.py b/tests/python_tests/ov_genai_test_utils.py
index 5f2702a774..87b2147bcd 100644
--- a/tests/python_tests/ov_genai_test_utils.py
+++ b/tests/python_tests/ov_genai_test_utils.py
@@ -57,33 +57,6 @@ def get_models_list():
     return [(model_id, prefix / model_id.split('/')[1]) for model_id in model_ids]
 
-def get_whisper_models_list(tiny_only=False, multilingual=False, en_only=False):
-    precommit_models = [
-        "openai/whisper-tiny",
-        "openai/whisper-tiny.en",
-        "distil-whisper/distil-small.en",
-    ]
-    if multilingual:
-        precommit_models = ["openai/whisper-tiny"]
-    if en_only:
-        precommit_models = ["openai/whisper-tiny.en", "distil-whisper/distil-small.en"]
-    if tiny_only:
-        precommit_models = ["openai/whisper-tiny"]
-
-    nightly_models = []
-
-    if pytest.run_marker == "precommit":
-        model_ids = precommit_models
-    else:
-        model_ids = nightly_models
-
-    if pytest.selected_model_ids:
-        model_ids = [model_id for model_id in model_ids if model_id in pytest.selected_model_ids.split(' ')]
-
-    prefix = pathlib.Path(os.getenv('GENAI_MODELS_PATH_PREFIX', ''))
-    return [(model_id, prefix / model_id.split('/')[1]) for model_id in model_ids]
-
-
 def get_chat_models_list():
     precommit_models = [
         "Qwen/Qwen2-0.5B-Instruct",
@@ -101,90 +74,31 @@ def get_chat_models_list():
         model_ids = precommit_models
     else:
         model_ids = nightly_models
-    
+
     prefix = pathlib.Path(os.getenv('GENAI_MODELS_PATH_PREFIX', ''))
     return [(model_id, prefix / model_id.split('/')[1]) for model_id in model_ids]
 
-def get_chat_templates():
-    # Returns chat templates saved in tokenizer_configs.py,
-    # but skips some models that currently are not processed correctly.
-
-    skipped_models = {
-        # TODO: openchat/openchat_3.5 and berkeley-nest/Starling-LM-7B-alpha have the same template.
-        # Need to enable and unskip, since it's preset in continuous batching and has >100 000 downloads.
-        "openchat/openchat-3.5-0106",
-
-        # These models fail even on HF so no need to check if applying chat matches.
-        "vibhorag101/llama-2-13b-chat-hf-phr_mental_therapy",
-        "codellama/CodeLlama-34b-Instruct-hf",
-        "deepseek-ai/deepseek-math-7b-rl",
-        "allenai/tulu-2-7b",
-        "alexsobolev/IcaroLM",
-        "tokyotech-llm/Swallow-7b-instruct-v0.1",
-        "bofenghuang/vigogne-2-7b-chat",
-        "OpenBuddy/openbuddy-mistral2-7b-v20.3-32k",
-        "AliAbdelrasheed/maqa_llama_4bit",
-        "stephenlzc/Mistral-7B-v0.3-Chinese-Chat-uncensored",
-
-        # TODO: Need to support chat templates in more models: CVS-145963
-        # Either ov_genai is unable to parse chat_template or results do not match with HF.
-        "meta-llama/Meta-Llama-3-8B-Instruct",
-        "databricks/dbrx-instruct", # Chat template is not supported by Jinja2Cpp
-        "mosaicml/mpt-30b-chat",
-        "deepseek-ai/deepseek-coder-6.7b-instruct", # Chat template is not supported by Jinja2Cpp
-        "maldv/winter-garden-7b-alpha", # Chat template is not supported by Jinja2Cpp
-        "ishorn5/RTLCoder-Deepseek-v1.1", # Chat template is not supported by Jinja2Cpp
-        "openchat/openchat-3.5-0106",
-        "casperhansen/llama-3-70b-instruct-awq",
-        "TheBloke/deepseek-coder-33B-instruct-GPTQ",
-        "AI-Sweden-Models/gpt-sw3-356m-instruct",
-        "google/gemma-7b-it",
-        "THUDM/cogvlm2-llama3-chat-19B",
-        "KnutJaegersberg/internlm-20b-llama",
-        "maywell/Synatra-Mixtral-8x7B",
-        "MediaTek-Research/Breeze-7B-Instruct-v1_0",
-        "bofenghuang/vigostral-7b-chat",
-        "meetkai/functionary-small-v2.5", # Chat template is not supported by Jinja2Cpp
-        "openchat/openchat-3.6-8b-20240522",
-        "tenyx/TenyxChat-7B-v1",
-        "LoneStriker/TinyLlama-1.1B-32k-Instruct-3.0bpw-h6-exl2",
-        "yam-peleg/Hebrew-Gemma-11B-V2",
-        "shenzhi-wang/Llama3-8B-Chinese-Chat", # AssertionError
-        "nlpai-lab/KULLM3",
-        "HuggingFaceH4/zephyr-7b-gemma-sft-v0.1",
-        "MediaTek-Research/Breeze-7B-Instruct-v0_1",
-        "shanchen/llama3-8B-slerp-biomed-chat-chinese", # AssertionError
-        "MLP-KTLim/llama-3-Korean-Bllossom-8B",
-        "aloobun/CosmicBun-8B", # Chat template is not supported by Jinja2Cpp
-        "codellama/CodeLlama-70b-Instruct-hf",
-        "gorilla-llm/gorilla-openfunctions-v2", # Chat template is not supported by Jinja2Cpp
-        "BramVanroy/Llama-2-13b-chat-dutch"
-    }
-
-    from tokenizer_configs import get_tokenizer_configs
-    return [(k, v) for k, v in get_tokenizer_configs().items() if k not in skipped_models]
-
-
 @functools.lru_cache(1)
 def read_model(params, **tokenizer_kwargs):
     model_id, path = params
 
     from optimum.intel.openvino import OVModelForCausalLM
     from transformers import AutoTokenizer
-    tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
+    hf_tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
 
     if (path / "openvino_model.xml").exists():
         opt_model = OVModelForCausalLM.from_pretrained(path, trust_remote_code=True, compile=False, device='CPU')
     else:
-        ov_tokenizer, ov_detokenizer = openvino_tokenizers.convert_tokenizer(tokenizer,
+        ov_tokenizer, ov_detokenizer = openvino_tokenizers.convert_tokenizer(hf_tokenizer,
                                                                              with_detokenizer=True,
                                                                              **tokenizer_kwargs)
         openvino.save_model(ov_tokenizer, path / "openvino_tokenizer.xml")
         openvino.save_model(ov_detokenizer, path / "openvino_detokenizer.xml")
 
         # to store tokenizer config jsons with special tokens
-        tokenizer.save_pretrained(path)
+        hf_tokenizer.save_pretrained(path)
 
         opt_model = OVModelForCausalLM.from_pretrained(model_id, export=True, trust_remote_code=True,
                                                        compile=False, device='CPU', load_in_8bit=False)
@@ -195,7 +109,7 @@ def read_model(params, **tokenizer_kwargs):
     return (
         model_id,
         path,
-        tokenizer,
+        hf_tokenizer,
         opt_model,
         ov_genai.LLMPipeline(path, 'CPU', **{'ENABLE_MMAP': False}),
     )
@@ -256,20 +170,8 @@ def model_tokenizers_path_tmp_path(tmpdir_factory):
     yield model_id, Path(temp_path)
 
-def load_tok(configs: List[Tuple], temp_path):
-    # load Tokenizer where all configs are cleared.
-    # remove existing jsons from previous tests
-    for json_file in temp_path.glob("*.json"):
-        json_file.unlink()
-
-    for config_json, config_name in configs:
-        with (temp_path / config_name).open('w') as f:
-            json.dump(config_json, f)
-    return ov_genai.Tokenizer(temp_path)
-
-
-def load_pipe(configs: List[Tuple], temp_path):
-    # Load LLMPipline where all configs are cleared.
+def load_genai_pipe_with_configs(configs: List[Tuple], temp_path):
+    # Load LLMPipeline where all configs are cleared.
     # remove existing jsons from previous tests
     for json_file in temp_path.glob("*.json"):
         json_file.unlink()
diff --git a/tests/python_tests/test_cache_optimizations.py b/tests/python_tests/test_cache_optimizations.py
index 3c09d34756..d89697ba42 100644
--- a/tests/python_tests/test_cache_optimizations.py
+++ b/tests/python_tests/test_cache_optimizations.py
@@ -112,8 +112,8 @@ def test_cache_optimized_generation_is_similar_to_unoptimized(converted_model, t
     scheduler_config_opt.enable_prefix_caching = enable_prefix_caching
 
     models_path = converted_model.models_path
-    model_cb_noopt = ContinuousBatchingPipeline(models_path.absolute().as_posix(), scheduler_config, "CPU", {})
-    model_cb_opt = ContinuousBatchingPipeline(models_path.absolute().as_posix(), scheduler_config_opt, "CPU", {})
+    model_cb_noopt = ContinuousBatchingPipeline(models_path, scheduler_config, "CPU")
+    model_cb_opt = ContinuousBatchingPipeline(models_path, scheduler_config_opt, "CPU")
 
     tokenizer = converted_model.tokenizer
diff --git a/tests/python_tests/test_chat_generate_api.py b/tests/python_tests/test_chat_generate_api.py
index d9661e538b..07b4f7c15f 100644
--- a/tests/python_tests/test_chat_generate_api.py
+++ b/tests/python_tests/test_chat_generate_api.py
@@ -4,24 +4,21 @@
 import openvino_genai as ov_genai
 import pytest
 from typing import Dict, Tuple
+
 from ov_genai_test_utils import (
-    get_models_list,
     get_chat_models_list,
     read_model,
-    load_tok,
-    model_tmp_path,
-    get_chat_templates,
     get_continuous_batching,
 )
 
-configs = [
+generation_configs = [
     dict(do_sample=False, max_new_tokens=20),
     dict(do_sample=False, num_beam_groups=3, num_beams=15, num_return_sequences=1, max_new_tokens=10, diversity_penalty=1.0)
 ]
 
-quenstions = [
+questions = [
     '1+1=',
     'What is the previous answer?',
     'Why is the Sun yellow?',
@@ -29,7 +26,7 @@
 ]
 
-@pytest.mark.parametrize("generation_config", configs)
+@pytest.mark.parametrize("generation_config", generation_configs)
 @pytest.mark.parametrize("model_descr", get_chat_models_list())
 @pytest.mark.precommit
 @pytest.mark.nightly
@@ -37,18 +34,18 @@ def test_chat_compare_with_HF(model_descr, generation_config: Dict):
     chat_history_hf = []
     chat_history_ov = []
     chat_prompt = ''
-    
+
     # Will set add_special_tokens=False inside pipeline when start_chat() is called.
     model_id, path, tokenizer, model_opt, pipe = read_model((model_descr[0], model_descr[1] / '_test_chat'))
-    pipe.start_chat()    
-    for prompt in quenstions:
+
+    pipe.start_chat()
+    for prompt in questions:
         chat_history_hf.append({'role': 'user', 'content': prompt})
         chat_history_ov.append({'role': 'user', 'content': prompt})
-        
+
         chat_prompt = tokenizer.apply_chat_template(chat_history_hf, tokenize=False, add_generation_prompt=True)
         tokenized = tokenizer(chat_prompt, return_tensors='pt', add_special_tokens=False)
-        
+
         answer = model_opt.generate(**tokenized, **generation_config)
         answer_str = tokenizer.decode(answer[0, tokenized['input_ids'].numel():], skip_special_tokens=True)
         chat_history_hf.append({'role': 'assistant', 'content': answer_str})
@@ -57,14 +54,15 @@ def test_chat_compare_with_HF(model_descr, generation_config: Dict):
         chat_history_ov.append({'role': 'assistant', 'content': answer_ov})
     pipe.finish_chat()
-    
+
     if chat_history_ov != chat_history_hf:
         print(f'hf_output: {chat_history_hf}')
         print(f'ov_output: {chat_history_ov}')
+
     assert chat_history_ov == chat_history_hf
 
-@pytest.mark.parametrize("generation_config", configs)
+@pytest.mark.parametrize("generation_config", generation_configs)
 @pytest.mark.parametrize("model_descr", get_chat_models_list())
 @pytest.mark.precommit
 @pytest.mark.nightly
@@ -73,172 +71,48 @@ def test_chat_compare_text_history_with_HF(model_descr, generation_config: Dict)
     chat_history_hf = []
     chat_history_ov = []
     chat_prompt = ''
-    
+
     # HF in chat scenario does not add special tokens, but openvino tokenizer by default is converted with add_special_tokens=True.
     # Need to regenerate openvino_tokenizer/detokenizer.
-    model_id, path, tokenizer, model_opt, pipe = read_model((model_descr[0], model_descr[1] / '_test_chat'), add_special_tokens=False)
-    
-    for prompt in quenstions:
+    model_id, path, hf_tokenizer, model_opt, ov_pipe = read_model((model_descr[0], model_descr[1] / '_test_chat'), add_special_tokens=False)
+    ov_tokenizer = ov_pipe.get_tokenizer()
+
+    for prompt in questions:
         chat_history_hf.append({'role': 'user', 'content': prompt})
         chat_history_ov.append({'role': 'user', 'content': prompt})
-        
-        chat_prompt = tokenizer.apply_chat_template(chat_history_hf, tokenize=False, add_generation_prompt=True)
-        tokenized = tokenizer(chat_prompt, return_tensors='pt', add_special_tokens=False)
-        
+
+        chat_prompt = hf_tokenizer.apply_chat_template(chat_history_hf, tokenize=False, add_generation_prompt=True)
+        tokenized = hf_tokenizer(chat_prompt, return_tensors='pt', add_special_tokens=False)
+
         answer = model_opt.generate(**tokenized, **generation_config)
-        answer_str = tokenizer.decode(answer[0, tokenized['input_ids'].numel():], skip_special_tokens=True)
+        answer_str = hf_tokenizer.decode(answer[0, tokenized['input_ids'].numel():], skip_special_tokens=True)
         chat_history_hf.append({'role': 'assistant', 'content': answer_str})
-        
-        chat_prompt = pipe.get_tokenizer().apply_chat_template(chat_history_ov, add_generation_prompt=True)
-        answer_ov = pipe.generate(chat_prompt, **generation_config)
+
+        chat_prompt = ov_tokenizer.apply_chat_template(chat_history_ov, add_generation_prompt=True)
+        answer_ov = ov_pipe.generate(chat_prompt, **generation_config)
         chat_history_ov.append({'role': 'assistant', 'content': answer_ov})
-    
+
     if chat_history_ov != chat_history_hf:
         print(f'hf_output: {chat_history_hf}')
         print(f'ov_output: {chat_history_ov}')
+
     assert chat_history_ov == chat_history_hf
 
-@pytest.mark.parametrize("generation_config", configs)
get_chat_models_list()) -@pytest.mark.precommit -@pytest.mark.nightly -def test_chat_compare_statefull_vs_text_history(model_descr, generation_config: Dict): - # Check that when history is stored in KV cache results are the same as when history stored in a text. - device ='CPU' - - chat_history_with_kv_cache = [] - chat_history_ov = [] - - # HF in chat scenario does not add special tokens, but openvino tokenizer by default is converted with add_special_tokens=True. - # Need to regenerate openvino_tokenizer/detokenizer. - model_id, path, tokenizer, model_opt, pipe = read_model((model_descr[0], model_descr[1] / '_test_chat'), add_special_tokens=False) - pipe_with_kv_cache = ov_genai.LLMPipeline(path, device, **{"ENABLE_MMAP": False}) - - pipe_with_kv_cache.start_chat() - for question in quenstions: - chat_history_with_kv_cache.append({'role': 'user', 'content': question}) - answer = pipe_with_kv_cache.generate(question, **generation_config) - chat_history_with_kv_cache.append({'role': 'assistant', 'content': answer}) - - chat_history_ov.append({'role': 'user', 'content': question}) - prompt = pipe.get_tokenizer().apply_chat_template(chat_history_ov, add_generation_prompt=True) - answer = pipe.generate(prompt, **generation_config) - chat_history_ov.append({'role': 'assistant', 'content': answer}) - pipe_with_kv_cache.finish_chat() - - if chat_history_ov != chat_history_with_kv_cache: - print(f'kvcache_hist: {chat_history_with_kv_cache}') - print(f'text_history: {chat_history_ov}') - assert chat_history_ov == chat_history_with_kv_cache - - -conversation = [ - {'role': 'user', 'content': '1+1='}, - {'role': 'assistant', 'content': '1 + 1 = 2'}, - {'role': 'user', 'content': 'What is the previous answer?'}, - {'role': 'assistant', 'content': 'The previous answer was: 1 + 1 = 2. Please ask me your next question.'}, - {'role': 'user', 'content': 'Why is the sun yellow?'}, - {'role': 'assistant', 'content': 'Because it emits yeloow light.'}, - {'role': 'user', 'content': 'What was my first question?'}, -] -@pytest.mark.precommit -@pytest.mark.nightly -@pytest.mark.parametrize('chat_config', get_chat_templates()) -def test_apply_chat_template(model_tmp_path, chat_config: Tuple[str, Dict]): - tokenizer_config = chat_config[1] - - # Will load openvino_model for tiny-random-phi as a placeholder - # but indeed only Tokenizer and apply_chat_template will be tested. 
- model_id, path, tokenizer, opt_model, pipe = read_model(get_models_list()[0]) - - full_history_str_hf = tokenizer.apply_chat_template(conversation, - add_generation_prompt=False, - tokenize=False, - **tokenizer_config) - - tok = load_tok([(tokenizer_config, "tokenizer_config.json")], model_tmp_path[1]) - tok.set_chat_template(tokenizer_config['chat_template']) - full_history_str = tok.apply_chat_template(conversation, add_generation_prompt=False) - if full_history_str != full_history_str_hf: - print(f'hf reference: {full_history_str_hf}') - print(f'ov_genai out: {full_history_str}') - assert full_history_str == full_history_str_hf - - -@pytest.mark.parametrize("generation_config", configs[1:]) +@pytest.mark.parametrize("generation_config", generation_configs[1:]) @pytest.mark.parametrize("model_descr", get_chat_models_list()) @pytest.mark.precommit def test_chat_continuous_batching_vs_stateful(model_descr, generation_config: Dict): - model_id, path, tokenizer, model, stateful = read_model((model_descr[0], model_descr[1] / '_test_chat')) - cb = get_continuous_batching(path) - stateful.start_chat() - cb.start_chat() - for question in quenstions: - generated = cb.generate(question, **generation_config) - reference = stateful.generate(question, **generation_config) - assert generated == reference - # Test that finish_chat() doesn't fail just in case. - cb.finish_chat() - -@pytest.mark.precommit -@pytest.mark.nightly -def test_set_chat_template(): - model_descr = get_chat_models_list()[0] - model_id, path, tokenizer, model_opt, pipe = read_model((model_descr[0], model_descr[1] / '_test_chat')) - pipe.get_tokenizer().set_chat_template("{% for message in messages %}{{ message['content'] }}{% endfor %}") - config = ov_genai.GenerationConfig() - config.max_new_tokens = 1 - config.do_sample = False - pipe.start_chat() - generated = pipe.generate("a", config) - pipe.finish_chat() - reference = pipe.generate("a", config) - assert generated == reference + model_id, path, hf_tokenizer, opt_model, ov_stateful_pipe = read_model((model_descr[0], model_descr[1] / '_test_chat')) + cb_pipe = get_continuous_batching(path) -prompts = [ - '1+1=', - 'What is the previous answer?', - 'Why is the Sun yellow?', - 'What was my first question?', - ['Why is the Sun yellow?'], - "若我有一亿美元,在人工智能盛行的今天,我怎样投资才能收益最大化?", - "מחרוזת בדיקה", - "Multiline\nstring!\nWow!", -] + ov_stateful_pipe.start_chat() + cb_pipe.start_chat() -@pytest.mark.precommit -@pytest.mark.nightly -@pytest.mark.parametrize("add_special_tokens", [True, False]) -@pytest.mark.parametrize("prompt", prompts) -def test_add_special_tokens(add_special_tokens, prompt): - import numpy as np - model_descr = get_chat_models_list()[0] - model_id, path, hf_tokenizer, model_opt, pipe = read_model((model_descr[0], model_descr[1] / '_test_chat')) - genai_tokenzier = pipe.get_tokenizer() - - # Calling encode with add_special_tokens will set state flag. 
- res_genai = genai_tokenzier.encode(prompt, add_special_tokens).input_ids.data - res_hf = hf_tokenizer(prompt, return_tensors="np", add_special_tokens=add_special_tokens)["input_ids"] - assert np.all(res_genai == res_hf) + for question in questions: + generated = cb_pipe.generate(question, **generation_config) + reference = ov_stateful_pipe.generate(question, **generation_config) + assert generated == reference -@pytest.mark.precommit -@pytest.mark.nightly -@pytest.mark.parametrize("add_special_tokens", [True, False]) -@pytest.mark.parametrize("skip_special_tokens", [True, False]) -@pytest.mark.parametrize("prompt", prompts) -def test_add_special_tokens(add_special_tokens, skip_special_tokens, prompt): - import numpy as np - model_descr = get_chat_models_list()[0] - model_id, path, hf_tokenizer, model_opt, pipe = read_model((model_descr[0], model_descr[1] / '_test_chat')) - genai_tokenizer = pipe.get_tokenizer() - - # Calling encode with add_special_tokens will set state flag. - res_genai = genai_tokenizer.encode(prompt, add_special_tokens).input_ids.data - res_hf = hf_tokenizer(prompt, return_tensors="np", add_special_tokens=add_special_tokens)["input_ids"] - assert np.all(res_genai == res_hf) - - # Decode with skip_special_tokens - decoded_genai = genai_tokenizer.decode(res_genai, skip_special_tokens=skip_special_tokens)[0] - decoded_hf = hf_tokenizer.decode(res_hf[0], skip_special_tokens=skip_special_tokens) - assert decoded_genai == decoded_hf + # Test that finish_chat() doesn't fail just in case. + cb_pipe.finish_chat() diff --git a/tests/python_tests/test_generate_api.py b/tests/python_tests/test_generate_api.py index 9bb9eff49c..824a3cca26 100644 --- a/tests/python_tests/test_generate_api.py +++ b/tests/python_tests/test_generate_api.py @@ -4,7 +4,6 @@ import openvino_genai as ov_genai from openvino_genai import StopCriteria import pytest -import transformers from typing import Union, List, Dict, Optional import numpy as np import openvino as ov @@ -15,8 +14,7 @@ from ov_genai_test_utils import ( get_models_list, read_model, - load_pipe, - load_tok, + load_genai_pipe_with_configs, model_tmp_path, STOP_CRITERIA_MAP, get_continuous_batching, @@ -24,7 +22,7 @@ def run_hf_ov_genai_comparison_batched(model_descr, generation_config: Dict, prompts: Union[str, List[str]]): - model_id, path, tokenizer, model, pipe = model_descr + model_id, path, hf_tokenizer, opt_model, ov_pipe = model_descr config = generation_config.copy() # to avoid side effects num_beams = config['num_beams'] if 'num_beams' in config else 1 config['num_return_sequences'] = num_beams @@ -39,25 +37,25 @@ def run_hf_ov_genai_comparison_batched(model_descr, generation_config: Dict, pro # Do not apply 'repetition_penalty' if sampling is not used. 
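# Presumably pinned because HF applies 'repetition_penalty' in greedy search as well,
# so both sides must run with the neutral value 1.0 to stay comparable; e.g. an input
# of dict(max_new_tokens=20) is normalized below to the equivalent of (hypothetical):
neutral_config = dict(max_new_tokens=20, do_sample=False, repetition_penalty=1.0)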
config['do_sample'] = False config['repetition_penalty'] = 1.0 # 1.0 means no penalty - + generation_config_hf = config.copy() if generation_config_hf.get('stop_criteria'): generation_config_hf['early_stopping'] = STOP_CRITERIA_MAP[generation_config_hf.pop('stop_criteria')] generation_config_hf.pop('ignore_eos', None) # Encode the batch of prompts - tokenizer.padding_side = "left" - encoded_prompts = tokenizer(prompts, return_tensors='pt', padding=True, truncation=True, add_special_tokens=True) + hf_tokenizer.padding_side = "left" + encoded_prompts = hf_tokenizer(prompts, return_tensors='pt', padding=True, truncation=True, add_special_tokens=True) prompt_ids, attention_mask = encoded_prompts['input_ids'], encoded_prompts['attention_mask'] - - hf_encoded_outputs = model.generate(prompt_ids, attention_mask=attention_mask, **generation_config_hf) + + hf_encoded_outputs = opt_model.generate(prompt_ids, attention_mask=attention_mask, **generation_config_hf) hf_outputs = [] for idx, hf_encoded_out in enumerate(hf_encoded_outputs): prompt_count = idx // num_beams - hf_outputs.append(tokenizer.decode(hf_encoded_out[prompt_ids[prompt_count].shape[0]:], skip_special_tokens=True)) + hf_outputs.append(hf_tokenizer.decode(hf_encoded_out[prompt_ids[prompt_count].shape[0]:], skip_special_tokens=True)) - ov_outputs = pipe.generate(prompts, **config).texts + ov_outputs = ov_pipe.generate(prompts, **config).texts hf_outputs.sort() ov_outputs.sort() @@ -67,8 +65,9 @@ def run_hf_ov_genai_comparison_batched(model_descr, generation_config: Dict, pro print(f'ov_output: {ov_output}') assert hf_output == ov_output -def run_hf_ov_genai_comparison(model_descr, generation_config: Dict, prompt: str): - model_id, path, tokenizer, model, pipe = model_descr + +def run_hf_ov_genai_comparison_text_inputs(model_descr, generation_config: Dict, prompt: str): + model_id, path, hf_tokenizer, opt_model, ov_pipe = model_descr config = generation_config.copy() # to avoid side effects @@ -85,12 +84,12 @@ def run_hf_ov_genai_comparison(model_descr, generation_config: Dict, prompt: str generation_config_hf['early_stopping'] = STOP_CRITERIA_MAP[generation_config_hf.pop('stop_criteria')] generation_config_hf.pop('ignore_eos', None) - encoded_prompt = tokenizer([prompt], return_tensors='pt', add_special_tokens=True) + encoded_prompt = hf_tokenizer([prompt], return_tensors='pt', add_special_tokens=True) prompt_ids, attention_mask = encoded_prompt['input_ids'], encoded_prompt['attention_mask'] - hf_encoded_output = model.generate(prompt_ids, attention_mask=attention_mask, **generation_config_hf) - hf_output = tokenizer.decode(hf_encoded_output[0, prompt_ids.shape[1]:], skip_special_tokens=True) + hf_encoded_output = opt_model.generate(prompt_ids, attention_mask=attention_mask, **generation_config_hf) + hf_output = hf_tokenizer.decode(hf_encoded_output[0, prompt_ids.shape[1]:], skip_special_tokens=True) - ov_output = pipe.generate(prompt, **config) + ov_output = ov_pipe.generate(prompt, **config) if config.get('num_return_sequences', 1) > 1: assert hf_output in ov_output.texts else: @@ -100,14 +99,15 @@ def run_hf_ov_genai_comparison(model_descr, generation_config: Dict, prompt: str assert hf_output == ov_output -def hf_ov_genai_tensors_comparison( + +def run_hf_ov_genai_comparison_encoded_inputs( model_descr, generation_config: Dict, input_ids: np.ndarray, attention_mask: Optional[np.array] = None ): device = 'CPU' - model_id, path, tokenizer, model, pipe = model_descr + model_id, path, hf_tokenizer, opt_model, ov_pipe = model_descr 
config = generation_config.copy() # to avoid side effects @@ -131,10 +131,8 @@ def hf_ov_genai_tensors_comparison( inputs_hf = dict(inputs=torch.tensor(input_ids)) inputs_ov = ov.Tensor(input_ids) - hf_output = model.generate(**inputs_hf, **generation_config_hf) - - pipe = ov_genai.LLMPipeline(path, device) - ov_output = pipe.generate(inputs_ov, **config) + hf_output = opt_model.generate(**inputs_hf, **generation_config_hf) + ov_output = ov_pipe.generate(inputs_ov, **config) hf_res = hf_output[0, input_ids.shape[1]:].numpy() ov_res = np.array(ov_output.tokens, dtype=np.int64) @@ -154,7 +152,8 @@ def hf_ov_genai_tensors_comparison( @pytest.mark.precommit @pytest.mark.nightly def test_decoding(model_descr, generation_config, prompt): - run_hf_ov_genai_comparison(read_model(model_descr), generation_config, prompt) + run_hf_ov_genai_comparison_text_inputs(read_model(model_descr), generation_config, prompt) + input_tensors_list = [ # input_ids, attention_mask @@ -165,62 +164,8 @@ def test_decoding(model_descr, generation_config, prompt): @pytest.mark.parametrize("model_descr", get_models_list()) @pytest.mark.precommit @pytest.mark.nightly -def test_ov_tensors(model_descr, inputs): - hf_ov_genai_tensors_comparison(read_model(model_descr), dict(max_new_tokens=20), *inputs) - - -prompts = [ - 'table is made of', - '你好! 你好嗎?', - 'Alan Turing was a', - 'The Sun is yellow because', - ['The Sun is yellow because', 'Alan Turing was a', 'Alan Turing was a'] -] -@pytest.mark.parametrize("model_descr", get_models_list()) -@pytest.mark.parametrize("prompt", prompts) -@pytest.mark.precommit -@pytest.mark.nightly -def test_genai_tokenizer_encode(model_descr, prompt): - model_id, path, tokenizer, model, pipe = read_model(model_descr) - tok = pipe.get_tokenizer() - - encoded_ov = tok.encode(prompt).input_ids.data - if isinstance(prompt, list): - encoded_hf = tokenizer.batch_encode_plus(prompt)['input_ids'] - for tokens_ov, tokens_hf in zip(encoded_ov, encoded_hf): - assert np.all(tokens_ov == tokens_hf) - else: - encoded_hf = tokenizer.encode(prompt) - assert np.all(encoded_hf == encoded_ov[0]) - -encoded_prompts = [ - [1, 1591, 338, 1754, 310], - [1, 17102, 323, 3864, 471, 263], - - # chineze characters - [1, 29871, 30919, 31076, 30584, 29871, 30919, 31076, 232, 154, 145, 30882], - - # On meta-llama/Meta-Llama-3-8B-Instruct this becomes longer after removing the last token - [3113, 264, 364, 267], - - # batched tokens - [[1, 1591, 338, 1754, 310], [1, 1591, 338, 1754, 310], [1, 17102, 323, 3864, 471, 263]] -] -@pytest.mark.parametrize("model_descr", get_models_list()) -@pytest.mark.parametrize("encoded_prompt", encoded_prompts) -@pytest.mark.precommit -def test_genai_tokenizer_decode(model_descr, encoded_prompt): - model_id, path, tokenizer, model, pipe = read_model(model_descr) - tok = pipe.get_tokenizer() - decoded_ov = tok.decode(encoded_prompt) - - if isinstance(encoded_prompt[0], list): - decoded_hf = tokenizer.batch_decode(encoded_prompt, skip_special_tokens=True) - for tokens_ov, tokens_hf in zip(decoded_ov, decoded_hf): - assert np.all(tokens_ov == tokens_hf) - else: - decoded_hf = tokenizer.decode(encoded_prompt, skip_special_tokens=True) - assert decoded_hf == decoded_ov +def test_encoded_inputs(model_descr, inputs): + run_hf_ov_genai_comparison_encoded_inputs(read_model(model_descr), dict(max_new_tokens=20), *inputs) test_configs = [ @@ -239,7 +184,7 @@ def test_genai_tokenizer_decode(model_descr, encoded_prompt): @pytest.mark.parametrize("model_descr", get_models_list()) @pytest.mark.precommit 
@pytest.mark.nightly -def test_multibatch(model_descr, generation_config, prompts): +def test_batch_text_input(model_descr, generation_config, prompts): run_hf_ov_genai_comparison_batched(read_model(model_descr), generation_config, prompts) @@ -261,7 +206,7 @@ def test_beam_search_decoding(model_descr, num_beam_groups, group_size, num_return_sequences=num_beam_groups * group_size, max_new_tokens=max_new_tokens, ) - run_hf_ov_genai_comparison(read_model(model_descr), generation_config, prompt) + run_hf_ov_genai_comparison_text_inputs(read_model(model_descr), generation_config, prompt) @pytest.mark.parametrize("stop_criteria", [StopCriteria.NEVER, StopCriteria.EARLY, StopCriteria.HEURISTIC]) @@ -283,7 +228,7 @@ def test_stop_criteria(model_descr, stop_criteria, prompt, max_new_tokens): max_new_tokens=max_new_tokens, stop_criteria=stop_criteria, ) - run_hf_ov_genai_comparison(read_model(model_descr), generation_config, prompt) + run_hf_ov_genai_comparison_text_inputs(read_model(model_descr), generation_config, prompt) # test long sequences @@ -302,7 +247,7 @@ def test_beam_search_long_sentences(model_descr, num_beam_groups, group_size, num_return_sequences=num_beam_groups * group_size, max_new_tokens=max_new_tokens, ) - run_hf_ov_genai_comparison(read_model(model_descr), generation_config, prompt) + run_hf_ov_genai_comparison_text_inputs(read_model(model_descr), generation_config, prompt) @pytest.mark.parametrize("prompt", prompts) @@ -317,17 +262,17 @@ def test_greedy_repetition_penalty(model_descr, prompt): max_new_tokens=20, do_sample=False ) - run_hf_ov_genai_comparison((model_id, path, tokenizer, model, pipe), generation_config, prompt) + run_hf_ov_genai_comparison_text_inputs((model_id, path, tokenizer, model, pipe), generation_config, prompt) generation_config = dict( repetition_penalty=1.0, max_new_tokens=20, do_sample=False ) - run_hf_ov_genai_comparison((model_id, path, tokenizer, model, pipe), generation_config, prompt) + run_hf_ov_genai_comparison_text_inputs((model_id, path, tokenizer, model, pipe), generation_config, prompt) ov_output = pipe.generate(prompt, **generation_config) - + generation_config = dict( repetition_penalty=0.5, max_new_tokens=20, @@ -346,19 +291,19 @@ def user_defined_callback(subword): @pytest.mark.precommit @pytest.mark.nightly def test_callback_one_string(callback): - pipe = read_model(get_models_list()[0])[4] - generation_config = pipe.get_generation_config() + ov_pipe = read_model(get_models_list()[0])[4] + generation_config = ov_pipe.get_generation_config() generation_config.max_new_tokens = 10 - pipe.generate('table is made of', generation_config, callback) + ov_pipe.generate('table is made of', generation_config, callback) @pytest.mark.parametrize("callback", [print, user_defined_callback, lambda subword: print(subword)]) @pytest.mark.precommit @pytest.mark.nightly -def test_callback_batch_fail(callback): - pipe = read_model(get_models_list()[0])[4] +def test_callback_batch_throws(callback): + ov_pipe = read_model(get_models_list()[0])[4] with pytest.raises(RuntimeError): - pipe.generate(['1', '2'], ov_genai.GenerationConfig(), callback) + ov_pipe.generate(['1', '2'], ov_pipe.get_generation_config(), callback) @pytest.mark.parametrize("callback", [print, user_defined_callback, lambda subword: print(subword)]) @@ -368,24 +313,25 @@ def test_callback_kwargs_one_string(callback): pipe = read_model(get_models_list()[0])[4] pipe.generate('table is made of', max_new_tokens=10, streamer=callback) + @pytest.mark.parametrize("callback", [print, 
user_defined_callback, lambda subword: print(subword)]) @pytest.mark.precommit @pytest.mark.nightly @pytest.mark.parametrize("model_descr", get_models_list()) def test_callback_decoding_metallama(model_descr, callback): - # On metallam this prompt generates output which can shorten after adding new tokens. + # On metallama this prompt generates output which can shorten after adding new tokens. # Test that streamer correctly handles such cases. prompt = 'I have an interview about product speccing with the company Weekend Health. Give me an example of a question they might ask with regards about a new feature' if model_descr[0] != 'meta-llama/Meta-Llama-3-8B-Instruct': pytest.skip() - pipe = read_model(model_descr)[4] - pipe.generate(prompt, max_new_tokens=300, streamer=callback) + ov_pipe = read_model(model_descr)[4] + ov_pipe.generate(prompt, max_new_tokens=300, streamer=callback) @pytest.mark.parametrize("callback", [print, user_defined_callback, lambda subword: print(subword)]) @pytest.mark.precommit @pytest.mark.nightly -def test_callback_kwargs_batch_fail(callback): +def test_callback_kwargs_batch_throws(callback): pipe = read_model(get_models_list()[0])[4] with pytest.raises(RuntimeError): pipe.generate(['1', '2'], max_new_tokens=10, streamer=callback) @@ -408,200 +354,73 @@ def end(self): @pytest.mark.precommit @pytest.mark.nightly def test_streamer_one_string(): - pipe = read_model(get_models_list()[0])[4] - generation_config = pipe.get_generation_config() + ov_pipe = read_model(get_models_list()[0])[4] + generation_config = ov_pipe.get_generation_config() generation_config.max_new_tokens = 10 - printer = Printer(pipe.get_tokenizer()) - pipe.generate('table is made of', generation_config, printer) + printer = Printer(ov_pipe.get_tokenizer()) + ov_pipe.generate('table is made of', generation_config, printer) @pytest.mark.precommit @pytest.mark.nightly -def test_streamer_batch_fail(): - pipe = read_model(get_models_list()[0])[4] - printer = Printer(pipe.get_tokenizer()) +def test_streamer_batch_throws(): + ov_pipe = read_model(get_models_list()[0])[4] + printer = Printer(ov_pipe.get_tokenizer()) with pytest.raises(RuntimeError): - pipe.generate(['1', '2'], ov_genai.GenerationConfig(), printer) + ov_pipe.generate(['1', '2'], ov_pipe.get_generation_config(), printer) @pytest.mark.precommit @pytest.mark.nightly def test_streamer_kwargs_one_string(): - pipe = read_model(get_models_list()[0])[4] - printer = Printer(pipe.get_tokenizer()) - pipe.generate('table is made of', max_new_tokens=10, do_sample=False, streamer=printer) + ov_pipe = read_model(get_models_list()[0])[4] + printer = Printer(ov_pipe.get_tokenizer()) + ov_pipe.generate('table is made of', max_new_tokens=10, do_sample=False, streamer=printer) @pytest.mark.precommit @pytest.mark.nightly -def test_streamer_kwargs_batch_fail(): - pipe = read_model(get_models_list()[0])[4] - printer = Printer(pipe.get_tokenizer()) +def test_streamer_kwargs_batch_throws(): + ov_pipe = read_model(get_models_list()[0])[4] + printer = Printer(ov_pipe.get_tokenizer()) with pytest.raises(RuntimeError): - pipe.generate('', num_beams=2, streamer=printer) + ov_pipe.generate('', num_beams=2, streamer=printer) @pytest.mark.precommit @pytest.mark.nightly @pytest.mark.parametrize("callback", [print, user_defined_callback, lambda subword: print(subword)]) def test_operator_with_callback_one_string(callback): - pipe = read_model(get_models_list()[0])[4] - ten_tokens = pipe.get_generation_config() + ov_pipe = read_model(get_models_list()[0])[4] + ten_tokens = 
ov_pipe.get_generation_config() ten_tokens.max_new_tokens = 10 - pipe('talbe is made of', ten_tokens, callback) + ov_pipe('table is made of', ten_tokens, callback) @pytest.mark.precommit @pytest.mark.nightly @pytest.mark.parametrize("callback", [print, user_defined_callback, lambda subword: print(subword)]) -def test_operator_with_callback_batch_fail(callback): - pipe = read_model(get_models_list()[0])[4] +def test_operator_with_callback_batch_throws(callback): + ov_pipe = read_model(get_models_list()[0])[4] with pytest.raises(RuntimeError): - pipe(['1', '2'], ov_genai.GenerationConfig(), callback) + ov_pipe(['1', '2'], ov_pipe.get_generation_config(), callback) @pytest.mark.precommit @pytest.mark.nightly def test_operator_with_streamer_kwargs_one_string(): - pipe = read_model(get_models_list()[0])[4] - printer = Printer(pipe.get_tokenizer()) - pipe('hi', max_new_tokens=10, do_sample=True, streamer=printer) + ov_pipe = read_model(get_models_list()[0])[4] + printer = Printer(ov_pipe.get_tokenizer()) + ov_pipe('hi', max_new_tokens=10, do_sample=True, streamer=printer) @pytest.mark.precommit @pytest.mark.nightly -def test_operator_with_streamer_kwargs_batch_fail(): - pipe = read_model(get_models_list()[0])[4] - printer = Printer(pipe.get_tokenizer()) +def test_operator_with_streamer_kwargs_batch_throws(): + ov_pipe = read_model(get_models_list()[0])[4] + printer = Printer(ov_pipe.get_tokenizer()) with pytest.raises(RuntimeError): - pipe('', num_beams=2, streamer=printer) - - -@pytest.mark.precommit -@pytest.mark.nightly -def test_load_special_tokens_ids_1(model_tmp_path): - # test when there is an available config.json - config_json = { - "pad_token_id": 422, - "bos_token_id": 42, - "eos_token_id": 37, - } - tok = load_tok([(config_json, "config.json")], model_tmp_path[1]) - assert tok.get_pad_token_id() == config_json['pad_token_id'] - assert tok.get_bos_token_id() == config_json['bos_token_id'] - assert tok.get_eos_token_id() == config_json['eos_token_id'] - - -@pytest.mark.precommit -@pytest.mark.nightly -def test_load_special_tokens_str_2(model_tmp_path): - # test with special_tokens_map - special_tokens_map_json = { - "pad_token": {"content": ""}, - "bos_token": {"content": ""}, - "eos_token": {"content": ""}, - } - tok = load_tok([(special_tokens_map_json, "special_tokens_map.json")], model_tmp_path[1]) - assert tok.get_pad_token() == special_tokens_map_json['pad_token']["content"] - assert tok.get_bos_token() == special_tokens_map_json['bos_token']["content"] - assert tok.get_eos_token() == special_tokens_map_json['eos_token']["content"] - - -@pytest.mark.precommit -@pytest.mark.nightly -@pytest.mark.skip(reason="CVS-158682 - RTInfo is not modified in tests for unknown reasons") -def test_load_special_tokens_3_(model_tokenizers_path_tmp_path): - # special_tokens_map is not available - # but tokenize_config.json exists - # will load both string and integer representations - tok_config_json = { - "added_tokens_decoder": { - "422": {"content": ""}, - "37": {"content": ""}, - "42": {"content": ""}, - }, - "pad_token": "", - "bos_token": "", - "eos_token": "", - } - - tok = load_tok([(tok_config_json, "tokenizer_config.json")], model_tokenizers_path_tmp_path[1]) - assert tok.get_pad_token() == tok_config_json['pad_token'] - assert tok.get_bos_token() == tok_config_json['bos_token'] - assert tok.get_eos_token() == tok_config_json['eos_token'] - - assert tok.get_pad_token_id() == 422 - assert tok.get_bos_token_id() == 37 - assert tok.get_eos_token_id() == 42 - - -@pytest.mark.precommit 
-@pytest.mark.nightly -def test_load_special_tokens_3(model_tmp_path): - # both config.json is available and tokenizer_config.json available - # check that it does not read int values from tokenizer_config.json if they are in config.json - tok_config_json = { - "added_tokens_decoder": { - # integers differ from config.json to check they don't override config.json - "777": {"content": ""}, - "888": {"content": ""}, - "656": {"content": ""}, - }, - "pad_token": "", - "bos_token": "", - "eos_token": "", - } - config_json = { - "pad_token_id": 422, - "bos_token_id": 42, - "eos_token_id": 37, - } - configs = [ - (tok_config_json, "tokenizer_config.json"), - (config_json, "config.json") - ] - tok = load_tok(configs, model_tmp_path[1]) - assert tok.get_pad_token_id() == config_json['pad_token_id'] - assert tok.get_bos_token_id() == config_json['bos_token_id'] - assert tok.get_eos_token_id() == config_json['eos_token_id'] - - assert tok.get_pad_token() == tok_config_json['pad_token'] - assert tok.get_bos_token() == tok_config_json['bos_token'] - assert tok.get_eos_token() == tok_config_json['eos_token'] - - -@pytest.mark.precommit -@pytest.mark.nightly -@pytest.mark.xfail( - raises=AssertionError, - reason="CVS-143410 ov tokenizer should be aligned with hf", - strict=False, -) -def test_load_special_tokens_4(model_tmp_path): - # only string representation is provided, find token integers by inference - model_id, temp_path = model_tmp_path - tokenizer = transformers.AutoTokenizer.from_pretrained(model_id, trust_remote_code=True) - - special_tokens_map_json = {} - token_str_int_map = {} - special_token_names = ['pad_token', 'bos_token', 'eos_token'] - for token_str in special_token_names: - if hasattr(tokenizer, token_str): - token_val = getattr(tokenizer, token_str) - special_tokens_map_json.update({token_str: {"content": token_val}}) - token_id = tokenizer(token_val, add_special_tokens=False)['input_ids'][0] - token_str_int_map.update({token_str: token_id}) - - # since only string representations are present in the json will try to get by inference - tok = load_tok([(special_tokens_map_json, "special_tokens_map.json")], temp_path) - - # check ids inferred correctly for special tokens existing if HF tokenizer - if 'pad_token' in token_str_int_map: - assert tok.get_pad_token_id() == token_str_int_map['pad_token'] - if 'bos_token' in token_str_int_map: - assert tok.get_bos_token_id() == token_str_int_map['bos_token'] - if 'eos_token' in token_str_int_map: - assert tok.get_eos_token_id() == token_str_int_map['eos_token'] + ov_pipe('', num_beams=2, streamer=printer) invalid_configs = [ @@ -617,23 +436,24 @@ def test_load_special_tokens_4(model_tmp_path): @pytest.mark.parametrize("generation_config", invalid_configs) @pytest.mark.precommit @pytest.mark.nightly -def test_invalid_configs(model_tmp_path, generation_config): +def test_invalid_generation_configs_throws(model_tmp_path, generation_config): model_id, temp_path = model_tmp_path config_json = {} - pipe = load_pipe([(config_json, "config.json")], temp_path) + ov_pipe = load_genai_pipe_with_configs([(config_json, "config.json")], temp_path) with pytest.raises(RuntimeError): - pipe.generate('blah blah', **generation_config) + ov_pipe.generate('blah blah', **generation_config) @pytest.mark.precommit @pytest.mark.nightly def test_valid_configs(model_tmp_path): model_id, temp_path = model_tmp_path - pipe = load_pipe([({"eos_token_id": 37}, "config.json")], temp_path) + ov_pipe = load_genai_pipe_with_configs([({"eos_token_id": 37}, "config.json")], 
temp_path) config = ov_genai.GenerationConfig() config.do_sample = True # no eos_token_id but it's loaded from config.json - pipe.set_generation_config(config) + ov_pipe.set_generation_config(config) + invalid_py_configs = [ dict(num_beam_groups=3, num_beams=15, do_sample=True), @@ -648,49 +468,48 @@ def test_valid_configs(model_tmp_path): @pytest.mark.precommit @pytest.mark.nightly @pytest.mark.parametrize("generation_config", invalid_py_configs) -def test_python_generation_config_validation(model_tmp_path, generation_config): +def test_python_generation_config_validation_throws(model_tmp_path, generation_config): model_id, temp_path = model_tmp_path - pipe = load_pipe([({"eos_token_id": 37}, "config.json")], temp_path) - + ov_pipe = load_genai_pipe_with_configs([({"eos_token_id": 37}, "config.json")], temp_path) + # 'unexisting_key_name' key validity is checked in pybind and ValueError will be returned # instead of RuntimeError, which is returned when GenerationConfig values are validated return_exception_type = ValueError if 'unexisting_key_name' in generation_config else RuntimeError with pytest.raises(return_exception_type): - pipe.set_generation_config(ov_genai.GenerationConfig(**generation_config)) + ov_pipe.set_generation_config(ov_genai.GenerationConfig(**generation_config)) @pytest.mark.precommit @pytest.mark.nightly -def test_unicode_pybind_decoding_1(): +def test_unicode_pybind_decoding_one_string(): # On this model this prompt generates unfinished utf string. # Test that pybind will not fail. model_id, path = 'katuni4ka/tiny-random-phi3', Path('tiny-random-phi3') - pipe = read_model((model_id, path))[4] - res_str = pipe.generate(',', max_new_tokens=4) + ov_pipe = read_model((model_id, path))[4] + res_str = ov_pipe.generate(',', max_new_tokens=4) assert '�' == res_str[-1] - @pytest.mark.precommit @pytest.mark.nightly -def test_unicode_pybind_decoding_2(): +def test_unicode_pybind_decoding_batched(): # On this model this prompt generates unfinished utf string. # Test that pybind will not fail. model_id, path = 'katuni4ka/tiny-random-phi3', Path('tiny-random-phi3') - pipe = read_model((model_id, path))[4] - res_str = pipe.generate([","], max_new_tokens=4) + ov_pipe = read_model((model_id, path))[4] + res_str = ov_pipe.generate([","], max_new_tokens=4) assert '�' == res_str.texts[0][-1] @pytest.mark.precommit @pytest.mark.nightly -def test_unicode_pybind_decoding_3(): +def test_unicode_pybind_decoding_one_string_streamer(): # On this model this prompt generates unfinished utf-8 string # and streams it. Test that pybind will not fail while we pass string to python. 
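# Background for the '�' assertions in the tests below: when generation stops in the
# middle of a multi-byte UTF-8 sequence, decoding the incomplete tail yields the
# Unicode replacement character, which is exactly what '�' denotes:
assert '�' == '\N{REPLACEMENT CHARACTER}' == '\ufffd'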
model_id, path = 'katuni4ka/tiny-random-phi3', Path('tiny-random-phi3') - pipe = read_model((model_id, path))[4] + ov_pipe = read_model((model_id, path))[4] res_str = [] - pipe.generate(",", max_new_tokens=4, streamer=lambda x: res_str.append(x)) + ov_pipe.generate(",", max_new_tokens=4, streamer=lambda x: res_str.append(x)) assert '�' == res_str[-1] @@ -741,22 +560,24 @@ def test_continuous_batching_vs_stateful(prompt, generation_config): for gen, ref in zip(generated.scores, reference.scores): assert math.isclose(gen, ref, abs_tol=0.0003) + @pytest.mark.parametrize("prompt", prompts) @pytest.mark.precommit def test_cb_streamer_vs_return_vs_stateful(prompt): - model_id, path, tokenizer, model, stateful = read_model(( + model_id, path, hf_tokenizer, opt_model, ov_pipe = read_model(( "facebook/opt-125m", Path("opt-125m") )) - cb = get_continuous_batching(path) + cb_pipe = get_continuous_batching(path) streamed = [] - generated = cb.generate(prompt, max_new_tokens=20, streamer=lambda subword: streamed.append(subword)) - reference = stateful.generate(prompt, max_new_tokens=20) + generated = cb_pipe.generate(prompt, max_new_tokens=20, streamer=lambda subword: streamed.append(subword)) + reference = ov_pipe.generate(prompt, max_new_tokens=20) assert generated == "".join(streamed) assert "".join(streamed) == reference + def run_perf_metrics_collection(model_descr, generation_config: Dict, prompt: str) -> ov_genai.PerfMetrics: - model_id, path, tokenizer, model, pipe = model_descr + model_id, path, hf_tokenizer, opt_model, ov_pipe = model_descr config = generation_config.copy() # to avoid side effects @@ -767,7 +588,7 @@ def run_perf_metrics_collection(model_descr, generation_config: Dict, prompt: st # Do not apply 'repetition_penalty' if sampling is not used. 
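# Same normalization as in the comparison helpers above. A sketch of consuming the
# result, assuming the mean/std accessors exposed by ov_genai.PerfMetrics and a
# `model_descr` tuple as returned by read_model:
metrics = run_perf_metrics_collection(model_descr, dict(max_new_tokens=20), 'table is made of')
print(f'TTFT: {metrics.get_ttft().mean:.2f} ms, throughput: {metrics.get_throughput().mean:.2f} tokens/s')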
config['do_sample'] = False config['repetition_penalty'] = 1.0 # 1.0 means no penalty - return pipe.generate([prompt], **config).perf_metrics + return ov_pipe.generate([prompt], **config).perf_metrics test_cases = [ @@ -851,19 +672,19 @@ def test_perf_metrics(model_descr, generation_config, prompt): @pytest.mark.precommit @pytest.mark.nightly def test_batch_switch(): - pipe = read_model(('katuni4ka/tiny-random-phi3', Path('tiny-random-phi3')))[4] - pipe.generate(["a"], max_new_tokens=2) - pipe.generate(["1", "2"], max_new_tokens=2) + ov_pipe = read_model(('katuni4ka/tiny-random-phi3', Path('tiny-random-phi3')))[4] + ov_pipe.generate(["a"], max_new_tokens=2) + ov_pipe.generate(["1", "2"], max_new_tokens=2) @pytest.mark.precommit @pytest.mark.nightly def test_stop_token_ids(): - pipe = read_model(('katuni4ka/tiny-random-phi3', Path('tiny-random-phi3')))[4] - res = pipe.generate( + ov_pipe = read_model(('katuni4ka/tiny-random-phi3', Path('tiny-random-phi3')))[4] + res = ov_pipe.generate( ov.Tensor([(1,)]), max_new_tokens=3, - stop_token_ids={-1, 9935, pipe.get_tokenizer().get_eos_token_id()}, + stop_token_ids={-1, 9935, ov_pipe.get_tokenizer().get_eos_token_id()}, include_stop_str_in_output=False ) assert 2 == len(res.tokens[0]) @@ -873,8 +694,8 @@ def test_stop_token_ids(): @pytest.mark.precommit @pytest.mark.nightly def test_stop_strings(): - pipe = read_model(('katuni4ka/tiny-random-phi3', Path('tiny-random-phi3')))[4] - res = pipe.generate( + ov_pipe = read_model(('katuni4ka/tiny-random-phi3', Path('tiny-random-phi3')))[4] + res = ov_pipe.generate( "", max_new_tokens=5, stop_strings={"ignored", "боль"} diff --git a/tests/python_tests/test_preemption.py b/tests/python_tests/test_preemption.py index 49d6c8f6b0..7c648e73dc 100644 --- a/tests/python_tests/test_preemption.py +++ b/tests/python_tests/test_preemption.py @@ -4,7 +4,7 @@ import pytest from openvino_genai import GenerationConfig -from common import get_model_and_tokenizer, save_ov_model_from_optimum, generate_and_compare_with_reference_text, \ +from common import get_hugging_face_model_and_tokenizer, save_ov_model_from_optimum, generate_and_compare_with_reference_text, \ get_scheduler_config, run_test_pipeline, get_beam_search, get_greedy, \ get_multinomial_all_parameters, get_multinomial_temperature_and_num_return_sequence, \ get_multinomial_temperature_and_top_k, get_multinomial_temperature, get_multinomial_temperature_and_top_p @@ -87,7 +87,7 @@ def test_preemption_with_multinomial(tmp_path, dynamic_split_fuse): config.rng_seed = 0 config.max_new_tokens = 30 model_id : str = "facebook/opt-125m" - model, hf_tokenizer = get_model_and_tokenizer(model_id, use_optimum=True) + model, hf_tokenizer = get_hugging_face_model_and_tokenizer(model_id, use_optimum=True) models_path : Path = tmp_path / model_id save_ov_model_from_optimum(model, hf_tokenizer, models_path) @@ -168,7 +168,7 @@ def test_preemption_with_multinomial_n_seq(tmp_path, dynamic_split_fuse): for config in generation_configs: config.rng_seed = 0 model_id : str = "facebook/opt-125m" - model, hf_tokenizer = get_model_and_tokenizer(model_id, use_optimum=True) + model, hf_tokenizer = get_hugging_face_model_and_tokenizer(model_id, use_optimum=True) models_path : Path = tmp_path / model_id save_ov_model_from_optimum(model, hf_tokenizer, models_path) diff --git a/tests/python_tests/test_sampling.py b/tests/python_tests/test_sampling.py index d5df28bfd6..fbcce76bf7 100644 --- a/tests/python_tests/test_sampling.py +++ b/tests/python_tests/test_sampling.py @@ -10,7 +10,7 @@ from 
openvino_genai import ContinuousBatchingPipeline, GenerationConfig, Tokenizer from typing import List, TypedDict -from common import run_test_pipeline, get_models_list, get_model_and_tokenizer, save_ov_model_from_optimum, \ +from common import run_test_pipeline, read_models_list, get_hugging_face_model_and_tokenizer, save_ov_model_from_optimum, \ generate_and_compare_with_reference_text, get_greedy, get_beam_search, get_multinomial_temperature, \ get_greedy_with_penalties, get_multinomial_temperature, \ get_multinomial_temperature_and_top_k, get_multinomial_temperature_and_top_p, \ @@ -28,18 +28,18 @@ @pytest.mark.precommit -@pytest.mark.parametrize("model_id", get_models_list(os.path.join(os.path.dirname(os.path.realpath(__file__)), "models", "precommit"))) +@pytest.mark.parametrize("model_id", read_models_list(os.path.join(os.path.dirname(os.path.realpath(__file__)), "models", "precommit"))) def test_sampling_precommit(tmp_path, model_id): run_test_pipeline(tmp_path, model_id) @pytest.mark.nightly -@pytest.mark.parametrize("model_id", get_models_list(os.path.join(os.path.dirname(os.path.realpath(__file__)), "models", "nightly"))) +@pytest.mark.parametrize("model_id", read_models_list(os.path.join(os.path.dirname(os.path.realpath(__file__)), "models", "nightly"))) def test_sampling_nightly(tmp_path, model_id): run_test_pipeline(tmp_path, model_id) @pytest.mark.real_models -@pytest.mark.parametrize("model_id", get_models_list(os.path.join(os.path.dirname(os.path.realpath(__file__)), "models", "real_models"))) +@pytest.mark.parametrize("model_id", read_models_list(os.path.join(os.path.dirname(os.path.realpath(__file__)), "models", "real_models"))) def test_real_models(tmp_path, model_id): run_test_pipeline(tmp_path, model_id) @@ -313,7 +313,7 @@ def test_individual_generation_configs_random(tmp_path, test_struct: RandomSampl generation_config.rng_seed = 0 generation_configs = [generation_config] model_id : str = "facebook/opt-125m" - model, hf_tokenizer = get_model_and_tokenizer(model_id, use_optimum=True) + model, hf_tokenizer = get_hugging_face_model_and_tokenizer(model_id, use_optimum=True) models_path : Path = tmp_path / model_id save_ov_model_from_optimum(model, hf_tokenizer, models_path) @@ -337,12 +337,12 @@ def test_echo_without_completion(tmp_path, get_generation_config, max_num_batche scheduler_config.max_num_batched_tokens = max_num_batched_tokens generation_configs = [generation_config] model_id : str = "facebook/opt-125m" - model, hf_tokenizer = get_model_and_tokenizer(model_id, use_optimum=True) + model, hf_tokenizer = get_hugging_face_model_and_tokenizer(model_id, use_optimum=True) model_path : Path = tmp_path / model_id save_ov_model_from_optimum(model, hf_tokenizer, model_path) - pipe = ContinuousBatchingPipeline(model_path.absolute().as_posix(), Tokenizer(model_path.absolute().as_posix()), scheduler_config, "CPU", {}) + pipe = ContinuousBatchingPipeline(model_path, Tokenizer(model_path), scheduler_config, "CPU") outputs = pipe.generate(["What is OpenVINO?"], generation_configs) assert(len(outputs)) @@ -364,12 +364,12 @@ def test_echo_with_completion(tmp_path, get_generation_config, max_num_batched_t scheduler_config.max_num_batched_tokens = max_num_batched_tokens generation_configs = [generation_config] model_id : str = "facebook/opt-125m" - model, hf_tokenizer = get_model_and_tokenizer(model_id, use_optimum=True) + model, hf_tokenizer = get_hugging_face_model_and_tokenizer(model_id, use_optimum=True) model_path : Path = tmp_path / model_id 
save_ov_model_from_optimum(model, hf_tokenizer, model_path) - pipe = ContinuousBatchingPipeline(model_path.absolute().as_posix(), Tokenizer(model_path.absolute().as_posix()), scheduler_config, "CPU", {}) + pipe = ContinuousBatchingPipeline(model_path, Tokenizer(model_path), scheduler_config, "CPU") outputs = pipe.generate(["What is OpenVINO?"], generation_configs) assert(len(outputs)) @@ -392,12 +392,12 @@ def test_post_oom_health(tmp_path, sampling_config): scheduler_config.num_kv_blocks = 10 generation_configs = [generation_config] model_id : str = "facebook/opt-125m" - model, hf_tokenizer = get_model_and_tokenizer(model_id, use_optimum=True) + model, hf_tokenizer = get_hugging_face_model_and_tokenizer(model_id, use_optimum=True) models_path : Path = tmp_path / model_id save_ov_model_from_optimum(model, hf_tokenizer, models_path) - pipe = ContinuousBatchingPipeline(models_path.absolute().as_posix(), Tokenizer(models_path.absolute().as_posix()), scheduler_config, "CPU", {}) + pipe = ContinuousBatchingPipeline(models_path, Tokenizer(models_path), scheduler_config, "CPU") # First run should return incomplete response output = pipe.generate(["What is OpenVINO?"], generation_configs) assert (len(output)) diff --git a/tests/python_tests/test_tokenizer.py b/tests/python_tests/test_tokenizer.py new file mode 100644 index 0000000000..0c2a106d50 --- /dev/null +++ b/tests/python_tests/test_tokenizer.py @@ -0,0 +1,360 @@ +# Copyright (C) 2023-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import pytest +import numpy as np +from transformers import AutoTokenizer +from typing import Dict, Tuple, List +import openvino_genai +import json + +from ov_genai_test_utils import ( + get_models_list, + get_chat_models_list, + read_model, + model_tmp_path +) + + +def load_genai_tokenizer_with_configs(configs: List[Tuple], temp_path): + # load Tokenizer where all configs are cleared. + # remove existing jsons from previous tests + for json_file in temp_path.glob("*.json"): + json_file.unlink() + + for config_json, config_name in configs: + with (temp_path / config_name).open('w') as f: + json.dump(config_json, f) + return openvino_genai.Tokenizer(temp_path) + + +def get_chat_templates(): + # Returns chat templates saved in tokenizer_configs.py, + # but skips some models that currently are not processed correctly. + + skipped_models = { + # TODO: openchat/openchat_3.5 and berkeley-nest/Starling-LM-7B-alpha have the same template. + # Need to enable and unskip, since it's present in continuous batching and has >100 000 downloads. + "openchat/openchat-3.5-0106", + + # These models fail even on HF so no need to check if applying the chat template matches. + "vibhorag101/llama-2-13b-chat-hf-phr_mental_therapy", + "codellama/CodeLlama-34b-Instruct-hf", + "deepseek-ai/deepseek-math-7b-rl", + "allenai/tulu-2-7b", + "alexsobolev/IcaroLM", + "tokyotech-llm/Swallow-7b-instruct-v0.1", + "bofenghuang/vigogne-2-7b-chat", + "OpenBuddy/openbuddy-mistral2-7b-v20.3-32k", + "AliAbdelrasheed/maqa_llama_4bit", + "stephenlzc/Mistral-7B-v0.3-Chinese-Chat-uncensored", + + # TODO: Need to support chat templates in more models: CVS-145963 + # Either ov_genai is unable to parse chat_template or results do not match with HF.
+ "meta-llama/Meta-Llama-3-8B-Instruct", + "databricks/dbrx-instruct", # Chat template is not supported by Jinja2Cpp + "mosaicml/mpt-30b-chat", + "deepseek-ai/deepseek-coder-6.7b-instruct", # Chat template is not supported by Jinja2Cpp + "maldv/winter-garden-7b-alpha", # Chat template is not supported by Jinja2Cpp + "ishorn5/RTLCoder-Deepseek-v1.1", # Chat template is not supported by Jinja2Cpp + "openchat/openchat-3.5-0106", + "casperhansen/llama-3-70b-instruct-awq", + "TheBloke/deepseek-coder-33B-instruct-GPTQ", + "AI-Sweden-Models/gpt-sw3-356m-instruct", + "google/gemma-7b-it", + "THUDM/cogvlm2-llama3-chat-19B", + "KnutJaegersberg/internlm-20b-llama", + "maywell/Synatra-Mixtral-8x7B", + "MediaTek-Research/Breeze-7B-Instruct-v1_0", + "bofenghuang/vigostral-7b-chat", + "meetkai/functionary-small-v2.5", # Chat template is not supported by Jinja2Cpp + "openchat/openchat-3.6-8b-20240522", + "tenyx/TenyxChat-7B-v1", + "LoneStriker/TinyLlama-1.1B-32k-Instruct-3.0bpw-h6-exl2", + "yam-peleg/Hebrew-Gemma-11B-V2", + "shenzhi-wang/Llama3-8B-Chinese-Chat", # AssertionError + "nlpai-lab/KULLM3", + "HuggingFaceH4/zephyr-7b-gemma-sft-v0.1", + "MediaTek-Research/Breeze-7B-Instruct-v0_1", + "shanchen/llama3-8B-slerp-biomed-chat-chinese", # AssertionError + "MLP-KTLim/llama-3-Korean-Bllossom-8B", + "aloobun/CosmicBun-8B", # Chat template is not supported by Jinja2Cpp + "codellama/CodeLlama-70b-Instruct-hf", + "gorilla-llm/gorilla-openfunctions-v2", # Chat template is not supported by Jinja2Cpp + "BramVanroy/Llama-2-13b-chat-dutch" + } + + from tokenizer_configs import get_tokenizer_configs + return [(k, v) for k, v in get_tokenizer_configs().items() if k not in skipped_models] + + +prompts = [ + 'table is made of', + '你好! 你好嗎?', + 'Alan Turing was a', + 'The Sun is yellow because', + ['The Sun is yellow because', 'Alan Turing was a', 'Alan Turing was a'] +] +@pytest.mark.parametrize("model_descr", get_models_list()) +@pytest.mark.parametrize("prompt", prompts) +@pytest.mark.precommit +@pytest.mark.nightly +def test_encode(model_descr, prompt): + model_id, path, hf_tokenizer, opt_model, ov_pipe = read_model(model_descr) + ov_tokenizer = ov_pipe.get_tokenizer() + + encoded_ov = ov_tokenizer.encode(prompt).input_ids.data + if isinstance(prompt, list): + encoded_hf = hf_tokenizer.batch_encode_plus(prompt)['input_ids'] + for tokens_ov, tokens_hf in zip(encoded_ov, encoded_hf): + assert np.all(tokens_ov == tokens_hf) + else: + encoded_hf = hf_tokenizer.encode(prompt) + assert np.all(encoded_hf == encoded_ov[0]) + + +encoded_prompts = [ + [1, 1591, 338, 1754, 310], + [1, 17102, 323, 3864, 471, 263], + + # chineze characters + [1, 29871, 30919, 31076, 30584, 29871, 30919, 31076, 232, 154, 145, 30882], + + # On meta-llama/Meta-Llama-3-8B-Instruct this becomes longer after removing the last token + [3113, 264, 364, 267], + + # batched tokens + [[1, 1591, 338, 1754, 310], [1, 1591, 338, 1754, 310], [1, 17102, 323, 3864, 471, 263]] +] +@pytest.mark.parametrize("model_descr", get_models_list()) +@pytest.mark.parametrize("encoded_prompt", encoded_prompts) +@pytest.mark.precommit +def test_decode(model_descr, encoded_prompt): + model_id, path, hf_tokenizer, opt_model, ov_pipe = read_model(model_descr) + ov_tokenizer = ov_pipe.get_tokenizer() + decoded_ov = ov_tokenizer.decode(encoded_prompt) + + if isinstance(encoded_prompt[0], list): + decoded_hf = hf_tokenizer.batch_decode(encoded_prompt, skip_special_tokens=True) + for tokens_ov, tokens_hf in zip(decoded_ov, decoded_hf): + assert np.all(tokens_ov == tokens_hf) + 
else: + decoded_hf = hf_tokenizer.decode(encoded_prompt, skip_special_tokens=True) + assert decoded_hf == decoded_ov + + +conversation = [ + {'role': 'user', 'content': '1+1='}, + {'role': 'assistant', 'content': '1 + 1 = 2'}, + {'role': 'user', 'content': 'What is the previous answer?'}, + {'role': 'assistant', 'content': 'The previous answer was: 1 + 1 = 2. Please ask me your next question.'}, + {'role': 'user', 'content': 'Why is the sun yellow?'}, + {'role': 'assistant', 'content': 'Because it emits yellow light.'}, + {'role': 'user', 'content': 'What was my first question?'}, +] +@pytest.mark.precommit +@pytest.mark.nightly +@pytest.mark.parametrize('chat_config', get_chat_templates()) +def test_apply_chat_template(model_tmp_path, chat_config: Tuple[str, Dict]): + tokenizer_config = chat_config[1] + + # Will load openvino_model for tiny-random-phi as a placeholder + # but indeed only Tokenizer and apply_chat_template will be tested. + model_id, path, hf_tokenizer, opt_model, ov_pipe = read_model(get_models_list()[0]) + + hf_full_history_str = hf_tokenizer.apply_chat_template(conversation, + add_generation_prompt=False, + tokenize=False, + **tokenizer_config) + + ov_tokenizer = load_genai_tokenizer_with_configs([(tokenizer_config, "tokenizer_config.json")], model_tmp_path[1]) + ov_tokenizer.set_chat_template(tokenizer_config['chat_template']) + ov_full_history_str = ov_tokenizer.apply_chat_template(conversation, add_generation_prompt=False) + + if ov_full_history_str != hf_full_history_str: + print(f'hf reference: {hf_full_history_str}') + print(f'ov_genai out: {ov_full_history_str}') + assert ov_full_history_str == hf_full_history_str + + +@pytest.mark.precommit +@pytest.mark.nightly +def test_set_chat_template(): + model_descr = get_chat_models_list()[0] + model_id, path, hf_tokenizer, model_opt, ov_pipe = read_model((model_descr[0], model_descr[1] / '_test_chat')) + + prompt = "how are you?" + dummy_conversation = [ + {'role': 'user', 'content': prompt}, + ] + + ov_tokenizer = ov_pipe.get_tokenizer() + identity_chat_template = "{% for message in messages %}{{ message['content'] }}{% endfor %}" + + templated_prompt_inline = ov_tokenizer.apply_chat_template(dummy_conversation, add_generation_prompt=False, chat_template=identity_chat_template) + + ov_tokenizer.set_chat_template(identity_chat_template) + templated_prompt = ov_tokenizer.apply_chat_template(dummy_conversation, add_generation_prompt=False) + + assert templated_prompt_inline == templated_prompt + assert prompt == templated_prompt + + +prompts = [ + '1+1=', + 'What is the previous answer?', + 'Why is the Sun yellow?', + 'What was my first question?', + ['Why is the Sun yellow?'], + "若我有一亿美元,在人工智能盛行的今天,我怎样投资才能收益最大化?", + "מחרוזת בדיקה", + "Multiline\nstring!\nWow!", +] +@pytest.mark.precommit +@pytest.mark.nightly +@pytest.mark.parametrize("add_special_tokens", [True, False]) +@pytest.mark.parametrize("skip_special_tokens", [True, False]) +@pytest.mark.parametrize("prompt", prompts) +def test_encode_decode_with_special_tokens_option(add_special_tokens, skip_special_tokens, prompt): + model_descr = get_chat_models_list()[0] + model_id, path, hf_tokenizer, model_opt, ov_pipe = read_model((model_descr[0], model_descr[1] / '_test_chat')) + ov_tokenizer = ov_pipe.get_tokenizer() + + # Calling encode with 'add_special_tokens' will set state flag.
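# A standalone sketch of the round-trip checked below (illustrative; assumes the
# converted tokenizer saved under `path` and the `hf_tokenizer` from read_model):
ov_tok = openvino_genai.Tokenizer(path)
ov_ids = ov_tok.encode('1+1=', add_special_tokens=False).input_ids.data
hf_ids = hf_tokenizer('1+1=', return_tensors='np', add_special_tokens=False)['input_ids']
assert np.all(ov_ids == hf_ids)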
+ ov_res = ov_tokenizer.encode(prompt, add_special_tokens=add_special_tokens).input_ids.data + hf_res = hf_tokenizer(prompt, return_tensors="np", add_special_tokens=add_special_tokens)["input_ids"] + assert np.all(ov_res == hf_res) + + # Decode with 'skip_special_tokens' + decoded_genai = ov_tokenizer.decode(ov_res, skip_special_tokens=skip_special_tokens)[0] + decoded_hf = hf_tokenizer.decode(hf_res[0], skip_special_tokens=skip_special_tokens) + assert decoded_genai == decoded_hf + + +@pytest.mark.precommit +@pytest.mark.nightly +def test_load_special_tokens_from_config_json(model_tmp_path): + # test when there is an available config.json + config_json = { + "pad_token_id": 422, + "bos_token_id": 42, + "eos_token_id": 37, + } + tok = load_genai_tokenizer_with_configs([(config_json, "config.json")], model_tmp_path[1]) + assert tok.get_pad_token_id() == config_json['pad_token_id'] + assert tok.get_bos_token_id() == config_json['bos_token_id'] + assert tok.get_eos_token_id() == config_json['eos_token_id'] + + +@pytest.mark.precommit +@pytest.mark.nightly +def test_load_special_tokens_from_special_tokens_map_json(model_tmp_path): + # test with special_tokens_map + special_tokens_map_json = { + "pad_token": {"content": ""}, + "bos_token": {"content": ""}, + "eos_token": {"content": ""}, + } + tok = load_genai_tokenizer_with_configs([(special_tokens_map_json, "special_tokens_map.json")], model_tmp_path[1]) + assert tok.get_pad_token() == special_tokens_map_json['pad_token']["content"] + assert tok.get_bos_token() == special_tokens_map_json['bos_token']["content"] + assert tok.get_eos_token() == special_tokens_map_json['eos_token']["content"] + + +@pytest.mark.precommit +@pytest.mark.nightly +@pytest.mark.skip(reason="CVS-158682 - RTInfo is not modified in tests for unknown reasons") +def test_load_special_tokens_from_tokenizer_config_json(model_tokenizers_path_tmp_path): + # special_tokens_map is not available + # but tokenizer_config.json exists + # will load both string and integer representations + tok_config_json = { + "added_tokens_decoder": { + "422": {"content": ""}, + "37": {"content": ""}, + "42": {"content": ""}, + }, + "pad_token": "", + "bos_token": "", + "eos_token": "", + } + + tok = load_genai_tokenizer_with_configs([(tok_config_json, "tokenizer_config.json")], model_tokenizers_path_tmp_path[1]) + assert tok.get_pad_token() == tok_config_json['pad_token'] + assert tok.get_bos_token() == tok_config_json['bos_token'] + assert tok.get_eos_token() == tok_config_json['eos_token'] + + assert tok.get_pad_token_id() == 422 + assert tok.get_bos_token_id() == 37 + assert tok.get_eos_token_id() == 42 + + +@pytest.mark.precommit +@pytest.mark.nightly +def test_load_special_tokens_from_tokenizer_config_and_config_json(model_tmp_path): + # both config.json and tokenizer_config.json are available + # check that it does not read int values from tokenizer_config.json if they are in config.json + tok_config_json = { + "added_tokens_decoder": { + # integers differ from config.json to check they don't override config.json + "777": {"content": ""}, + "888": {"content": ""}, + "656": {"content": ""}, + }, + "pad_token": "", + "bos_token": "", + "eos_token": "", + } + config_json = { + "pad_token_id": 422, + "bos_token_id": 42, + "eos_token_id": 37, + } + configs = [ + (tok_config_json, "tokenizer_config.json"), + (config_json, "config.json") + ] + tok = load_genai_tokenizer_with_configs(configs, model_tmp_path[1]) + assert tok.get_pad_token_id() == config_json['pad_token_id'] + assert 
tok.get_bos_token_id() == config_json['bos_token_id'] + assert tok.get_eos_token_id() == config_json['eos_token_id'] + + assert tok.get_pad_token() == tok_config_json['pad_token'] + assert tok.get_bos_token() == tok_config_json['bos_token'] + assert tok.get_eos_token() == tok_config_json['eos_token'] + + +@pytest.mark.precommit +@pytest.mark.nightly +@pytest.mark.xfail( + raises=AssertionError, + reason="CVS-143410 ov tokenizer should be aligned with hf", + strict=False, +) +def test_load_special_tokens_from_special_tokens_map_json_with_string_repr(model_tmp_path): + # only string representation is provided, find token integers by inference + model_id, temp_path = model_tmp_path + tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True) + + special_tokens_map_json = {} + token_str_int_map = {} + special_token_names = ['pad_token', 'bos_token', 'eos_token'] + for token_str in special_token_names: + if hasattr(tokenizer, token_str): + token_val = getattr(tokenizer, token_str) + special_tokens_map_json.update({token_str: {"content": token_val}}) + token_id = tokenizer(token_val, add_special_tokens=False)['input_ids'][0] + token_str_int_map.update({token_str: token_id}) + + # since only string representations are present in the json, ids will be found by inference + tok = load_genai_tokenizer_with_configs([(special_tokens_map_json, "special_tokens_map.json")], temp_path) + + # check ids are inferred correctly for special tokens existing in the HF tokenizer + if 'pad_token' in token_str_int_map: + assert tok.get_pad_token_id() == token_str_int_map['pad_token'] + if 'bos_token' in token_str_int_map: + assert tok.get_bos_token_id() == token_str_int_map['bos_token'] + if 'eos_token' in token_str_int_map: + assert tok.get_eos_token_id() == token_str_int_map['eos_token'] + diff --git a/tests/python_tests/test_whisper_generate_api.py b/tests/python_tests/test_whisper_generate_api.py index 1450ef1f2e..aa78666e32 100644 --- a/tests/python_tests/test_whisper_generate_api.py +++ b/tests/python_tests/test_whisper_generate_api.py @@ -6,7 +6,6 @@ import pytest import openvino_tokenizers import openvino -from ov_genai_test_utils import get_whisper_models_list import datasets from transformers import WhisperProcessor, pipeline, AutoTokenizer from optimum.intel.openvino import OVModelForSpeechSeq2Seq @@ -15,6 +14,8 @@ import time import typing import numpy as np +import os +import pathlib @pytest.fixture(scope="class", autouse=True) def run_gc_after_test(): @@ -25,6 +26,34 @@ def run_gc_after_test(): yield gc.collect() + +def get_whisper_models_list(tiny_only=False, multilingual=False, en_only=False): + precommit_models = [ + "openai/whisper-tiny", + "openai/whisper-tiny.en", + "distil-whisper/distil-small.en", + ] + if multilingual: + precommit_models = ["openai/whisper-tiny"] + if en_only: + precommit_models = ["openai/whisper-tiny.en", "distil-whisper/distil-small.en"] + if tiny_only: + precommit_models = ["openai/whisper-tiny"] + + nightly_models = [] + + if pytest.run_marker == "precommit": + model_ids = precommit_models + else: + model_ids = nightly_models + + if pytest.selected_model_ids: + model_ids = [model_id for model_id in model_ids if model_id in pytest.selected_model_ids.split(' ')] + + prefix = pathlib.Path(os.getenv('GENAI_MODELS_PATH_PREFIX', '')) + return [(model_id, prefix / model_id.split('/')[1]) for model_id in model_ids] + + # used whisper models are relatively small # cache them in memory to speedup tests @functools.lru_cache(3) diff --git 
diff --git a/tests/python_tests/tokenizer_configs.py b/tests/python_tests/tokenizer_configs.py
index 45d60f998d..2b51dc2b0d 100644
--- a/tests/python_tests/tokenizer_configs.py
+++ b/tests/python_tests/tokenizer_configs.py
@@ -2,1011 +2,1011 @@ def get_tokenizer_configs():
     return { "meta-llama/Meta-Llama-3-8B-Instruct": { - "bos_token": "<|begin_of_text|>", - "eos_token": "<|eot_id|>", - "pad_token": None, - "unk_token": None, - "chat_template": "{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% endif %}" + "bos_token": "<|begin_of_text|>", + "eos_token": "<|eot_id|>", + "pad_token": None, + "unk_token": None, + "chat_template": "{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% endif %}" }, "TheBloke/Mistral-7B-OpenOrca-GPTQ": { - "bos_token": "<|begin_of_text|>", - "eos_token": "<|im_end|>", - "pad_token": "<|end_of_text|>", - "unk_token": None, - "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}" + "bos_token": "<|begin_of_text|>", + "eos_token": "<|im_end|>", + "pad_token": "<|end_of_text|>", + "unk_token": None, + "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}" }, "TinyLlama/TinyLlama-1.1B-Chat-v1.0": { - "bos_token": "", - "eos_token": "", - "pad_token": "", - "unk_token": "", - "chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}" + "bos_token": "", + "eos_token": "", + "pad_token": "", + "unk_token": "", + "chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}" }, "upstage/SOLAR-10.7B-Instruct-v1.0": { - "bos_token": "", - "eos_token": "", - "pad_token": None, - "unk_token": "", - "chat_template": "{% for message in messages %}{% if message['role'] ==
'system' %}{% if message['content']%}{{'### System:\n' + message['content']+'\n\n'}}{% endif %}{% elif message['role'] == 'user' %}{{'### User:\n' + message['content']+'\n\n'}}{% elif message['role'] == 'assistant' %}{{'### Assistant:\n' + message['content']}}{% endif %}{% if loop.last and add_generation_prompt %}{{ '### Assistant:\n' }}{% endif %}{% endfor %}" + "bos_token": "", + "eos_token": "", + "pad_token": None, + "unk_token": "", + "chat_template": "{% for message in messages %}{% if message['role'] == 'system' %}{% if message['content']%}{{'### System:\n' + message['content']+'\n\n'}}{% endif %}{% elif message['role'] == 'user' %}{{'### User:\n' + message['content']+'\n\n'}}{% elif message['role'] == 'assistant' %}{{'### Assistant:\n' + message['content']}}{% endif %}{% if loop.last and add_generation_prompt %}{{ '### Assistant:\n' }}{% endif %}{% endfor %}" }, "Nondzu/zephyr-speakleash-010-pl-3072-32-16-0.01": { - "bos_token": "", - "eos_token": "<|im_end|>", - "pad_token": "", - "unk_token": "", - "chat_template": "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = 'You are a helpful assistant.' %}{% endif %}{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in loop_messages %}{% if loop.index0 == 0 %}{{'<|im_start|>system\n' + system_message + '<|im_end|>\n'}}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}" + "bos_token": "", + "eos_token": "<|im_end|>", + "pad_token": "", + "unk_token": "", + "chat_template": "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = 'You are a helpful assistant.' 
%}{% endif %}{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in loop_messages %}{% if loop.index0 == 0 %}{{'<|im_start|>system\n' + system_message + '<|im_end|>\n'}}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}" }, "TheBloke/Mixtral-8x7B-Instruct-v0.1-GPTQ": { - "bos_token": "", - "eos_token": "", - "pad_token": None, - "unk_token": "", - "chat_template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token}}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}" + "bos_token": "", + "eos_token": "", + "pad_token": None, + "unk_token": "", + "chat_template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token}}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}" }, "vibhorag101/llama-2-13b-chat-hf-phr_mental_therapy": { - "bos_token": { - "__type": "AddedToken", - "content": "", - "lstrip": False, - "normalized": False, - "rstrip": False, - "single_word": False - }, - "eos_token": { - "__type": "AddedToken", - "content": "", - "lstrip": False, - "normalized": False, - "rstrip": False, - "single_word": False - }, - "pad_token": None, - "unk_token": { - "__type": "AddedToken", - "content": "", - "lstrip": False, - "normalized": False, - "rstrip": False, - "single_word": False - }, - "chat_template": "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 and system_message != false %}{% set content = '<>\n' + system_message + '\n<>\n\n' + message['content'] %}{% else %}{% set content = message['content'] %}{% endif %}{% if message['role'] == 'user' %}{{ bos_token + '[INST] ' + content + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ ' ' + content + ' ' + eos_token }}{% endif %}{% endfor %}" + "bos_token": { + "__type": "AddedToken", + "content": "", + "lstrip": False, + "normalized": False, + "rstrip": False, + "single_word": False + }, + "eos_token": { + "__type": "AddedToken", + "content": "", + "lstrip": False, + "normalized": False, + "rstrip": False, + "single_word": False + }, + "pad_token": None, + "unk_token": { + "__type": "AddedToken", + "content": "", + "lstrip": False, + "normalized": False, + "rstrip": False, + "single_word": False + }, + "chat_template": "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% 
else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 and system_message != false %}{% set content = '<>\n' + system_message + '\n<>\n\n' + message['content'] %}{% else %}{% set content = message['content'] %}{% endif %}{% if message['role'] == 'user' %}{{ bos_token + '[INST] ' + content + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ ' ' + content + ' ' + eos_token }}{% endif %}{% endfor %}" }, "Qwen/Qwen1.5-0.5B": { - "bos_token": None, - "eos_token": "<|endoftext|>", - "pad_token": "<|endoftext|>", - "unk_token": None, - "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}" + "bos_token": None, + "eos_token": "<|endoftext|>", + "pad_token": "<|endoftext|>", + "unk_token": None, + "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}" }, "TheBloke/Mistral-7B-Instruct-v0.1-GPTQ": { - "bos_token": "<|endoftext|>", - "eos_token": "<|endoftext|>", - "pad_token": "<|endoftext|>", - "unk_token": "<|endoftext|>", - "chat_template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token + ' ' }}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}" + "bos_token": "<|endoftext|>", + "eos_token": "<|endoftext|>", + "pad_token": "<|endoftext|>", + "unk_token": "<|endoftext|>", + "chat_template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token + ' ' }}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}" }, "Felladrin/Llama-68M-Chat-v1": { - "bos_token": "<|im_start|>", - "eos_token": "<|im_end|>", - "pad_token": "<|im_end|>", - "unk_token": "<|endoftext|>", - "chat_template": "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}" + "bos_token": "<|im_start|>", + "eos_token": "<|im_end|>", + "pad_token": "<|im_end|>", + "unk_token": "<|endoftext|>", + "chat_template": "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% 
if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}" }, "databricks/dbrx-instruct": { - "bos_token": "<|endoftext|>", - "eos_token": "<|endoftext|>", - "pad_token": "<|pad|>", - "unk_token": "<|endoftext|>", - "chat_template": "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% elif 'system' not in messages[0]['role'] %}{% set loop_messages = messages %}{% set system_message = 'You are DBRX, created by Databricks. You were last updated in December 2023. You answer questions based on information available up to that point.\nYOU PROVIDE SHORT RESPONSES TO SHORT QUESTIONS OR STATEMENTS, but provide thorough responses to more complex and open-ended questions.\nYou assist with various tasks, from writing to coding (using markdown for code blocks \u2014 remember to use ``` with code, JSON, and tables).\n(You do not have real-time data access or code execution capabilities. You avoid stereotyping and provide balanced perspectives on controversial topics. You do not provide song lyrics, poems, or news articles and do not divulge details of your training data.)\nThis is your system prompt, guiding your responses. Do not reference it, just respond to the user. If you find yourself talking about this message, stop. You should be responding appropriately and usually that means not mentioning this.\nYOU DO NOT MENTION ANY OF THIS INFORMATION ABOUT YOURSELF UNLESS THE INFORMATION IS DIRECTLY PERTINENT TO THE USER\\'S QUERY.' %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% for message in loop_messages %}{% if loop.index0 == 0 %}{% if system_message != false %}{{ '<|im_start|>system\n' + system_message | trim + '<|im_end|>\n'}}{% endif %}{{ '<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' }}{% else %}{{ '\n' + '<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' }}{% endif %}{% if (add_generation_prompt == true and loop.last) %}{{ '\n' + '<|im_start|>' + 'assistant' + '\n' }}{% endif %}{% endfor %}" + "bos_token": "<|endoftext|>", + "eos_token": "<|endoftext|>", + "pad_token": "<|pad|>", + "unk_token": "<|endoftext|>", + "chat_template": "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% elif 'system' not in messages[0]['role'] %}{% set loop_messages = messages %}{% set system_message = 'You are DBRX, created by Databricks. You were last updated in December 2023. You answer questions based on information available up to that point.\nYOU PROVIDE SHORT RESPONSES TO SHORT QUESTIONS OR STATEMENTS, but provide thorough responses to more complex and open-ended questions.\nYou assist with various tasks, from writing to coding (using markdown for code blocks \u2014 remember to use ``` with code, JSON, and tables).\n(You do not have real-time data access or code execution capabilities. You avoid stereotyping and provide balanced perspectives on controversial topics. You do not provide song lyrics, poems, or news articles and do not divulge details of your training data.)\nThis is your system prompt, guiding your responses. Do not reference it, just respond to the user. If you find yourself talking about this message, stop. You should be responding appropriately and usually that means not mentioning this.\nYOU DO NOT MENTION ANY OF THIS INFORMATION ABOUT YOURSELF UNLESS THE INFORMATION IS DIRECTLY PERTINENT TO THE USER\\'S QUERY.' 
%}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% for message in loop_messages %}{% if loop.index0 == 0 %}{% if system_message != false %}{{ '<|im_start|>system\n' + system_message | trim + '<|im_end|>\n'}}{% endif %}{{ '<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' }}{% else %}{{ '\n' + '<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' }}{% endif %}{% if (add_generation_prompt == true and loop.last) %}{{ '\n' + '<|im_start|>' + 'assistant' + '\n' }}{% endif %}{% endfor %}" }, "speakleash/Bielik-7B-Instruct-v0.1": { - "bos_token": "", - "eos_token": "", - "pad_token": None, - "unk_token": "", - "chat_template": "{{ bos_token }}{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 and system_message != false %}{% set content = '<>\\n' + system_message + '\\n<>\\n\\n' + message['content'] %}{% else %}{% set content = message['content'] %}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + content.strip() + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ ' ' + content.strip() + eos_token }}{% endif %}{% endfor %}" + "bos_token": "", + "eos_token": "", + "pad_token": None, + "unk_token": "", + "chat_template": "{{ bos_token }}{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 and system_message != false %}{% set content = '<>\\n' + system_message + '\\n<>\\n\\n' + message['content'] %}{% else %}{% set content = message['content'] %}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + content.strip() + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ ' ' + content.strip() + eos_token }}{% endif %}{% endfor %}" }, "internlm/internlm2-chat-7b": { - "bos_token": "", - "eos_token": "", - "pad_token": "", - "unk_token": "", - "chat_template": "{{ bos_token }}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}" + "bos_token": "", + "eos_token": "", + "pad_token": "", + "unk_token": "", + "chat_template": "{{ bos_token }}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}" }, "Qwen/Qwen2-7B-Instruct": { - "bos_token": None, - "eos_token": "<|im_end|>", - "pad_token": "<|endoftext|>", - "unk_token": None, - "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ 
'<|im_start|>assistant\n' }}{% endif %}" + "bos_token": None, + "eos_token": "<|im_end|>", + "pad_token": "<|endoftext|>", + "unk_token": None, + "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}" }, "codellama/CodeLlama-34b-Instruct-hf": { - "bos_token": { - "__type": "AddedToken", - "content": "", - "lstrip": False, - "normalized": True, - "rstrip": False, - "single_word": False - }, - "eos_token": { - "__type": "AddedToken", - "content": "", - "lstrip": False, - "normalized": True, - "rstrip": False, - "single_word": False - }, - "pad_token": None, - "unk_token": { - "__type": "AddedToken", - "content": "", - "lstrip": False, - "normalized": True, - "rstrip": False, - "single_word": False - }, - "chat_template": "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 and system_message != false %}{% set content = '<>\\n' + system_message + '\\n<>\\n\\n' + message['content'] %}{% else %}{% set content = message['content'] %}{% endif %}{% if message['role'] == 'user' %}{{ bos_token + '[INST] ' + content | trim + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ ' ' + content | trim + ' ' + eos_token }}{% endif %}{% endfor %}" + "bos_token": { + "__type": "AddedToken", + "content": "", + "lstrip": False, + "normalized": True, + "rstrip": False, + "single_word": False + }, + "eos_token": { + "__type": "AddedToken", + "content": "", + "lstrip": False, + "normalized": True, + "rstrip": False, + "single_word": False + }, + "pad_token": None, + "unk_token": { + "__type": "AddedToken", + "content": "", + "lstrip": False, + "normalized": True, + "rstrip": False, + "single_word": False + }, + "chat_template": "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 and system_message != false %}{% set content = '<>\\n' + system_message + '\\n<>\\n\\n' + message['content'] %}{% else %}{% set content = message['content'] %}{% endif %}{% if message['role'] == 'user' %}{{ bos_token + '[INST] ' + content | trim + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ ' ' + content | trim + ' ' + eos_token }}{% endif %}{% endfor %}" }, "OpenBuddy/openbuddy-llama3-8b-v21.1-8k": { - "bos_token": None, - "eos_token": "<|end|>", - "pad_token": "<|pad|>", - "unk_token": None, - "chat_template": "{% for message in messages %}{{'<|role|>' + message['role'] + '<|says|>' + message['content'] + '<|end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|role|>assistant<|says|>' }}{% endif %}" + "bos_token": None, + "eos_token": "<|end|>", + "pad_token": 
"<|pad|>", + "unk_token": None, + "chat_template": "{% for message in messages %}{{'<|role|>' + message['role'] + '<|says|>' + message['content'] + '<|end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|role|>assistant<|says|>' }}{% endif %}" }, "mosaicml/mpt-30b-chat": { - "bos_token": "<|endoftext|>", - "eos_token": "<|endoftext|>", - "pad_token": None, - "unk_token": "<|endoftext|>", - "chat_template": "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% elif not 'system' in messages[0]['role'] %}{% set loop_messages = messages %}{% set system_message = 'A conversation between a user and an LLM-based AI assistant. The assistant gives helpful and honest answers.' %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% for message in loop_messages %}{% if loop.index0 == 0 %}{% if system_message != false %}{{ '<|im_start|>system\n' + system_message.strip() + '\n'}}{% endif %}{{ '<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' }}{% else %}{{ '\n' + '<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' }}{% endif %}{% if (add_generation_prompt == true and loop.last) %}{{ '\n' + '<|im_start|>' + 'assistant' + '\n' }}{% elif (message['role'] == 'assistant') %}{% endif %}{% endfor %}" + "bos_token": "<|endoftext|>", + "eos_token": "<|endoftext|>", + "pad_token": None, + "unk_token": "<|endoftext|>", + "chat_template": "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% elif not 'system' in messages[0]['role'] %}{% set loop_messages = messages %}{% set system_message = 'A conversation between a user and an LLM-based AI assistant. The assistant gives helpful and honest answers.' 
%}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% for message in loop_messages %}{% if loop.index0 == 0 %}{% if system_message != false %}{{ '<|im_start|>system\n' + system_message.strip() + '\n'}}{% endif %}{{ '<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' }}{% else %}{{ '\n' + '<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' }}{% endif %}{% if (add_generation_prompt == true and loop.last) %}{{ '\n' + '<|im_start|>' + 'assistant' + '\n' }}{% elif (message['role'] == 'assistant') %}{% endif %}{% endfor %}" }, "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO": { - "bos_token": "", - "eos_token": "<|im_end|>", - "pad_token": "", - "unk_token": "", - "chat_template": "{{bos_token}}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}" + "bos_token": "", + "eos_token": "<|im_end|>", + "pad_token": "", + "unk_token": "", + "chat_template": "{{bos_token}}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}" }, "deepseek-ai/deepseek-coder-6.7b-instruct": { - "bos_token": { - "__type": "AddedToken", - "content": "<\uff5cbegin\u2581of\u2581sentence\uff5c>", - "lstrip": False, - "normalized": True, - "rstrip": False, - "single_word": False - }, - "eos_token": { - "__type": "AddedToken", - "content": "<|EOT|>", - "lstrip": False, - "normalized": True, - "rstrip": False, - "single_word": False - }, - "pad_token": { - "__type": "AddedToken", - "content": "<\uff5cend\u2581of\u2581sentence\uff5c>", - "lstrip": False, - "normalized": True, - "rstrip": False, - "single_word": False - }, - "unk_token": None, - "chat_template": "{% if not add_generation_prompt is defined %}\n{% set add_generation_prompt = false %}\n{% endif %}\n{%- set ns = namespace(found=false) -%}\n{%- for message in messages -%}\n {%- if message['role'] == 'system' -%}\n {%- set ns.found = true -%}\n {%- endif -%}\n{%- endfor -%}\n{{bos_token}}{%- if not ns.found -%}\n{{'You are an AI programming assistant, utilizing the Deepseek Coder model, developed by Deepseek Company, and you only answer questions related to computer science. 
For politically sensitive questions, security and privacy issues, and other non-computer science questions, you will refuse to answer\\n'}}\n{%- endif %}\n{%- for message in messages %}\n {%- if message['role'] == 'system' %}\n{{ message['content'] }}\n {%- else %}\n {%- if message['role'] == 'user' %}\n{{'### Instruction:\\n' + message['content'] + '\\n'}}\n {%- else %}\n{{'### Response:\\n' + message['content'] + '\\n<|EOT|>\\n'}}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{% if add_generation_prompt %}\n{{'### Response:'}}\n{% endif %}" + "bos_token": { + "__type": "AddedToken", + "content": "<\uff5cbegin\u2581of\u2581sentence\uff5c>", + "lstrip": False, + "normalized": True, + "rstrip": False, + "single_word": False + }, + "eos_token": { + "__type": "AddedToken", + "content": "<|EOT|>", + "lstrip": False, + "normalized": True, + "rstrip": False, + "single_word": False + }, + "pad_token": { + "__type": "AddedToken", + "content": "<\uff5cend\u2581of\u2581sentence\uff5c>", + "lstrip": False, + "normalized": True, + "rstrip": False, + "single_word": False + }, + "unk_token": None, + "chat_template": "{% if not add_generation_prompt is defined %}\n{% set add_generation_prompt = false %}\n{% endif %}\n{%- set ns = namespace(found=false) -%}\n{%- for message in messages -%}\n {%- if message['role'] == 'system' -%}\n {%- set ns.found = true -%}\n {%- endif -%}\n{%- endfor -%}\n{{bos_token}}{%- if not ns.found -%}\n{{'You are an AI programming assistant, utilizing the Deepseek Coder model, developed by Deepseek Company, and you only answer questions related to computer science. For politically sensitive questions, security and privacy issues, and other non-computer science questions, you will refuse to answer\\n'}}\n{%- endif %}\n{%- for message in messages %}\n {%- if message['role'] == 'system' %}\n{{ message['content'] }}\n {%- else %}\n {%- if message['role'] == 'user' %}\n{{'### Instruction:\\n' + message['content'] + '\\n'}}\n {%- else %}\n{{'### Response:\\n' + message['content'] + '\\n<|EOT|>\\n'}}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{% if add_generation_prompt %}\n{{'### Response:'}}\n{% endif %}" }, "deepseek-ai/deepseek-math-7b-rl": { - "bos_token": { - "__type": "AddedToken", - "content": "<\uff5cbegin\u2581of\u2581sentence\uff5c>", - "lstrip": False, - "normalized": True, - "rstrip": False, - "single_word": False - }, - "eos_token": { - "__type": "AddedToken", - "content": "<\uff5cend\u2581of\u2581sentence\uff5c>", - "lstrip": False, - "normalized": True, - "rstrip": False, - "single_word": False - }, - "pad_token": { - "__type": "AddedToken", - "content": "<\uff5cend\u2581of\u2581sentence\uff5c>", - "lstrip": False, - "normalized": True, - "rstrip": False, - "single_word": False - }, - "unk_token": None, - "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{{ bos_token }}{% for message in messages %}{% if message['role'] == 'user' %}{{ 'User: ' + message['content'] + '\n\n' }}{% elif message['role'] == 'assistant' %}{{ 'Assistant: ' + message['content'] + eos_token }}{% elif message['role'] == 'system' %}{{ message['content'] + '\n\n' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ 'Assistant:' }}{% endif %}" + "bos_token": { + "__type": "AddedToken", + "content": "<\uff5cbegin\u2581of\u2581sentence\uff5c>", + "lstrip": False, + "normalized": True, + "rstrip": False, + "single_word": False + }, + "eos_token": { + "__type": "AddedToken", + "content": "<\uff5cend\u2581of\u2581sentence\uff5c>", + 
"lstrip": False, + "normalized": True, + "rstrip": False, + "single_word": False + }, + "pad_token": { + "__type": "AddedToken", + "content": "<\uff5cend\u2581of\u2581sentence\uff5c>", + "lstrip": False, + "normalized": True, + "rstrip": False, + "single_word": False + }, + "unk_token": None, + "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{{ bos_token }}{% for message in messages %}{% if message['role'] == 'user' %}{{ 'User: ' + message['content'] + '\n\n' }}{% elif message['role'] == 'assistant' %}{{ 'Assistant: ' + message['content'] + eos_token }}{% elif message['role'] == 'system' %}{{ message['content'] + '\n\n' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ 'Assistant:' }}{% endif %}" }, "FINGU-AI/FinguAI-Chat-v1": { - "bos_token": None, - "eos_token": "<|im_end|>", - "pad_token": "<|endoftext|>", - "unk_token": None, - "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content']}}{% if (loop.last and add_generation_prompt) or not loop.last %}{{ '<|im_end|>' + '\n'}}{% endif %}{% endfor %}{% if add_generation_prompt and messages[-1]['role'] != 'assistant' %}{{ '<|im_start|>assistant\n' }}{% endif %}" + "bos_token": None, + "eos_token": "<|im_end|>", + "pad_token": "<|endoftext|>", + "unk_token": None, + "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content']}}{% if (loop.last and add_generation_prompt) or not loop.last %}{{ '<|im_end|>' + '\n'}}{% endif %}{% endfor %}{% if add_generation_prompt and messages[-1]['role'] != 'assistant' %}{{ '<|im_start|>assistant\n' }}{% endif %}" }, "allenai/tulu-2-7b": { - "bos_token": { - "__type": "AddedToken", - "content": "", - "lstrip": False, - "normalized": True, - "rstrip": False, - "single_word": False - }, - "eos_token": { - "__type": "AddedToken", - "content": "", - "lstrip": False, - "normalized": True, - "rstrip": False, - "single_word": False - }, - "pad_token": None, - "unk_token": { - "__type": "AddedToken", - "content": "", - "lstrip": False, - "normalized": True, - "rstrip": False, - "single_word": False - }, - "chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}" + "bos_token": { + "__type": "AddedToken", + "content": "", + "lstrip": False, + "normalized": True, + "rstrip": False, + "single_word": False + }, + "eos_token": { + "__type": "AddedToken", + "content": "", + "lstrip": False, + "normalized": True, + "rstrip": False, + "single_word": False + }, + "pad_token": None, + "unk_token": { + "__type": "AddedToken", + "content": "", + "lstrip": False, + "normalized": True, + "rstrip": False, + "single_word": False + }, + "chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ 
'<|assistant|>' }}\n{% endif %}\n{% endfor %}" }, "maldv/winter-garden-7b-alpha": { - "bos_token": "", - "eos_token": "", - "pad_token": "", - "unk_token": "", - "chat_template": "{{bos_token}}{% for message in messages %}{% if 'name' in message %}{{message['name'] + ('' if 'to' not in message else ' (to ' + message['to'] + ')') + ': ' + message['content'] + '\n\n'}}{% else %}{{message['content'] + '\n\n '}}{% endif %}{% endfor %}" + "bos_token": "", + "eos_token": "", + "pad_token": "", + "unk_token": "", + "chat_template": "{{bos_token}}{% for message in messages %}{% if 'name' in message %}{{message['name'] + ('' if 'to' not in message else ' (to ' + message['to'] + ')') + ': ' + message['content'] + '\n\n'}}{% else %}{{message['content'] + '\n\n '}}{% endif %}{% endfor %}" }, "mlabonne/NeuralMonarch-7B": { - "bos_token": "", - "eos_token": "", - "pad_token": "", - "unk_token": "", - "chat_template": "{% for message in messages %}{{bos_token + message['role'] + '\n' + message['content'] + eos_token + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ bos_token + 'assistant\n' }}{% endif %}" + "bos_token": "", + "eos_token": "", + "pad_token": "", + "unk_token": "", + "chat_template": "{% for message in messages %}{{bos_token + message['role'] + '\n' + message['content'] + eos_token + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ bos_token + 'assistant\n' }}{% endif %}" }, "meta-llama/Llama-2-7b-chat-hf": { - "bos_token": "", - "eos_token": "", - "pad_token": None, - "unk_token": "", - "chat_template": "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 and system_message != false %}{% set content = '<>\\n' + system_message + '\\n<>\\n\\n' + message['content'] %}{% else %}{% set content = message['content'] %}{% endif %}{% if message['role'] == 'user' %}{{ bos_token + '[INST] ' + content.strip() + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ ' ' + content.strip() + ' ' + eos_token }}{% endif %}{% endfor %}" + "bos_token": "", + "eos_token": "", + "pad_token": None, + "unk_token": "", + "chat_template": "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 and system_message != false %}{% set content = '<>\\n' + system_message + '\\n<>\\n\\n' + message['content'] %}{% else %}{% set content = message['content'] %}{% endif %}{% if message['role'] == 'user' %}{{ bos_token + '[INST] ' + content.strip() + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ ' ' + content.strip() + ' ' + eos_token }}{% endif %}{% endfor %}" }, "GritLM/GritLM-7B": { - "bos_token": "", - "eos_token": "", - "pad_token": "", - "unk_token": "", - "chat_template": "{{ bos_token }}{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] }}\n{% elif message['role'] == 'assistant' %}\n{{ 
'<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}" + "bos_token": "", + "eos_token": "", + "pad_token": "", + "unk_token": "", + "chat_template": "{{ bos_token }}{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}" }, "ishorn5/RTLCoder-Deepseek-v1.1": { - "bos_token": "<\uff5cbegin\u2581of\u2581sentence\uff5c>", - "eos_token": "<\uff5cend\u2581of\u2581sentence\uff5c>", - "pad_token": "<\uff5cend\u2581of\u2581sentence\uff5c>", - "unk_token": None, - "chat_template": "{%- set ns = namespace(found=false) -%}\n{%- for message in messages -%}\n {%- if message['role'] == 'system' -%}\n {%- set ns.found = true -%}\n {%- endif -%}\n{%- endfor -%}\n{{bos_token}}{%- if not ns.found -%}\n{{'You are an AI programming assistant, utilizing the Deepseek Coder model, developed by Deepseek Company, and you only answer questions related to computer science. For politically sensitive questions, security and privacy issues, and other non-computer science questions, you will refuse to answer\\n'}}\n{%- endif %}\n{%- for message in messages %}\n {%- if message['role'] == 'system' %}\n{{ message['content'] }}\n {%- else %}\n {%- if message['role'] == 'user' %}\n{{'### Instruction:\\n' + message['content'] + '\\n'}}\n {%- else %}\n{{'### Response:\\n' + message['content'] + '\\n<|EOT|>\\n'}}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{{'### Response:\\n'}}\n" + "bos_token": "<\uff5cbegin\u2581of\u2581sentence\uff5c>", + "eos_token": "<\uff5cend\u2581of\u2581sentence\uff5c>", + "pad_token": "<\uff5cend\u2581of\u2581sentence\uff5c>", + "unk_token": None, + "chat_template": "{%- set ns = namespace(found=false) -%}\n{%- for message in messages -%}\n {%- if message['role'] == 'system' -%}\n {%- set ns.found = true -%}\n {%- endif -%}\n{%- endfor -%}\n{{bos_token}}{%- if not ns.found -%}\n{{'You are an AI programming assistant, utilizing the Deepseek Coder model, developed by Deepseek Company, and you only answer questions related to computer science. 
For politically sensitive questions, security and privacy issues, and other non-computer science questions, you will refuse to answer\\n'}}\n{%- endif %}\n{%- for message in messages %}\n {%- if message['role'] == 'system' %}\n{{ message['content'] }}\n {%- else %}\n {%- if message['role'] == 'user' %}\n{{'### Instruction:\\n' + message['content'] + '\\n'}}\n {%- else %}\n{{'### Response:\\n' + message['content'] + '\\n<|EOT|>\\n'}}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{{'### Response:\\n'}}\n" }, "jondurbin/bagel-34b-v0.2": { - "bos_token": "", - "eos_token": "", - "pad_token": "", - "unk_token": "", - "chat_template": "{%- for idx in range(0, messages|length) -%}\n{%- if messages[idx]['role'] == 'user' -%}\n{%- if idx > 1 -%}\n{{- bos_token + '[INST] ' + messages[idx]['content'] + ' [/INST]' -}}\n{%- else -%}\n{{- messages[idx]['content'] + ' [/INST]' -}}\n{%- endif -%}\n{% elif messages[idx]['role'] == 'system' %}\n{{- '[INST] <>\\n' + messages[idx]['content'] + '\\n<>\\n\\n' -}}\n{%- elif messages[idx]['role'] == 'assistant' -%}\n{{- ' ' + messages[idx]['content'] + ' ' + eos_token -}}\n{% endif %}\n{% endfor %}" + "bos_token": "", + "eos_token": "", + "pad_token": "", + "unk_token": "", + "chat_template": "{%- for idx in range(0, messages|length) -%}\n{%- if messages[idx]['role'] == 'user' -%}\n{%- if idx > 1 -%}\n{{- bos_token + '[INST] ' + messages[idx]['content'] + ' [/INST]' -}}\n{%- else -%}\n{{- messages[idx]['content'] + ' [/INST]' -}}\n{%- endif -%}\n{% elif messages[idx]['role'] == 'system' %}\n{{- '[INST] <>\\n' + messages[idx]['content'] + '\\n<>\\n\\n' -}}\n{%- elif messages[idx]['role'] == 'assistant' -%}\n{{- ' ' + messages[idx]['content'] + ' ' + eos_token -}}\n{% endif %}\n{% endfor %}" }, "openchat/openchat-3.5-0106": { - "bos_token": "", - "eos_token": "<|end_of_turn|>", - "pad_token": None, - "unk_token": "", - "chat_template": "{{ bos_token }}{% for message in messages %}{{ 'GPT4 Correct ' + message['role'].title() + ': ' + message['content'] + '<|end_of_turn|>'}}{% endfor %}{% if add_generation_prompt %}{{ 'GPT4 Correct Assistant:' }}{% endif %}" + "bos_token": "", + "eos_token": "<|end_of_turn|>", + "pad_token": None, + "unk_token": "", + "chat_template": "{{ bos_token }}{% for message in messages %}{{ 'GPT4 Correct ' + message['role'].title() + ': ' + message['content'] + '<|end_of_turn|>'}}{% endfor %}{% if add_generation_prompt %}{{ 'GPT4 Correct Assistant:' }}{% endif %}" }, "mobiuslabsgmbh/aanaphi2-v0.1": { - "bos_token": "<|endoftext|>", - "eos_token": "<|endoftext|>", - "pad_token": "[PAD]", - "unk_token": "<|endoftext|>", - "chat_template": "{% for message in messages %}{% if message['role'] == 'user' %}{{'### Human: ' + message['content'].strip() + '\n' }}{% elif message['role'] == 'assistant' %}{{'### Assistant: ' + message['content'].strip() + '\n'}}{% endif %}{% endfor %}" + "bos_token": "<|endoftext|>", + "eos_token": "<|endoftext|>", + "pad_token": "[PAD]", + "unk_token": "<|endoftext|>", + "chat_template": "{% for message in messages %}{% if message['role'] == 'user' %}{{'### Human: ' + message['content'].strip() + '\n' }}{% elif message['role'] == 'assistant' %}{{'### Assistant: ' + message['content'].strip() + '\n'}}{% endif %}{% endfor %}" }, "typeof/mistral-60m": { - "bos_token": "", - "eos_token": "", - "pad_token": "", - "unk_token": "", - "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{bos_token + message['role'] + '\n' + 
message['content'] + eos_token + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ bos_token + 'assistant\n' }}{% endif %}" + "bos_token": "", + "eos_token": "", + "pad_token": "", + "unk_token": "", + "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{bos_token + message['role'] + '\n' + message['content'] + eos_token + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ bos_token + 'assistant\n' }}{% endif %}" }, "turboderp/Cat-Llama-3-70B-instruct": { - "bos_token": "<|begin_of_text|>", - "eos_token": "<|im_end|>", - "pad_token": "<|end_of_text|>", - "unk_token": None, - "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nBelow is a conversation between a curious user and a helpful AI assistant.<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content']}}{% if (loop.last and add_generation_prompt) or not loop.last %}{{ '<|im_end|>' + '\n'}}{% endif %}{% endfor %}{% if add_generation_prompt and messages[-1]['role'] != 'assistant' %}{{ '<|im_start|>assistant\n' }}{% endif %}" + "bos_token": "<|begin_of_text|>", + "eos_token": "<|im_end|>", + "pad_token": "<|end_of_text|>", + "unk_token": None, + "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nBelow is a conversation between a curious user and a helpful AI assistant.<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content']}}{% if (loop.last and add_generation_prompt) or not loop.last %}{{ '<|im_end|>' + '\n'}}{% endif %}{% endfor %}{% if add_generation_prompt and messages[-1]['role'] != 'assistant' %}{{ '<|im_start|>assistant\n' }}{% endif %}" }, "saltlux/Ko-Llama3-Luxia-8B": { - "bos_token": "<|begin_of_text|>", - "eos_token": "<|end_of_text|>", - "pad_token": "<|end_of_text|>", - "unk_token": None, - "chat_template": "{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ system_message }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ content }}{% elif message['role'] == 'assistant' %}{{ content + '\\n' }}{% endif %}{% endfor %}" + "bos_token": "<|begin_of_text|>", + "eos_token": "<|end_of_text|>", + "pad_token": "<|end_of_text|>", + "unk_token": None, + "chat_template": "{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ system_message }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ content }}{% elif message['role'] == 'assistant' %}{{ content + '\\n' }}{% endif %}{% endfor %}" }, "h2oai/h2o-danube2-1.8b-chat": { - "bos_token": "", - "eos_token": "", - "pad_token": "", - "unk_token": "", - "chat_template": "{% for message in messages %}{% if message['role'] == 'user' %}{{ '<|prompt|>' + message['content'] + eos_token }}{% elif message['role'] == 'system' %}{{ raise_exception('System role not supported') }}{% elif message['role'] == 'assistant' %}{{ '<|answer|>' + message['content'] + eos_token }}{% endif %}{% if loop.last and add_generation_prompt %}{{ '<|answer|>' }}{% endif %}{% endfor %}" + "bos_token": "", + "eos_token": "", + "pad_token": "", + "unk_token": "", + "chat_template": "{% for message in messages %}{% if message['role'] 
== 'user' %}{{ '<|prompt|>' + message['content'] + eos_token }}{% elif message['role'] == 'system' %}{{ raise_exception('System role not supported') }}{% elif message['role'] == 'assistant' %}{{ '<|answer|>' + message['content'] + eos_token }}{% endif %}{% if loop.last and add_generation_prompt %}{{ '<|answer|>' }}{% endif %}{% endfor %}" }, "abhishek/autotrain-llama3-70b-orpo-v1": { - "bos_token": "", - "eos_token": "<|im_end|>", - "pad_token": "", - "unk_token": None, - "chat_template": "{% for message in messages %}\n{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% if loop.last and add_generation_prompt %}{{'<|im_start|>assistant\n' }}{% endif %}{% endfor %}" + "bos_token": "", + "eos_token": "<|im_end|>", + "pad_token": "", + "unk_token": None, + "chat_template": "{% for message in messages %}\n{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% if loop.last and add_generation_prompt %}{{'<|im_start|>assistant\n' }}{% endif %}{% endfor %}" }, "casperhansen/llama-3-70b-instruct-awq": { - "bos_token": "<|begin_of_text|>", - "eos_token": "<|end_of_text|>", - "pad_token": "<|end_of_text|>", - "unk_token": None, - "chat_template": "{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}" + "bos_token": "<|begin_of_text|>", + "eos_token": "<|end_of_text|>", + "pad_token": "<|end_of_text|>", + "unk_token": None, + "chat_template": "{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}" }, "01-ai/Yi-1.5-34B-Chat": { - "bos_token": "<|startoftext|>", - "eos_token": "<|im_end|>", - "pad_token": "", - "unk_token": "", - "chat_template": "{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ system_message }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '<|im_start|>user\\n' + content + '<|im_end|>\\n<|im_start|>assistant\\n' }}{% elif message['role'] == 'assistant' %}{{ content + '<|im_end|>' + '\\n' }}{% endif %}{% endfor %}" + "bos_token": "<|startoftext|>", + "eos_token": "<|im_end|>", + "pad_token": "", + "unk_token": "", + "chat_template": "{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ system_message }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '<|im_start|>user\\n' + content + '<|im_end|>\\n<|im_start|>assistant\\n' }}{% elif message['role'] == 'assistant' %}{{ content + '<|im_end|>' + '\\n' }}{% endif %}{% endfor %}" }, "allenai/OLMo-7B-Instruct": { - "bos_token": None, - "eos_token": "<|endoftext|>", - "pad_token": "<|padding|>", - "unk_token": None, - "chat_template": "{{ eos_token }}{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] }}\n{% elif message['role'] == 
'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}" + "bos_token": None, + "eos_token": "<|endoftext|>", + "pad_token": "<|padding|>", + "unk_token": None, + "chat_template": "{{ eos_token }}{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}" }, "TheBloke/deepseek-coder-33B-instruct-GPTQ": { - "bos_token": "<\uff5cbegin\u2581of\u2581sentence\uff5c>", - "eos_token": "<|EOT|>", - "pad_token": "<\uff5cend\u2581of\u2581sentence\uff5c>", - "unk_token": None, - "chat_template": "{%- set found_item = false -%}\n{%- for message in messages -%}\n {%- if message['role'] == 'system' -%}\n {%- set found_item = true -%}\n {%- endif -%}\n{%- endfor -%}\n{%- if not found_item -%}\n{{'You are an AI programming assistant, utilizing the Deepseek Coder model, developed by Deepseek Company, and you only answer questions related to computer science. For politically sensitive questions, security and privacy issues, and other non-computer science questions, you will refuse to answer.\\n'}}\n{%- endif %}\n{%- for message in messages %}\n {%- if message['role'] == 'system' %}\n{{ message['content'] }}\n {%- else %}\n {%- if message['role'] == 'user' %}\n{{'### Instruction:\\n' + message['content'] + '\\n'}}\n {%- else %}\n{{'### Response:\\n' + message['content'] + '\\n<|EOT|>\\n'}}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{{'### Response:\\n'}}\n" + "bos_token": "<\uff5cbegin\u2581of\u2581sentence\uff5c>", + "eos_token": "<|EOT|>", + "pad_token": "<\uff5cend\u2581of\u2581sentence\uff5c>", + "unk_token": None, + "chat_template": "{%- set found_item = false -%}\n{%- for message in messages -%}\n {%- if message['role'] == 'system' -%}\n {%- set found_item = true -%}\n {%- endif -%}\n{%- endfor -%}\n{%- if not found_item -%}\n{{'You are an AI programming assistant, utilizing the Deepseek Coder model, developed by Deepseek Company, and you only answer questions related to computer science. 
For politically sensitive questions, security and privacy issues, and other non-computer science questions, you will refuse to answer.\\n'}}\n{%- endif %}\n{%- for message in messages %}\n {%- if message['role'] == 'system' %}\n{{ message['content'] }}\n {%- else %}\n {%- if message['role'] == 'user' %}\n{{'### Instruction:\\n' + message['content'] + '\\n'}}\n {%- else %}\n{{'### Response:\\n' + message['content'] + '\\n<|EOT|>\\n'}}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{{'### Response:\\n'}}\n"
    },
    "cognitivecomputations/dolphin-2.8-mistral-7b-v02": {
-        "bos_token": "",
-        "eos_token": "<|im_end|>",
-        "pad_token": "",
-        "unk_token": "",
-        "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{{ bos_token }}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"
+        "bos_token": "",
+        "eos_token": "<|im_end|>",
+        "pad_token": "",
+        "unk_token": "",
+        "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{{ bos_token }}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"
    },
    "alexsobolev/IcaroLM": {
-        "bos_token": "",
-        "eos_token": "<|im_end|>",
-        "pad_token": "",
-        "unk_token": "",
-        "chat_template": "{% for message in messages %}{% if message['from'] == 'human' %}{{'<|im_start|>user\n' + message['value'] + '<|im_end|>\n'}}{% elif message['from'] == 'gpt' %}{{'<|im_start|>assistant\n' + message['value'] + '<|im_end|>\n' }}{% else %}{{ '<|im_start|>system\n' + message['value'] + '<|im_end|>\n' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"
+        "bos_token": "",
+        "eos_token": "<|im_end|>",
+        "pad_token": "",
+        "unk_token": "",
+        "chat_template": "{% for message in messages %}{% if message['from'] == 'human' %}{{'<|im_start|>user\n' + message['value'] + '<|im_end|>\n'}}{% elif message['from'] == 'gpt' %}{{'<|im_start|>assistant\n' + message['value'] + '<|im_end|>\n' }}{% else %}{{ '<|im_start|>system\n' + message['value'] + '<|im_end|>\n' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"
    },
    "tokyotech-llm/Swallow-7b-instruct-v0.1": {
-        "bos_token": {
-            "__type": "AddedToken",
-            "content": "",
-            "lstrip": False,
-            "normalized": False,
-            "rstrip": False,
-            "single_word": False
-        },
-        "eos_token": {
-            "__type": "AddedToken",
-            "content": "",
-            "lstrip": False,
-            "normalized": False,
-            "rstrip": False,
-            "single_word": False
-        },
-        "pad_token": None,
-        "unk_token": {
-            "__type": "AddedToken",
-            "content": "",
-            "lstrip": False,
-            "normalized": False,
-            "rstrip": False,
-            "single_word": False
-        },
-        "chat_template": "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% elif false == true and not '<>' in messages[0]['content'] %}{% set loop_messages = messages %}{% set system_message = '\u3042\u306a\u305f\u306f\u8aa0\u5b9f\u3067\u512a\u79c0\u306a\u65e5\u672c\u4eba\u306e\u30a2\u30b7\u30b9\u30bf\u30f3\u30c8\u3067\u3059\u3002' %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{{ bos_token }}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 and system_message != false %}{% set content = '<>\\n' + system_message + '\\n<>\\n\\n' + message['content'] %}{% else %}{% set content = message['content'] %}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + content.strip() + ' [/INST] ' }}{% elif message['role'] == 'system' %}{{ '<>\\n' + content.strip() + '\\n<>\\n\\n' }}{% elif message['role'] == 'assistant' %}{{ '' + content.strip() + '' + eos_token }}{% endif %}{% endfor %}"
+        "bos_token": {
+            "__type": "AddedToken",
+            "content": "",
+            "lstrip": False,
+            "normalized": False,
+            "rstrip": False,
+            "single_word": False
+        },
+        "eos_token": {
+            "__type": "AddedToken",
+            "content": "",
+            "lstrip": False,
+            "normalized": False,
+            "rstrip": False,
+            "single_word": False
+        },
+        "pad_token": None,
+        "unk_token": {
+            "__type": "AddedToken",
+            "content": "",
+            "lstrip": False,
+            "normalized": False,
+            "rstrip": False,
+            "single_word": False
+        },
+        "chat_template": "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% elif false == true and not '<>' in messages[0]['content'] %}{% set loop_messages = messages %}{% set system_message = '\u3042\u306a\u305f\u306f\u8aa0\u5b9f\u3067\u512a\u79c0\u306a\u65e5\u672c\u4eba\u306e\u30a2\u30b7\u30b9\u30bf\u30f3\u30c8\u3067\u3059\u3002' %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{{ bos_token }}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 and system_message != false %}{% set content = '<>\\n' + system_message + '\\n<>\\n\\n' + message['content'] %}{% else %}{% set content = message['content'] %}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + content.strip() + ' [/INST] ' }}{% elif message['role'] == 'system' %}{{ '<>\\n' + content.strip() + '\\n<>\\n\\n' }}{% elif message['role'] == 'assistant' %}{{ '' + content.strip() + '' + eos_token }}{% endif %}{% endfor %}"
    },
    "instructlab/merlinite-7b-lab": {
-        "bos_token": "",
-        "eos_token": "<|endoftext|>",
-        "pad_token": "<|pad|>",
-        "unk_token": "",
-        "chat_template": "{% for message in messages %}{% if message['role'] == 'system' %}{{'<|system|>'+ '\n' + message['content'] + '\n'}}{% elif message['role'] == 'user' %}{{'<|user|>' + '\n' + message['content'] + '\n'}}{% elif message['role'] == 'assistant' %}{{'<|assistant|>' + '\n' + message['content'] + '<|endoftext|>' + ('' if loop.last else '\n')}}{% endif %}{% endfor %}"
+        "bos_token": "",
+        "eos_token": "<|endoftext|>",
+        "pad_token": "<|pad|>",
+        "unk_token": "",
+        "chat_template": "{% for message in messages %}{% if message['role'] == 'system' %}{{'<|system|>'+ '\n' + message['content'] + '\n'}}{% elif message['role'] == 'user' %}{{'<|user|>' + '\n' + message['content'] + '\n'}}{% elif message['role'] == 'assistant' %}{{'<|assistant|>' + '\n' + message['content'] + '<|endoftext|>' + ('' if loop.last else '\n')}}{% endif %}{% endfor %}"
    },
    "microsoft/Phi-3-medium-128k-instruct": {
-        "bos_token": "",
-        "eos_token": "<|endoftext|>",
-        "pad_token": "<|placeholder6|>",
-        "unk_token": "",
-        "chat_template": "{% for message in messages %}{% if (message['role'] == 'user') %}{{'<|user|>' + '\n' + message['content'] + '<|end|>' + '\n' + '<|assistant|>' + '\n'}}{% elif (message['role'] == 'assistant') %}{{message['content'] + '<|end|>' + '\n'}}{% endif %}{% endfor %}"
+        "bos_token": "",
+        "eos_token": "<|endoftext|>",
+        "pad_token": "<|placeholder6|>",
+        "unk_token": "",
+        "chat_template": "{% for message in messages %}{% if (message['role'] == 'user') %}{{'<|user|>' + '\n' + message['content'] + '<|end|>' + '\n' + '<|assistant|>' + '\n'}}{% elif (message['role'] == 'assistant') %}{{message['content'] + '<|end|>' + '\n'}}{% endif %}{% endfor %}"
    },
    "katuni4ka/tiny-random-phi3": {
-        "bos_token": "",
-        "eos_token": "<|endoftext|>",
-        "pad_token": "<|endoftext|>",
-        "unk_token": "",
-        "chat_template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'system') %}{{'<|system|>' + '\n' + message['content'] + '<|end|>' + '\n'}}{% elif (message['role'] == 'user') %}{{'<|user|>' + '\n' + message['content'] + '<|end|>' + '\n' + '<|assistant|>' + '\n'}}{% elif message['role'] == 'assistant' %}{{message['content'] + '<|end|>' + '\n'}}{% endif %}{% endfor %}"
+        "bos_token": "",
+        "eos_token": "<|endoftext|>",
+        "pad_token": "<|endoftext|>",
+        "unk_token": "",
+        "chat_template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'system') %}{{'<|system|>' + '\n' + message['content'] + '<|end|>' + '\n'}}{% elif (message['role'] == 'user') %}{{'<|user|>' + '\n' + message['content'] + '<|end|>' + '\n' + '<|assistant|>' + '\n'}}{% elif message['role'] == 'assistant' %}{{message['content'] + '<|end|>' + '\n'}}{% endif %}{% endfor %}"
    },
    "microsoft/Phi-3-mini-128k-instruct": {
-        "bos_token": "",
-        "eos_token": "<|endoftext|>",
-        "pad_token": "<|placeholder6|>",
-        "unk_token": "",
-        "chat_template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') %}{{'<|user|>' + '\n' + message['content'] + '<|end|>' + '\n' + '<|assistant|>' + '\n'}}{% elif (message['role'] == 'assistant') %}{{message['content'] + '<|end|>' + '\n'}}{% endif %}{% endfor %}"
+        "bos_token": "",
+        "eos_token": "<|endoftext|>",
+        "pad_token": "<|placeholder6|>",
+        "unk_token": "",
+        "chat_template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') %}{{'<|user|>' + '\n' + message['content'] + '<|end|>' + '\n' + '<|assistant|>' + '\n'}}{% elif (message['role'] == 'assistant') %}{{message['content'] + '<|end|>' + '\n'}}{% endif %}{% endfor %}"
    },
    "VAGOsolutions/SauerkrautLM-Qwen-32b": {
-        "bos_token": None,
-        "eos_token": "<|im_end|>",
-        "pad_token": "<|endoftext|>",
-        "unk_token": None,
-        "chat_template": "{% set system_message = 'Du bist ein freundlicher und hilfsbereiter KI-Assistent.' %}{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ '<|im_start|>system\\n' + system_message + '<|im_end|>\\n' }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '<|im_start|>user\\n' + content + '<|im_end|>\\n<|im_start|>assistant\\n' }}{% elif message['role'] == 'assistant' %}{{ content + '<|im_end|>' + '\\n' }}{% endif %}{% endfor %}"
+        "bos_token": None,
+        "eos_token": "<|im_end|>",
+        "pad_token": "<|endoftext|>",
+        "unk_token": None,
+        "chat_template": "{% set system_message = 'Du bist ein freundlicher und hilfsbereiter KI-Assistent.' %}{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ '<|im_start|>system\\n' + system_message + '<|im_end|>\\n' }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '<|im_start|>user\\n' + content + '<|im_end|>\\n<|im_start|>assistant\\n' }}{% elif message['role'] == 'assistant' %}{{ content + '<|im_end|>' + '\\n' }}{% endif %}{% endfor %}"
    },
    "AI-Sweden-Models/gpt-sw3-356m-instruct": {
-        "bos_token": None,
-        "eos_token": None,
-        "pad_token": None,
-        "unk_token": None,
-        "chat_template": "{{ eos_token }}{{ bos_token }}{% for message in messages %}{% if message['role'] == 'user' %}{{ 'User: ' + message['content']}}{% else %}{{ 'Bot: ' + message['content']}}{% endif %}{{ message['text'] }}{{ bos_token }}{% endfor %}Bot:"
+        "bos_token": None,
+        "eos_token": None,
+        "pad_token": None,
+        "unk_token": None,
+        "chat_template": "{{ eos_token }}{{ bos_token }}{% for message in messages %}{% if message['role'] == 'user' %}{{ 'User: ' + message['content']}}{% else %}{{ 'Bot: ' + message['content']}}{% endif %}{{ message['text'] }}{{ bos_token }}{% endfor %}Bot:"
    },
    "google/gemma-7b-it": {
-        "bos_token": "",
-        "eos_token": "",
-        "pad_token": "",
-        "unk_token": "",
-        "chat_template": "{{ bos_token }}{% if messages[0]['role'] == 'system' %}{{ raise_exception('System role not supported') }}{% endif %}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if (message['role'] == 'assistant') %}{% set role = 'model' %}{% else %}{% set role = message['role'] %}{% endif %}{{ '' + role + '\n' + message['content'] | trim + '\n' }}{% endfor %}{% if add_generation_prompt %}{{'model\n'}}{% endif %}"
+        "bos_token": "",
+        "eos_token": "",
+        "pad_token": "",
+        "unk_token": "",
+        "chat_template": "{{ bos_token }}{% if messages[0]['role'] == 'system' %}{{ raise_exception('System role not supported') }}{% endif %}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if (message['role'] == 'assistant') %}{% set role = 'model' %}{% else %}{% set role = message['role'] %}{% endif %}{{ '' + role + '\n' + message['content'] | trim + '\n' }}{% endfor %}{% if add_generation_prompt %}{{'model\n'}}{% endif %}"
    },
    "ise-uiuc/Magicoder-S-DS-6.7B": {
-        "bos_token": "<\uff5cbegin\u2581of\u2581sentence\uff5c>",
-        "eos_token": "<\uff5cend\u2581of\u2581sentence\uff5c>",
-        "pad_token": "<\uff5cend\u2581of\u2581sentence\uff5c>",
-        "unk_token": None,
-        "chat_template": "{{bos_token}}{{'You are an exceptionally intelligent coding assistant that consistently delivers accurate and reliable responses to user instructions.\n\n'}}\n{%- for message in messages %}\n {%- if message['role'] == 'system' %}\n {{ raise_exception('System messages are not allowed in this template.') }}\n {%- else %}\n {%- if message['role'] == 'user' %}\n{{'@@ Instruction\n' + message['content'] + '\n\n'}}\n {%- else %}\n{{'@@ Response\n' + message['content'] + eos_token + '\n\n'}}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{{'@@ Response\n'}}"
+        "bos_token": "<\uff5cbegin\u2581of\u2581sentence\uff5c>",
+        "eos_token": "<\uff5cend\u2581of\u2581sentence\uff5c>",
+        "pad_token": "<\uff5cend\u2581of\u2581sentence\uff5c>",
+        "unk_token": None,
+        "chat_template": "{{bos_token}}{{'You are an exceptionally intelligent coding assistant that consistently delivers accurate and reliable responses to user instructions.\n\n'}}\n{%- for message in messages %}\n {%- if message['role'] == 'system' %}\n {{ raise_exception('System messages are not allowed in this template.') }}\n {%- else %}\n {%- if message['role'] == 'user' %}\n{{'@@ Instruction\n' + message['content'] + '\n\n'}}\n {%- else %}\n{{'@@ Response\n' + message['content'] + eos_token + '\n\n'}}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{{'@@ Response\n'}}"
    },
    "Deci/DeciLM-7B": {
-        "bos_token": "",
-        "eos_token": "",
-        "pad_token": None,
-        "unk_token": "",
-        "chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '### User:\n' + message['content'] }}\n{% elif message['role'] == 'system' %}\n{{ '### System:\n' + message['content'] }}\n{% elif message['role'] == 'assistant' %}\n{{ '### Assistant:\n' + message['content'] }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '### Assistant:' }}\n{% endif %}\n{% endfor %}"
+        "bos_token": "",
+        "eos_token": "",
+        "pad_token": None,
+        "unk_token": "",
+        "chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '### User:\n' + message['content'] }}\n{% elif message['role'] == 'system' %}\n{{ '### System:\n' + message['content'] }}\n{% elif message['role'] == 'assistant' %}\n{{ '### Assistant:\n' + message['content'] }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '### Assistant:' }}\n{% endif %}\n{% endfor %}"
    },
    "katuni4ka/tiny-random-minicpm": {
-        "bos_token": "",
-        "eos_token": "",
-        "pad_token": None,
-        "unk_token": "",
-        "chat_template": "{% for message in messages %}{% if message['role'] == 'user' %}{{'<\u7528\u6237>' + message['content'].strip() + ''}}{% else %}{{message['content'].strip()}}{% endif %}{% endfor %}"
+        "bos_token": "",
+        "eos_token": "",
+        "pad_token": None,
+        "unk_token": "",
+        "chat_template": "{% for message in messages %}{% if message['role'] == 'user' %}{{'<\u7528\u6237>' + message['content'].strip() + ''}}{% else %}{{message['content'].strip()}}{% endif %}{% endfor %}"
    },
    "UnicomLLM/Unichat-llama3-Chinese-8B-28K": {
-        "bos_token": "<|begin_of_text|>",
-        "eos_token": "<|end_of_text|>",
-        "pad_token": "<|end_of_text|>",
-        "unk_token": None,
-        "chat_template": "{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = message['content'] %}{% if loop.index0 == 0 %}{% set content =bos_token + content %}{% endif %}{% if loop.index0 ==1 %}{% set content = 'Human:' + content %}{% endif %}{% if loop.index0 %2!=0 and loop.index0 !=1 %}{% set content = bos_token+'Human:' + content %}{% endif %}{% if loop.index0 !=0 and loop.index0 %2==0 and not loop.last %}{% set content = 'Assistant:'+content+ eos_token %}{% endif %}{{ content+'\n' }}{% endfor %}{{ 'Assistant:' }}"
+        "bos_token": "<|begin_of_text|>",
+        "eos_token": "<|end_of_text|>",
+        "pad_token": "<|end_of_text|>",
+        "unk_token": None,
+        "chat_template": "{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = message['content'] %}{% if loop.index0 == 0 %}{% set content =bos_token + content %}{% endif %}{% if loop.index0 ==1 %}{% set content = 'Human:' + content %}{% endif %}{% if loop.index0 %2!=0 and loop.index0 !=1 %}{% set content = bos_token+'Human:' + content %}{% endif %}{% if loop.index0 !=0 and loop.index0 %2==0 and not loop.last %}{% set content = 'Assistant:'+content+ eos_token %}{% endif %}{{ content+'\n' }}{% endfor %}{{ 'Assistant:' }}"
    },
    "RLHFlow/LLaMA3-SFT": {
-        "bos_token": "<|begin_of_text|>",
-        "eos_token": "<|eot_id|>",
-        "pad_token": "<|end_of_text|>",
-        "unk_token": None,
-        "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|start_header_id|>' + message['role'] + '<|end_header_id|>' + '\n' + message['content'] + '<|eot_id|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n' }}{% endif %}"
+        "bos_token": "<|begin_of_text|>",
+        "eos_token": "<|eot_id|>",
+        "pad_token": "<|end_of_text|>",
+        "unk_token": None,
+        "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|start_header_id|>' + message['role'] + '<|end_header_id|>' + '\n' + message['content'] + '<|eot_id|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n' }}{% endif %}"
    },
    "bofenghuang/vigogne-2-7b-chat": {
-        "bos_token": {
-            "__type": "AddedToken",
-            "content": "",
-            "lstrip": False,
-            "normalized": False,
-            "rstrip": False,
-            "single_word": False
-        },
-        "eos_token": {
-            "__type": "AddedToken",
-            "content": "",
-            "lstrip": False,
-            "normalized": False,
-            "rstrip": False,
-            "single_word": False
-        },
-        "pad_token": None,
-        "unk_token": {
-            "__type": "AddedToken",
-            "content": "",
-            "lstrip": False,
-            "normalized": False,
-            "rstrip": False,
-            "single_word": False
-        },
-        "chat_template": "{{ bos_token }}{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% elif true == true %}{% set loop_messages = messages %}{% set system_message = 'Vous \u00eates Vigogne, un assistant IA cr\u00e9\u00e9 par Zaion Lab. Vous suivez extr\u00eamement bien les instructions. Aidez autant que vous le pouvez.' %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% if system_message != false %}{{ '<|system|>: ' + system_message + '\\n' }}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '<|user|>: ' + message['content'].strip() + '\\n' }}{% elif message['role'] == 'assistant' %}{{ '<|assistant|>: ' + message['content'].strip() + eos_token + '\\n' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>:' }}{% endif %}"
+        "bos_token": {
+            "__type": "AddedToken",
+            "content": "",
+            "lstrip": False,
+            "normalized": False,
+            "rstrip": False,
+            "single_word": False
+        },
+        "eos_token": {
+            "__type": "AddedToken",
+            "content": "",
+            "lstrip": False,
+            "normalized": False,
+            "rstrip": False,
+            "single_word": False
+        },
+        "pad_token": None,
+        "unk_token": {
+            "__type": "AddedToken",
+            "content": "",
+            "lstrip": False,
+            "normalized": False,
+            "rstrip": False,
+            "single_word": False
+        },
+        "chat_template": "{{ bos_token }}{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% elif true == true %}{% set loop_messages = messages %}{% set system_message = 'Vous \u00eates Vigogne, un assistant IA cr\u00e9\u00e9 par Zaion Lab. Vous suivez extr\u00eamement bien les instructions. Aidez autant que vous le pouvez.' %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% if system_message != false %}{{ '<|system|>: ' + system_message + '\\n' }}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '<|user|>: ' + message['content'].strip() + '\\n' }}{% elif message['role'] == 'assistant' %}{{ '<|assistant|>: ' + message['content'].strip() + eos_token + '\\n' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>:' }}{% endif %}"
    },
    "aisingapore/sea-lion-7b-instruct": {
-        "bos_token": None,
-        "eos_token": "<|endoftext|>",
-        "pad_token": "<|padding|>",
-        "unk_token": "",
-        "chat_template": "{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}### USER:\n{{ message['content'] + '\n\n' }}{% elif message['role'] == 'assistant' %}### RESPONSE:\n{{ message['content'] + '\n\n' }}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}{% if add_generation_prompt %}### RESPONSE:\n{% endif %}"
+        "bos_token": None,
+        "eos_token": "<|endoftext|>",
+        "pad_token": "<|padding|>",
+        "unk_token": "",
+        "chat_template": "{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}### USER:\n{{ message['content'] + '\n\n' }}{% elif message['role'] == 'assistant' %}### RESPONSE:\n{{ message['content'] + '\n\n' }}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}{% if add_generation_prompt %}### RESPONSE:\n{% endif %}"
    },
    "microsoft/Phi-3-small-8k-instruct": {
-        "bos_token": "<|endoftext|>",
-        "eos_token": "<|endoftext|>",
-        "pad_token": "<|endoftext|>",
-        "unk_token": None,
-        "chat_template": "{{ bos_token }}{% for message in messages %}{{'<|' + message['role'] + '|>' + '\n' + message['content'] + '<|end|>\n' }}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>\n' }}{% else %}{{ eos_token }}{% endif %}"
+        "bos_token": "<|endoftext|>",
+        "eos_token": "<|endoftext|>",
+        "pad_token": "<|endoftext|>",
+        "unk_token": None,
+        "chat_template": "{{ bos_token }}{% for message in messages %}{{'<|' + message['role'] + '|>' + '\n' + message['content'] + '<|end|>\n' }}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>\n' }}{% else %}{{ eos_token }}{% endif %}"
    },
    "THUDM/cogvlm2-llama3-chat-19B": {
-        "bos_token": "<|begin_of_text|>",
-        "eos_token": "<|end_of_text|>",
-        "pad_token": None,
-        "unk_token": None,
-        "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% else %}{{ eos_token }}{% endif %}"
+        "bos_token": "<|begin_of_text|>",
+        "eos_token": "<|end_of_text|>",
+        "pad_token": None,
+        "unk_token": None,
+        "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% else %}{{ eos_token }}{% endif %}"
    },
    "tiiuae/falcon-11B": {
-        "bos_token": ">>",
-        "eos_token": "<|endoftext|>",
-        "pad_token": "<|endoftext|>",
-        "unk_token": None,
-        "chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ 'User: \n' + message['content'] }}\n{% elif message['role'] == 'system' %}\n{{ 'System: ' + message['content'] }}\n{% elif message['role'] == 'assistant' %}\n{{ 'Falcon:\n' + message['content']}}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ 'Falcon:' }}\n{% endif %}\n{% endfor %}"
+        "bos_token": ">>",
+        "eos_token": "<|endoftext|>",
+        "pad_token": "<|endoftext|>",
+        "unk_token": None,
+        "chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ 'User: \n' + message['content'] }}\n{% elif message['role'] == 'system' %}\n{{ 'System: ' + message['content'] }}\n{% elif message['role'] == 'assistant' %}\n{{ 'Falcon:\n' + message['content']}}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ 'Falcon:' }}\n{% endif %}\n{% endfor %}"
    },
    "Mihaiii/Pallas-0.5": {
-        "bos_token": "",
-        "eos_token": "",
-        "pad_token": "",
-        "unk_token": "",
-        "chat_template": "{% for message in messages %}{% if message['role'] == 'system' %}{% if message['content']%}{{'SYSTEM:\n' + message['content']+'\n\n'}}{% endif %}{% elif message['role'] == 'user' %}{{'USER:\n' + message['content']+'\n\n'}}{% elif message['role'] == 'assistant' %}{{'ASSISTANT:\n' + message['content']}}{% endif %}{% if loop.last and add_generation_prompt %}{{ 'ASSISTANT:\n' }}{% endif %}{% endfor %}"
+        "bos_token": "",
+        "eos_token": "",
+        "pad_token": "",
+        "unk_token": "",
+        "chat_template": "{% for message in messages %}{% if message['role'] == 'system' %}{% if message['content']%}{{'SYSTEM:\n' + message['content']+'\n\n'}}{% endif %}{% elif message['role'] == 'user' %}{{'USER:\n' + message['content']+'\n\n'}}{% elif message['role'] == 'assistant' %}{{'ASSISTANT:\n' + message['content']}}{% endif %}{% if loop.last and add_generation_prompt %}{{ 'ASSISTANT:\n' }}{% endif %}{% endfor %}"
    },
    "prithivida/Asimov-7B-v2": {
-        "bos_token": "",
-        "eos_token": "",
-        "pad_token": "",
-        "unk_token": "",
-        "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'### ' + message['role'] + ': ' + message['content'] }}{% endfor %}{% if add_generation_prompt %}{{ '### Assistant: ' }}{% endif %}"
+        "bos_token": "",
+        "eos_token": "",
+        "pad_token": "",
+        "unk_token": "",
+        "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'### ' + message['role'] + ': ' + message['content'] }}{% endfor %}{% if add_generation_prompt %}{{ '### Assistant: ' }}{% endif %}"
    },
    "dreamgen/opus-v1.2-7b": {
-        "bos_token": "",
-        "eos_token": "",
-        "pad_token": None,
-        "unk_token": "",
-        "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>'}}{% if message['role']=='assistant' %}{{'text'}}{% else %}{{message['role']}}{% endif %}{{'\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>text\n' }}{% endif %}"
+        "bos_token": "",
+        "eos_token": "",
+        "pad_token": None,
+        "unk_token": "",
+        "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>'}}{% if message['role']=='assistant' %}{{'text'}}{% else %}{{message['role']}}{% endif %}{{'\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>text\n' }}{% endif %}"
    },
    "KnutJaegersberg/internlm-20b-llama": {
-        "bos_token": "",
-        "eos_token": "",
-        "pad_token": "",
-        "unk_token": "",
-        "chat_template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.last and message['role'] != 'user' %}{{ raise_exception('Most recent message must come from user!') }}{% endif %}{% if message['role'] == 'user' %}{{ '<|User|>:' + message['content'] + '\n'}}{% elif message['role'] == 'assistant' %}{{ '<|Bot|>:' + message['content'] + '\n'}}{% else %}{{ raise_exception('Only user and assistant roles are supported in this model!') }}{% endif %}{% endfor %}{{ '<|Bot|>:' }}"
+        "bos_token": "",
+        "eos_token": "",
+        "pad_token": "",
+        "unk_token": "",
+        "chat_template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.last and message['role'] != 'user' %}{{ raise_exception('Most recent message must come from user!') }}{% endif %}{% if message['role'] == 'user' %}{{ '<|User|>:' + message['content'] + '\n'}}{% elif message['role'] == 'assistant' %}{{ '<|Bot|>:' + message['content'] + '\n'}}{% else %}{{ raise_exception('Only user and assistant roles are supported in this model!') }}{% endif %}{% endfor %}{{ '<|Bot|>:' }}"
    },
    "alpindale/WizardLM-2-8x22B": {
-        "bos_token": "",
-        "eos_token": "",
-        "pad_token": "",
-        "unk_token": "",
-        "chat_template": "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{{ messages[0]['content'].strip() }}{% else %}{% set loop_messages = messages %}{{ 'A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user\\'s questions.' }}{% endif %}{% for message in loop_messages %}{% if loop.index0 == 0 %}{% if message['role'] == 'system' or message['role'] == 'user' %}{{ ' USER: ' + message['content'].strip() }}{% else %}{{ ' ASSISTANT: ' + message['content'].strip() + eos_token }}{% endif %}{% else %}{% if message['role'] == 'system' or message['role'] == 'user' %}{{ '\nUSER: ' + message['content'].strip() }}{% else %}{{ ' ASSISTANT: ' + message['content'].strip() + eos_token }}{% endif %}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ ' ASSISTANT:' }}{% endif %}"
+        "bos_token": "",
+        "eos_token": "",
+        "pad_token": "",
+        "unk_token": "",
+        "chat_template": "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{{ messages[0]['content'].strip() }}{% else %}{% set loop_messages = messages %}{{ 'A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user\\'s questions.' }}{% endif %}{% for message in loop_messages %}{% if loop.index0 == 0 %}{% if message['role'] == 'system' or message['role'] == 'user' %}{{ ' USER: ' + message['content'].strip() }}{% else %}{{ ' ASSISTANT: ' + message['content'].strip() + eos_token }}{% endif %}{% else %}{% if message['role'] == 'system' or message['role'] == 'user' %}{{ '\nUSER: ' + message['content'].strip() }}{% else %}{{ ' ASSISTANT: ' + message['content'].strip() + eos_token }}{% endif %}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ ' ASSISTANT:' }}{% endif %}"
    },
    "yentinglin/Taiwan-LLM-7B-v2.0-base": {
-        "bos_token": "",
-        "eos_token": "",
-        "pad_token": "",
-        "unk_token": "",
-        "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'].strip() %}{% else %}{% set loop_messages = messages %}{% set system_message = '\u4f60\u662f\u4eba\u5de5\u667a\u6167\u52a9\u7406\uff0c\u4ee5\u4e0b\u662f\u7528\u6236\u548c\u4eba\u5de5\u667a\u80fd\u52a9\u7406\u4e4b\u9593\u7684\u5c0d\u8a71\u3002\u4f60\u8981\u5c0d\u7528\u6236\u7684\u554f\u984c\u63d0\u4f9b\u6709\u7528\u3001\u5b89\u5168\u3001\u8a73\u7d30\u548c\u79ae\u8c8c\u7684\u56de\u7b54\u3002' %}{% endif %}{{system_message + eos_token}}{% for message in loop_messages %}{% if message['role'] == 'user' %}USER: {{ message['content'].strip() + eos_token }}{% elif message['role'] == 'system' %}{{message['content'].strip() + eos_token}}{% elif message['role'] == 'assistant' %}ASSISTANT: {{ message['content'].strip() + eos_token }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{'ASSISTANT:'}}{% endif %}"
+        "bos_token": "",
+        "eos_token": "",
+        "pad_token": "",
+        "unk_token": "",
+        "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'].strip() %}{% else %}{% set loop_messages = messages %}{% set system_message = '\u4f60\u662f\u4eba\u5de5\u667a\u6167\u52a9\u7406\uff0c\u4ee5\u4e0b\u662f\u7528\u6236\u548c\u4eba\u5de5\u667a\u80fd\u52a9\u7406\u4e4b\u9593\u7684\u5c0d\u8a71\u3002\u4f60\u8981\u5c0d\u7528\u6236\u7684\u554f\u984c\u63d0\u4f9b\u6709\u7528\u3001\u5b89\u5168\u3001\u8a73\u7d30\u548c\u79ae\u8c8c\u7684\u56de\u7b54\u3002' %}{% endif %}{{system_message + eos_token}}{% for message in loop_messages %}{% if message['role'] == 'user' %}USER: {{ message['content'].strip() + eos_token }}{% elif message['role'] == 'system' %}{{message['content'].strip() + eos_token}}{% elif message['role'] == 'assistant' %}ASSISTANT: {{ message['content'].strip() + eos_token }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{'ASSISTANT:'}}{% endif %}"
    },
    "maywell/Synatra-Mixtral-8x7B": {
-        "bos_token": "",
-        "eos_token": "",
-        "pad_token": "",
-        "unk_token": "",
-        "chat_template": "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n{% for message in messages %}{% if message['role'] == 'user' %}### Instruction:\n{{ message['content']|trim -}}{% if not loop.last %}{% endif %}\n{% elif message['role'] == 'assistant' %}### Response:\n{{ message['content']|trim -}}{% if not loop.last %}{% endif %}\n{% elif message['role'] == 'system' %}{{ message['content']|trim -}}{% if not loop.last %}{% endif %}\n{% endif %}\n{% endfor %}\n{% if add_generation_prompt and messages[-1]['role'] != 'assistant' %}\n### Response:\n{% endif %}"
+        "bos_token": "",
+        "eos_token": "",
+        "pad_token": "",
+        "unk_token": "",
+        "chat_template": "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n{% for message in messages %}{% if message['role'] == 'user' %}### Instruction:\n{{ message['content']|trim -}}{% if not loop.last %}{% endif %}\n{% elif message['role'] == 'assistant' %}### Response:\n{{ message['content']|trim -}}{% if not loop.last %}{% endif %}\n{% elif message['role'] == 'system' %}{{ message['content']|trim -}}{% if not loop.last %}{% endif %}\n{% endif %}\n{% endfor %}\n{% if add_generation_prompt and messages[-1]['role'] != 'assistant' %}\n### Response:\n{% endif %}"
    },
    "MediaTek-Research/Breeze-7B-Instruct-v1_0": {
-        "bos_token": "",
-        "eos_token": "",
-        "pad_token": "",
-        "unk_token": "",
-        "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'].strip() %}{% else %}{% set loop_messages = messages %}{% set system_message = 'You are a helpful AI assistant built by MediaTek Research. The user you are helping speaks Traditional Chinese and comes from Taiwan.' %}{% endif %}{{ bos_token }} {{ system_message }} {% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/... or system/user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ ' [INST] ' + message['content'] + ' [/INST] ' }}{% elif message['role'] == 'assistant' %}{{ message['content'] }}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}"
+        "bos_token": "",
+        "eos_token": "",
+        "pad_token": "",
+        "unk_token": "",
+        "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'].strip() %}{% else %}{% set loop_messages = messages %}{% set system_message = 'You are a helpful AI assistant built by MediaTek Research. The user you are helping speaks Traditional Chinese and comes from Taiwan.' %}{% endif %}{{ bos_token }} {{ system_message }} {% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/... or system/user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ ' [INST] ' + message['content'] + ' [/INST] ' }}{% elif message['role'] == 'assistant' %}{{ message['content'] }}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}"
    },
    "MTSAIR/multi_verse_model": {
-        "bos_token": "",
-        "eos_token": "",
-        "pad_token": "",
-        "unk_token": "",
-        "chat_template": "{% for message in messages %}{% if message['role'] == 'user' %}{{ '### Instruction:\n' + message['content'] + '\n### Response:\n' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token}}{% elif message['role'] == 'system' %}{{ '### System:\n' + message['content'] + '\n' }}{% endif %}{% endfor %}"
+        "bos_token": "",
+        "eos_token": "",
+        "pad_token": "",
+        "unk_token": "",
+        "chat_template": "{% for message in messages %}{% if message['role'] == 'user' %}{{ '### Instruction:\n' + message['content'] + '\n### Response:\n' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token}}{% elif message['role'] == 'system' %}{{ '### System:\n' + message['content'] + '\n' }}{% endif %}{% endfor %}"
    },
    "bofenghuang/vigostral-7b-chat": {
-        "bos_token": "",
-        "eos_token": "",
-        "pad_token": None,
-        "unk_token": "",
-        "chat_template": "{{ bos_token }}{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% elif true == true and not '<>' in messages[0]['content'] %}{% set loop_messages = messages %}{% set system_message = 'Vous \u00eates Vigogne, un assistant IA cr\u00e9\u00e9 par Zaion Lab. Vous suivez extr\u00eamement bien les instructions. Aidez autant que vous le pouvez.' %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 and system_message != false %}{% set content = '<>\\n' + system_message + '\\n<>\\n\\n' + message['content'] %}{% else %}{% set content = message['content'] %}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + content.strip() + ' [/INST]' }}{% elif message['role'] == 'system' %}{{ '<>\\n' + content.strip() + '\\n<>\\n\\n' }}{% elif message['role'] == 'assistant' %}{{ ' ' + content.strip() + ' ' + eos_token }}{% endif %}{% endfor %}"
+        "bos_token": "",
+        "eos_token": "",
+        "pad_token": None,
+        "unk_token": "",
+        "chat_template": "{{ bos_token }}{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% elif true == true and not '<>' in messages[0]['content'] %}{% set loop_messages = messages %}{% set system_message = 'Vous \u00eates Vigogne, un assistant IA cr\u00e9\u00e9 par Zaion Lab. Vous suivez extr\u00eamement bien les instructions. Aidez autant que vous le pouvez.' %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 and system_message != false %}{% set content = '<>\\n' + system_message + '\\n<>\\n\\n' + message['content'] %}{% else %}{% set content = message['content'] %}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + content.strip() + ' [/INST]' }}{% elif message['role'] == 'system' %}{{ '<>\\n' + content.strip() + '\\n<>\\n\\n' }}{% elif message['role'] == 'assistant' %}{{ ' ' + content.strip() + ' ' + eos_token }}{% endif %}{% endfor %}"
    },
    "SeaLLMs/SeaLLM-7B-v2.5": {
-        "bos_token": "",
-        "eos_token": "",
-        "pad_token": "",
-        "unk_token": "",
-        "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{{ bos_token }}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"
+        "bos_token": "",
+        "eos_token": "",
+        "pad_token": "",
+        "unk_token": "",
+        "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{{ bos_token }}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"
    },
    "qnguyen3/Master-Yi-9B": {
-        "bos_token": "<|startoftext|>",
-        "eos_token": "<|im_end|>",
-        "pad_token": "",
-        "unk_token": "",
-        "chat_template": "{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ '<|im_start|>system\\n' + system_message + '<|im_end|>\\n' }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '<|im_start|>user\\n' + content + '<|im_end|>\\n<|im_start|>assistant\\n' }}{% elif message['role'] == 'assistant' %}{{ content + '<|im_end|>' + '\\n' }}{% endif %}{% endfor %}"
+        "bos_token": "<|startoftext|>",
+        "eos_token": "<|im_end|>",
+        "pad_token": "",
+        "unk_token": "",
+        "chat_template": "{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ '<|im_start|>system\\n' + system_message + '<|im_end|>\\n' }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '<|im_start|>user\\n' + content + '<|im_end|>\\n<|im_start|>assistant\\n' }}{% elif message['role'] == 'assistant' %}{{ content + '<|im_end|>' + '\\n' }}{% endif %}{% endfor %}"
    },
    "meetkai/functionary-small-v2.5": {
-        "bos_token": "<|begin_of_text|>",
-        "eos_token": "<|end_of_text|>",
-        "pad_token": "<|end_of_text|>",
-        "unk_token": None,
-        "chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' or message['role'] == 'system' %}\n{{ '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n' + message['content'] + '<|eot_id|>' }}{% elif message['role'] == 'tool' %}\n{{ '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n' + 'name=' + message['name'] + '\n' + message['content'] + '<|eot_id|>' }}{% else %}\n{{ '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'}}{% if message['content'] is not none %}\n{{ message['content'] }}{% endif %}\n{% if 'tool_calls' in message and message['tool_calls'] is not none %}\n{% for tool_call in message['tool_calls'] %}\n{{ '<|reserved_special_token_249|>' + tool_call['function']['name'] + '\n' + tool_call['function']['arguments'] }}{% endfor %}\n{% endif %}\n{{ '<|eot_id|>' }}{% endif %}\n{% endfor %}\n{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% endif %}"
+        "bos_token": "<|begin_of_text|>",
+        "eos_token": "<|end_of_text|>",
+        "pad_token": "<|end_of_text|>",
+        "unk_token": None,
+        "chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' or message['role'] == 'system' %}\n{{ '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n' + message['content'] + '<|eot_id|>' }}{% elif message['role'] == 'tool' %}\n{{ '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n' + 'name=' + message['name'] + '\n' + message['content'] + '<|eot_id|>' }}{% else %}\n{{ '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'}}{% if message['content'] is not none %}\n{{ message['content'] }}{% endif %}\n{% if 'tool_calls' in message and message['tool_calls'] is not none %}\n{% for tool_call in message['tool_calls'] %}\n{{ '<|reserved_special_token_249|>' + tool_call['function']['name'] + '\n' + tool_call['function']['arguments'] }}{% endfor %}\n{% endif %}\n{{ '<|eot_id|>' }}{% endif %}\n{% endfor %}\n{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% endif %}"
    },
    "h2oai/h2o-danube-1.8b-chat": {
-        "bos_token": "",
-        "eos_token": "",
-        "pad_token": "",
-        "unk_token": "",
-        "chat_template": "{% for message in messages %}{% if message['role'] == 'user' %}{{ '<|prompt|>' + message['content'] + eos_token }}{% elif message['role'] == 'system' %}{{ '<|system|>' + message['content'] + eos_token }}{% elif message['role'] == 'assistant' %}{{ '<|answer|>' + message['content'] + eos_token }}{% endif %}{% if loop.last and add_generation_prompt %}{{ '<|answer|>' }}{% endif %}{% endfor %}"
+        "bos_token": "",
+        "eos_token": "",
+        "pad_token": "",
+        "unk_token": "",
+        "chat_template": "{% for message in messages %}{% if message['role'] == 'user' %}{{ '<|prompt|>' + message['content'] + eos_token }}{% elif message['role'] == 'system' %}{{ '<|system|>' + message['content'] + eos_token }}{% elif message['role'] == 'assistant' %}{{ '<|answer|>' + message['content'] + eos_token }}{% endif %}{% if loop.last and add_generation_prompt %}{{ '<|answer|>' }}{% endif %}{% endfor %}"
    },
    "TheBloke/CodeLlama-70B-Instruct-AWQ": {
-        "bos_token": "",
-        "eos_token": "",
-        "pad_token": None,
-        "unk_token": "",
-        "chat_template": "{% if messages[0]['role'] == 'system' %}{% set user_index = 1 %}{% else %}{% set user_index = 0 %}{% endif %}{% for message in messages %}{% if (message['role'] == 'user') != ((loop.index0 + user_index) % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 %}{{ '' }}{% endif %}{% set content = 'Source: ' + message['role'] + '\n\n ' + message['content'].strip() %}{{ content + ' ' }}{% endfor %}{{'Source: assistant\nDestination: user\n\n '}}"
+        "bos_token": "",
+        "eos_token": "",
+        "pad_token": None,
+        "unk_token": "",
+        "chat_template": "{% if messages[0]['role'] == 'system' %}{% set user_index = 1 %}{% else %}{% set user_index = 0 %}{% endif %}{% for message in messages %}{% if (message['role'] == 'user') != ((loop.index0 + user_index) % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 %}{{ '' }}{% endif %}{% set content = 'Source: ' + message['role'] + '\n\n ' + message['content'].strip() %}{{ content + ' ' }}{% endfor %}{{'Source: assistant\nDestination: user\n\n '}}"
    },
    "FairMind/Phi-3-mini-4k-instruct-bnb-4bit-Ita": {
-        "bos_token": "",
-        "eos_token": "<|endoftext|>",
-        "pad_token": "<|endoftext|>",
-        "unk_token": "",
-        "chat_template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] in ['user', 'system']) %}{{'<|user|>' + '\n' + message['content'] + '<|end|>' + '\n' + '<|assistant|>' + '\n'}}{% elif message['role'] == 'assistant' %}{{message['content'] + '<|end|>' + '\n'}}{% endif %}{% endfor %}"
+        "bos_token": "",
+        "eos_token": "<|endoftext|>",
+        "pad_token": "<|endoftext|>",
+        "unk_token": "",
+        "chat_template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] in ['user', 'system']) %}{{'<|user|>' + '\n' + message['content'] + '<|end|>' + '\n' + '<|assistant|>' + '\n'}}{% elif message['role'] == 'assistant' %}{{message['content'] + '<|end|>' + '\n'}}{% endif %}{% endfor %}"
    },
    "ibm-granite/granite-8b-code-instruct": {
-        "bos_token": "<|endoftext|>",
-        "eos_token": "<|endoftext|>",
-        "pad_token": "<|endoftext|>",
-        "unk_token": "<|endoftext|>",
-        "chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ 'Question:\n' + message['content'] + '\n\n' }}{% elif message['role'] == 'system' %}\n{{ 'System:\n' + message['content'] + '\n\n' }}{% elif message['role'] == 'assistant' %}{{ 'Answer:\n' + message['content'] + '\n\n' }}{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ 'Answer:\n' }}{% endif %}{% endfor %}"
+        "bos_token": "<|endoftext|>",
+        "eos_token": "<|endoftext|>",
+        "pad_token": "<|endoftext|>",
+        "unk_token": "<|endoftext|>",
+        "chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ 'Question:\n' + message['content'] + '\n\n' }}{% elif message['role'] == 'system' %}\n{{ 'System:\n' + message['content'] + '\n\n' }}{% elif message['role'] == 'assistant' %}{{ 'Answer:\n' + message['content'] + '\n\n' }}{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ 'Answer:\n' }}{% endif %}{% endfor %}"
    },
    "dicta-il/dictalm2.0-instruct": {
-        "bos_token": "",
-        "eos_token": "",
-        "pad_token": None,
-        "unk_token": "",
-        "chat_template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]\n' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token + ' ' }}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}"
+        "bos_token": "",
+        "eos_token": "",
+        "pad_token": None,
+        "unk_token": "",
+        "chat_template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]\n' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token + ' ' }}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}"
    },
    "nvidia/Llama3-ChatQA-1.5-8B": {
-        "bos_token": "<|begin_of_text|>",
-        "eos_token": "<|end_of_text|>",
-        "pad_token": None,
-        "unk_token": None,
-        "chat_template": "{{ bos_token }}{%- if messages[0]['role'] == 'system' -%}{% set loop_messages = messages[1:] %}{%- else -%}{% set loop_messages = messages %}{% endif %}System: This is a chat between a user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions based on the context. The assistant should also indicate when the answer cannot be found in the context.\n\n{% for message in loop_messages %}{%- if message['role'] == 'user' -%}User: {{ message['content'].strip() + '\n\n' }}{%- else -%}Assistant: {{ message['content'].strip() + '\n\n' }}{%- endif %}{% if loop.last and message['role'] == 'user' %}Assistant:{% endif %}{% endfor %}"
+        "bos_token": "<|begin_of_text|>",
+        "eos_token": "<|end_of_text|>",
+        "pad_token": None,
+        "unk_token": None,
+        "chat_template": "{{ bos_token }}{%- if messages[0]['role'] == 'system' -%}{% set loop_messages = messages[1:] %}{%- else -%}{% set loop_messages = messages %}{% endif %}System: This is a chat between a user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions based on the context. The assistant should also indicate when the answer cannot be found in the context.\n\n{% for message in loop_messages %}{%- if message['role'] == 'user' -%}User: {{ message['content'].strip() + '\n\n' }}{%- else -%}Assistant: {{ message['content'].strip() + '\n\n' }}{%- endif %}{% if loop.last and message['role'] == 'user' %}Assistant:{% endif %}{% endfor %}"
    },
    "openchat/openchat-3.6-8b-20240522": {
-        "bos_token": "<|begin_of_text|>",
-        "eos_token": "<|eot_id|>",
-        "pad_token": None,
-        "unk_token": None,
-        "chat_template": "{{ bos_token }}{% for message in messages %}{% if message['role'] in ['user', 'assistant'] %}{% set content = '<|start_header_id|>GPT4 Correct ' + message['role'].title() + '<|end_header_id|>\n\n' + message['content'] | trim + '<|eot_id|>' %}{% elif message['role'] == 'system' %}{% set content = '<|start_header_id|>System<|end_header_id|>\n\n' + message['content'] | trim + '<|eot_id|>' %}{% else %}{{ raise_exception('Only user, assistant and system roles are supported!') }}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>GPT4 Correct Assistant<|end_header_id|>\n\n' }}{% endif %}"
+        "bos_token": "<|begin_of_text|>",
+        "eos_token": "<|eot_id|>",
+        "pad_token": None,
+        "unk_token": None,
+        "chat_template": "{{ bos_token }}{% for message in messages %}{% if message['role'] in ['user', 'assistant'] %}{% set content = '<|start_header_id|>GPT4 Correct ' + message['role'].title() + '<|end_header_id|>\n\n' + message['content'] | trim + '<|eot_id|>' %}{% elif message['role'] == 'system' %}{% set content = '<|start_header_id|>System<|end_header_id|>\n\n' + message['content'] | trim + '<|eot_id|>' %}{% else %}{{ raise_exception('Only user, assistant and system roles are supported!') }}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>GPT4 Correct Assistant<|end_header_id|>\n\n' }}{% endif %}"
    },
    "OpenBuddy/openbuddy-mistral2-7b-v20.3-32k": {
-        "bos_token": {
-            "__type": "AddedToken",
-            "content": "",
-            "lstrip": False,
-            "normalized": True,
-            "rstrip": False,
-            "single_word": False
-        },
-        "eos_token": {
-            "__type": "AddedToken",
-            "content": "",
-            "lstrip": False,
-            "normalized": True,
-            "rstrip": False,
-            "single_word": False
-        },
-        "pad_token": None,
-        "unk_token": {
-            "__type": "AddedToken",
-            "content": "",
-            "lstrip": False,
-            "normalized": True,
-            "rstrip": False,
-            "single_word": False
-        },
-        "chat_template": "{% for message in messages %}{% if message['role'] == 'user' %}{{ 'User: ' + message['content'] + '\n' }}{% elif message['role'] == 'assistant' %}{% if loop.last %}{{ 'Assistant: ' + message['content']}}{% else %}{{ 'Assistant: ' + message['content'] + eos_token + '\n' }}{% endif %}{% elif message['role'] == 'system' %}{{ message['content'] + '\n' }}{% endif %}{% endfor %}{% if add_generation_prompt and messages[-1]['role'] != 'assistant' %}{{ 'Assistant:' }}{% endif %}"
+        "bos_token": {
+            "__type": "AddedToken",
+            "content": "",
+            "lstrip": False,
+            "normalized": True,
+            "rstrip": False,
+            "single_word": False
+        },
+        "eos_token": {
+            "__type": "AddedToken",
+            "content": "",
+            "lstrip": False,
+            "normalized": True,
+            "rstrip": False,
+            "single_word": False
+        },
+        "pad_token": None,
+        "unk_token": {
+            "__type": "AddedToken",
+            "content": "",
+            "lstrip": False,
+            "normalized": True,
+            "rstrip": False,
+            "single_word": False
+        },
+        "chat_template": "{% for message in messages %}{% if message['role'] == 'user' %}{{ 'User: ' + message['content'] + '\n' }}{% elif message['role'] == 'assistant' %}{% if loop.last %}{{ 'Assistant: ' + message['content']}}{% else %}{{ 'Assistant: ' + message['content'] + eos_token + '\n' }}{% endif %}{% elif message['role'] == 'system' %}{{ message['content'] + '\n' }}{% endif %}{% endfor %}{% if add_generation_prompt and messages[-1]['role'] != 'assistant' %}{{ 'Assistant:' }}{% endif %}"
    },
    "tenyx/TenyxChat-7B-v1": {
-        "bos_token": "",
-        "eos_token": "<|end_of_turn|>",
-        "pad_token": "<|end_of_turn|>",
-        "unk_token": "",
-        "chat_template": "{{ bos_token }} {% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ 'User:' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ 'System:' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ 'Assistant:' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ 'Assistant:' }}{% endif %}\n{% endfor %}"
+        "bos_token": "",
+        "eos_token": "<|end_of_turn|>",
+        "pad_token": "<|end_of_turn|>",
+        "unk_token": "",
+        "chat_template": "{{ bos_token }} {% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ 'User:' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ 'System:' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ 'Assistant:' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ 'Assistant:' }}{% endif %}\n{% endfor %}"
    },
    "LoneStriker/TinyLlama-1.1B-32k-Instruct-3.0bpw-h6-exl2": {
-        "bos_token": "",
-        "eos_token": "",
-        "pad_token": "",
-        "unk_token": "",
-        "chat_template": "{{ bos_token }}{% if bos_token|length > 0 %}{{ '\n' }}{% endif %}{% if messages[0]['role'] != 'system' %}{{ '### Instruction:\nYou are an unbiased, uncensored, helpful assistant.' }}{% endif %}{% for message in messages %}{% if message['role'] == 'system' %}{{ '### Instruction:\n' + message['content'] }}{% elif message['role'] == 'user' %}{{ '\n\n### Input:\n' + message['content'] }}{% elif message['role'] == 'assistant' %}{{ '\n\n### Response:\n' + message['content'] + eos_token }}{% else %}{{ raise_exception('Only user, assistant, and system roles are supported!') }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '\n\n### Response:\n' }}{% endif %}"
+        "bos_token": "",
+        "eos_token": "",
+        "pad_token": "",
+        "unk_token": "",
+        "chat_template": "{{ bos_token }}{% if bos_token|length > 0 %}{{ '\n' }}{% endif %}{% if messages[0]['role'] != 'system' %}{{ '### Instruction:\nYou are an unbiased, uncensored, helpful assistant.' }}{% endif %}{% for message in messages %}{% if message['role'] == 'system' %}{{ '### Instruction:\n' + message['content'] }}{% elif message['role'] == 'user' %}{{ '\n\n### Input:\n' + message['content'] }}{% elif message['role'] == 'assistant' %}{{ '\n\n### Response:\n' + message['content'] + eos_token }}{% else %}{{ raise_exception('Only user, assistant, and system roles are supported!') }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '\n\n### Response:\n' }}{% endif %}"
    },
    "SeaLLMs/SeaLLM-7B-v2": {
-        "bos_token": "",
-        "eos_token": "",
-        "pad_token": "",
-        "unk_token": "",
-        "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{{ bos_token }}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + ''}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"
+        "bos_token": "",
+        "eos_token": "",
+        "pad_token": "",
+        "unk_token": "",
+        "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{{ bos_token }}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + ''}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"
    },
    "cognitivecomputations/dolphin-2.6-mistral-7b-dpo-laser": {
-        "bos_token": "",
-        "eos_token": "<|im_end|>",
-        "pad_token": "<|im_end|>",
-        "unk_token": "",
-        "chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|im_start|>user\n' + message['content'] + '<|im_end|>' }}\n{% elif message['role'] == 'system' %}\n{{ '<|im_start|>system\n' + message['content'] + '<|im_end|>' }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|im_start|>assistant\n' + message['content'] + '<|im_end|>' }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|im_start|>assistant' }}\n{% endif %}\n{% endfor %}"
+        "bos_token": "",
+        "eos_token": "<|im_end|>",
+        "pad_token": "<|im_end|>",
+        "unk_token": "",
+        "chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|im_start|>user\n' + message['content'] + '<|im_end|>' }}\n{% elif message['role'] == 'system' %}\n{{ '<|im_start|>system\n' + message['content'] + '<|im_end|>' }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|im_start|>assistant\n' + message['content'] + '<|im_end|>' }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|im_start|>assistant' }}\n{% endif %}\n{% endfor %}"
    },
    "vaiv/llamion-14b-chat": {
-        "bos_token": "",
-        "eos_token": "",
-        "pad_token": "",
-        "unk_token": "",
-        "chat_template": "{% for message in messages %}{% if loop.first %}{{ bos_token }}{% endif %}{% if message['role'] == 'user' %}{{ 'Human: ' + message['content'] + '\n\nAssistant: ' + eos_token }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token }}{% endif %}{% endfor %}"
+        "bos_token": "",
+        "eos_token": "",
+        "pad_token": "",
+        "unk_token": "",
+        "chat_template": "{% for message in messages %}{% if loop.first %}{{ bos_token }}{% endif %}{% if message['role'] == 'user' %}{{ 'Human: ' + message['content'] + '\n\nAssistant: ' + eos_token }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token }}{% endif %}{% endfor %}"
    },
    "yam-peleg/Hebrew-Gemma-11B-V2": {
-        "bos_token": "",
-        "eos_token": "",
-        "pad_token": "",
-        "unk_token": "",
-        "chat_template": "{% if messages[0]['role'] == 'system' %}{{ raise_exception('System role not supported') }}{% endif %}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if (message['role'] == 'assistant') %}{% set role = 'model' %}{% else %}{% set role = message['role'] %}{% endif %}{{ '' + role + '\n' + message['content'] | trim + '\n' }}{% endfor %}{% if add_generation_prompt %}{{'model\n'}}{% endif %}"
+        "bos_token": "",
+        "eos_token": "",
+        "pad_token": "",
+        "unk_token": "",
+        "chat_template": "{% if messages[0]['role'] == 'system' %}{{ raise_exception('System role not supported') }}{% endif %}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if (message['role'] == 'assistant') %}{% set role = 'model' %}{% else %}{% set role = message['role'] %}{% endif %}{{ '' + role + '\n' + message['content'] | trim + '\n' }}{% endfor %}{% if add_generation_prompt %}{{'model\n'}}{% endif %}"
    },
    "shenzhi-wang/Llama3-8B-Chinese-Chat": {
-        "bos_token": "<|begin_of_text|>",
-        "eos_token": "<|eot_id|>",
-        "pad_token": "<|eot_id|>",
-        "unk_token": None,
-        "chat_template": "{{ '<|begin_of_text|>' }}{% set system_message = 'You are a helpful assistant.' %}{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% set loop_messages = messages[1:] %}{% else %}{% set loop_messages = messages %}{% endif %}{% if system_message is defined %}{{ '<|start_header_id|>system<|end_header_id|>\n\n' + system_message | trim + '<|eot_id|>' }}{% endif %}{% for message in loop_messages %}{{ '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% endif %}"
+        "bos_token": "<|begin_of_text|>",
+        "eos_token": "<|eot_id|>",
+        "pad_token": "<|eot_id|>",
+        "unk_token": None,
+        "chat_template": "{{ '<|begin_of_text|>' }}{% set system_message = 'You are a helpful assistant.' %}{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% set loop_messages = messages[1:] %}{% else %}{% set loop_messages = messages %}{% endif %}{% if system_message is defined %}{{ '<|start_header_id|>system<|end_header_id|>\n\n' + system_message | trim + '<|eot_id|>' }}{% endif %}{% for message in loop_messages %}{{ '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% endif %}"
    },
    "ericzzz/falcon-rw-1b-chat": {
-        "bos_token": "<|endoftext|>",
-        "eos_token": "<|endoftext|>",
-        "pad_token": None,
-        "unk_token": "<|endoftext|>",
-        "chat_template": "{% for message in messages %}{% if loop.index > 1 and loop.previtem['role'] != 'assistant' %}{{ ' ' }}{% endif %}{% if message['role'] == 'system' %}{{ '[SYS] ' + message['content'].strip() }}{% elif message['role'] == 'user' %}{{ '[INST] ' + message['content'].strip() }}{% elif message['role'] == 'assistant' %}{{ '[RESP] ' + message['content'] + eos_token }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ ' [RESP] ' }}{% endif %}"
+        "bos_token": "<|endoftext|>",
+        "eos_token": "<|endoftext|>",
+        "pad_token": None,
+        "unk_token": "<|endoftext|>",
+        "chat_template": "{% for message in messages %}{% if loop.index > 1 and loop.previtem['role'] != 'assistant' %}{{ ' ' }}{% endif %}{% if message['role'] == 'system' %}{{ '[SYS] ' + message['content'].strip() }}{% elif message['role'] == 'user' %}{{ '[INST] ' + message['content'].strip() }}{% elif message['role'] == 'assistant' %}{{ '[RESP] ' + message['content'] + eos_token }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ ' [RESP] ' }}{% endif %}"
    },
    "NLPark/AnFeng_v3_Avocet": {
-        "bos_token": "",
-        "eos_token": "",
-        "pad_token": "",
-        "unk_token": "",
-        "chat_template": "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}"
+        "bos_token": "",
+        "eos_token": "",
+        "pad_token": "",
+        "unk_token": "",
+        "chat_template": "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}"
    },
    "microsoft/Phi-3-vision-128k-instruct": {
-        "bos_token": "",
-        "eos_token": "<|endoftext|>",
-        "pad_token": "<|endoftext|>",
-        "unk_token": "",
-        "chat_template": "{% for message in messages %}{{'<|' + message['role'] + '|>' + '\n' + message['content'] + '<|end|>\n' }}{% endfor %}{% if add_generation_prompt and messages[-1]['role'] != 'assistant' %}{{- '<|assistant|>\n' -}}{% endif %}"
+        "bos_token": "",
+        "eos_token": "<|endoftext|>",
+        "pad_token": "<|endoftext|>",
+        "unk_token": "",
+        "chat_template": "{% for message in messages %}{{'<|' + message['role'] + '|>' + '\n' + message['content'] + '<|end|>\n' }}{% endfor %}{% if add_generation_prompt and messages[-1]['role'] != 'assistant' %}{{- '<|assistant|>\n' -}}{% endif %}"
    },
    "jphme/em_german_leo_mistral": {
-        "bos_token": "",
-        "eos_token": "",
-        "pad_token": None,
-        "unk_token": "",
-        "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{% if message['role'] == 'system' %}{{message['content'] + ' '}}{% elif message['role'] == 'user' %}{{ 'USER: ' + message['content'] + ' '}}{% elif message['role'] == 'assistant' %}{{ 'ASSISTANT: ' + message['content'] + ' '}}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ 'ASSISTANT: '}}{%
endif %}" + "bos_token": "", + "eos_token": "", + "pad_token": None, + "unk_token": "", + "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{% if message['role'] == 'system' %}{{message['content'] + ' '}}{% elif message['role'] == 'user' %}{{ 'USER: ' + message['content'] + ' '}}{% elif message['role'] == 'assistant' %}{{ 'ASSISTANT: ' + message['content'] + ' '}}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ 'ASSISTANT: '}}{% endif %}" }, "nlpai-lab/KULLM3": { - "bos_token": "", - "eos_token": "", - "pad_token": "", - "unk_token": "", - "chat_template": "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = \"\ub2f9\uc2e0\uc740 \uace0\ub824\ub300\ud559\uad50 NLP&AI \uc5f0\uad6c\uc2e4\uc5d0\uc11c \ub9cc\ub4e0 AI \ucc57\ubd07\uc785\ub2c8\ub2e4. \ub2f9\uc2e0\uc758 \uc774\ub984\uc740 'KULLM'\uc73c\ub85c, \ud55c\uad6d\uc5b4\ub85c\ub294 '\uad6c\ub984'\uc744 \ub73b\ud569\ub2c8\ub2e4. \ub2f9\uc2e0\uc740 \ube44\ub3c4\ub355\uc801\uc774\uac70\ub098, \uc131\uc801\uc774\uac70\ub098, \ubd88\ubc95\uc801\uc774\uac70\ub098 \ub610\ub294 \uc0ac\ud68c \ud1b5\ub150\uc801\uc73c\ub85c \ud5c8\uc6a9\ub418\uc9c0 \uc54a\ub294 \ubc1c\uc5b8\uc740 \ud558\uc9c0 \uc54a\uc2b5\ub2c8\ub2e4. \uc0ac\uc6a9\uc790\uc640 \uc990\uac81\uac8c \ub300\ud654\ud558\uba70, \uc0ac\uc6a9\uc790\uc758 \uc751\ub2f5\uc5d0 \uac00\ub2a5\ud55c \uc815\ud655\ud558\uace0 \uce5c\uc808\ud558\uac8c \uc751\ub2f5\ud568\uc73c\ub85c\uc368 \ucd5c\ub300\ud55c \ub3c4\uc640\uc8fc\ub824\uace0 \ub178\ub825\ud569\ub2c8\ub2e4. \uc9c8\ubb38\uc774 \uc774\uc0c1\ud558\ub2e4\uba74, \uc5b4\ub5a4 \ubd80\ubd84\uc774 \uc774\uc0c1\ud55c\uc9c0 \uc124\uba85\ud569\ub2c8\ub2e4. \uac70\uc9d3 \uc815\ubcf4\ub97c \ubc1c\uc5b8\ud558\uc9c0 \uc54a\ub3c4\ub85d \uc8fc\uc758\ud569\ub2c8\ub2e4.\" %}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 and system_message != false %}{% set content = '<>\\n' + system_message + '\\n<>\\n\\n' + message['content'] %}{% else %}{% set content = message['content'] %}{% endif %}{% if message['role'] == 'user' %}{{ bos_token + '[INST] ' + content.strip() + ' [/INST]'}}{% elif message['role'] == 'system' %}{{ '<>\\n' + content.strip() + '\\n<>\\n\\n' }}{% elif message['role'] == 'assistant' %}{{ ' ' + content.strip() + ' ' + eos_token }}{% endif %}{% endfor %}" + "bos_token": "", + "eos_token": "", + "pad_token": "", + "unk_token": "", + "chat_template": "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = \"\ub2f9\uc2e0\uc740 \uace0\ub824\ub300\ud559\uad50 NLP&AI \uc5f0\uad6c\uc2e4\uc5d0\uc11c \ub9cc\ub4e0 AI \ucc57\ubd07\uc785\ub2c8\ub2e4. \ub2f9\uc2e0\uc758 \uc774\ub984\uc740 'KULLM'\uc73c\ub85c, \ud55c\uad6d\uc5b4\ub85c\ub294 '\uad6c\ub984'\uc744 \ub73b\ud569\ub2c8\ub2e4. \ub2f9\uc2e0\uc740 \ube44\ub3c4\ub355\uc801\uc774\uac70\ub098, \uc131\uc801\uc774\uac70\ub098, \ubd88\ubc95\uc801\uc774\uac70\ub098 \ub610\ub294 \uc0ac\ud68c \ud1b5\ub150\uc801\uc73c\ub85c \ud5c8\uc6a9\ub418\uc9c0 \uc54a\ub294 \ubc1c\uc5b8\uc740 \ud558\uc9c0 \uc54a\uc2b5\ub2c8\ub2e4. 
\uc0ac\uc6a9\uc790\uc640 \uc990\uac81\uac8c \ub300\ud654\ud558\uba70, \uc0ac\uc6a9\uc790\uc758 \uc751\ub2f5\uc5d0 \uac00\ub2a5\ud55c \uc815\ud655\ud558\uace0 \uce5c\uc808\ud558\uac8c \uc751\ub2f5\ud568\uc73c\ub85c\uc368 \ucd5c\ub300\ud55c \ub3c4\uc640\uc8fc\ub824\uace0 \ub178\ub825\ud569\ub2c8\ub2e4. \uc9c8\ubb38\uc774 \uc774\uc0c1\ud558\ub2e4\uba74, \uc5b4\ub5a4 \ubd80\ubd84\uc774 \uc774\uc0c1\ud55c\uc9c0 \uc124\uba85\ud569\ub2c8\ub2e4. \uac70\uc9d3 \uc815\ubcf4\ub97c \ubc1c\uc5b8\ud558\uc9c0 \uc54a\ub3c4\ub85d \uc8fc\uc758\ud569\ub2c8\ub2e4.\" %}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 and system_message != false %}{% set content = '<>\\n' + system_message + '\\n<>\\n\\n' + message['content'] %}{% else %}{% set content = message['content'] %}{% endif %}{% if message['role'] == 'user' %}{{ bos_token + '[INST] ' + content.strip() + ' [/INST]'}}{% elif message['role'] == 'system' %}{{ '<>\\n' + content.strip() + '\\n<>\\n\\n' }}{% elif message['role'] == 'assistant' %}{{ ' ' + content.strip() + ' ' + eos_token }}{% endif %}{% endfor %}" }, "HuggingFaceH4/zephyr-7b-gemma-sft-v0.1": { - "bos_token": "", - "eos_token": "", - "pad_token": "", - "unk_token": "", - "chat_template": "{% if messages[0]['role'] == 'user' or messages[0]['role'] == 'system' %}{{ bos_token }}{% endif %}{% for message in messages %}{{ '<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n' }}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% elif messages[-1]['role'] == 'assistant' %}{{ eos_token }}{% endif %}" + "bos_token": "", + "eos_token": "", + "pad_token": "", + "unk_token": "", + "chat_template": "{% if messages[0]['role'] == 'user' or messages[0]['role'] == 'system' %}{{ bos_token }}{% endif %}{% for message in messages %}{{ '<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n' }}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% elif messages[-1]['role'] == 'assistant' %}{{ eos_token }}{% endif %}" }, "MediaTek-Research/Breeze-7B-Instruct-v0_1": { - "bos_token": "", - "eos_token": "", - "pad_token": "", - "unk_token": "", - "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'].strip() %}{% else %}{% set loop_messages = messages %}{% set system_message = 'You are a helpful AI assistant built by MediaTek Research. The user you are helping speaks Traditional Chinese and comes from Taiwan.' %}{% endif %}{{ bos_token }}{{ system_message }} {% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/... 
or system/user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST] ' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + ' ' }}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}" + "bos_token": "", + "eos_token": "", + "pad_token": "", + "unk_token": "", + "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'].strip() %}{% else %}{% set loop_messages = messages %}{% set system_message = 'You are a helpful AI assistant built by MediaTek Research. The user you are helping speaks Traditional Chinese and comes from Taiwan.' %}{% endif %}{{ bos_token }}{{ system_message }} {% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/... or system/user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST] ' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + ' ' }}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}" }, "microsoft/DialoGPT-large": { - "bos_token": "<|endoftext|>", - "eos_token": "<|endoftext|>", - "pad_token": None, - "unk_token": "<|endoftext|>", - "chat_template": "{% for message in messages %}{{ message.content }}{{ eos_token }}{% endfor %}" + "bos_token": "<|endoftext|>", + "eos_token": "<|endoftext|>", + "pad_token": None, + "unk_token": "<|endoftext|>", + "chat_template": "{% for message in messages %}{{ message.content }}{{ eos_token }}{% endfor %}" }, "meta-llama/Meta-Llama-Guard-2-8B": { - "bos_token": "<|begin_of_text|>", - "eos_token": "<|end_of_text|>", - "pad_token": None, - "unk_token": None, - "chat_template": "{% if messages|length % 2 == 0 %}{% set role = 'Agent' %}{% else %}{% set role = 'User' %}{% endif %}{{ \"[INST] Task: Check if there is unsafe content in '\" + role + \"' messages in conversations according our safety policy with the below categories.\n\n\nS1: Violent Crimes. \nS2: Non-Violent Crimes. \nS3: Sex Crimes. \nS4: Child Exploitation. \nS5: Specialized Advice. \nS6: Privacy. \nS7: Intellectual Property. \nS8: Indiscriminate Weapons. \nS9: Hate. \nS10: Self-Harm. \nS11: Sexual Content. \n\n\n\n\n\" }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{% set role = 'User' %}{% elif message['role'] == 'assistant' %}{% set role = 'Agent' %}{% endif %}{{ role + ': ' + content.strip() + '\n\n' }}{% endfor %}{{ \"\n\nProvide your safety assessment for ONLY THE LAST \" + role + \" message in the above conversation:\n - First line must read 'safe' or 'unsafe'.\n - If unsafe, a second line must include a comma-separated list of violated categories. 
[/INST]\" }}" + "bos_token": "<|begin_of_text|>", + "eos_token": "<|end_of_text|>", + "pad_token": None, + "unk_token": None, + "chat_template": "{% if messages|length % 2 == 0 %}{% set role = 'Agent' %}{% else %}{% set role = 'User' %}{% endif %}{{ \"[INST] Task: Check if there is unsafe content in '\" + role + \"' messages in conversations according our safety policy with the below categories.\n\n\nS1: Violent Crimes. \nS2: Non-Violent Crimes. \nS3: Sex Crimes. \nS4: Child Exploitation. \nS5: Specialized Advice. \nS6: Privacy. \nS7: Intellectual Property. \nS8: Indiscriminate Weapons. \nS9: Hate. \nS10: Self-Harm. \nS11: Sexual Content. \n\n\n\n\n\" }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{% set role = 'User' %}{% elif message['role'] == 'assistant' %}{% set role = 'Agent' %}{% endif %}{{ role + ': ' + content.strip() + '\n\n' }}{% endfor %}{{ \"\n\nProvide your safety assessment for ONLY THE LAST \" + role + \" message in the above conversation:\n - First line must read 'safe' or 'unsafe'.\n - If unsafe, a second line must include a comma-separated list of violated categories. [/INST]\" }}" }, "chinoll/Yi-6b-200k-dpo": { - "bos_token": "<|startoftext|>", - "eos_token": "<|endoftext|>", - "pad_token": "", - "unk_token": "", - "chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|Human|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|System|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|Assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|Assistant|>' }}\n{% endif %}\n{% endfor %}" + "bos_token": "<|startoftext|>", + "eos_token": "<|endoftext|>", + "pad_token": "", + "unk_token": "", + "chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|Human|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|System|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|Assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|Assistant|>' }}\n{% endif %}\n{% endfor %}" }, "shanchen/llama3-8B-slerp-biomed-chat-chinese": { - "bos_token": "<|begin_of_text|>", - "eos_token": "<|eot_id|>", - "pad_token": "<|eot_id|>", - "unk_token": None, - "chat_template": "{{ '<|begin_of_text|>' }}{% set system_message = 'You are Llama3-8B-Chinese-Chat-v2, finetuned from Llama3-8B-Instruct on Chinese-English dataset using the ORPO algorithm. You are a helpful assistant.' 
%}{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% set loop_messages = messages[1:] %}{% else %}{% set loop_messages = messages %}{% endif %}{% if system_message is defined %}{{ '<|start_header_id|>system<|end_header_id|>\n\n' + system_message | trim + '<|eot_id|>' }}{% endif %}{% for message in loop_messages %}{{ '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% endif %}" + "bos_token": "<|begin_of_text|>", + "eos_token": "<|eot_id|>", + "pad_token": "<|eot_id|>", + "unk_token": None, + "chat_template": "{{ '<|begin_of_text|>' }}{% set system_message = 'You are Llama3-8B-Chinese-Chat-v2, finetuned from Llama3-8B-Instruct on Chinese-English dataset using the ORPO algorithm. You are a helpful assistant.' %}{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% set loop_messages = messages[1:] %}{% else %}{% set loop_messages = messages %}{% endif %}{% if system_message is defined %}{{ '<|start_header_id|>system<|end_header_id|>\n\n' + system_message | trim + '<|eot_id|>' }}{% endif %}{% for message in loop_messages %}{{ '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% endif %}" }, "MLP-KTLim/llama-3-Korean-Bllossom-8B": { - "bos_token": "<|begin_of_text|>", - "eos_token": "<|eot_id|>", - "pad_token": "<|end_of_text|>", - "unk_token": None, - "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% endif %}" + "bos_token": "<|begin_of_text|>", + "eos_token": "<|eot_id|>", + "pad_token": "<|end_of_text|>", + "unk_token": None, + "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% endif %}" }, "UnfilteredAI/UNfilteredAI-1B": { - "bos_token": "", - "eos_token": "", - "pad_token": "", - "unk_token": "", - "chat_template": "{% for message in messages %}{% if message['role'] == 'user' %}{{ '<|user|>' + message['content'] + eos_token }}{% elif message['role'] == 'system' %}{{ '<|system|>' + message['content'] + eos_token }}{% elif message['role'] == 'assistant' %}{{ '<|assistant|>' + message['content'] + eos_token }}{% endif %}{% if loop.last and add_generation_prompt %}{{ '<|assistant|>' }}{% endif %}{% endfor %}" + "bos_token": "", + "eos_token": "", + "pad_token": "", + "unk_token": "", + "chat_template": "{% for message in messages %}{% if message['role'] == 'user' %}{{ '<|user|>' + message['content'] + eos_token }}{% elif 
message['role'] == 'system' %}{{ '<|system|>' + message['content'] + eos_token }}{% elif message['role'] == 'assistant' %}{{ '<|assistant|>' + message['content'] + eos_token }}{% endif %}{% if loop.last and add_generation_prompt %}{{ '<|assistant|>' }}{% endif %}{% endfor %}" }, "abacusai/Smaug-Mixtral-v0.1": { - "bos_token": "", - "eos_token": "", - "pad_token": None, - "unk_token": "", - "chat_template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{%if message['content'][0] == '$' %} {% endif %}{{ message['content'] + eos_token}}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}" + "bos_token": "", + "eos_token": "", + "pad_token": None, + "unk_token": "", + "chat_template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{%if message['content'][0] == '$' %} {% endif %}{{ message['content'] + eos_token}}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}" }, "ProbeMedicalYonseiMAILab/medllama3-v20": { - "bos_token": "<|begin_of_text|>", - "eos_token": "<|eot_id|>", - "pad_token": "<|eot_id|>", - "unk_token": None, - "chat_template": "{% for message in messages %}{% if message['role'] == 'system' %}{{ message['content'] }}{% elif message['role'] == 'user' %}{{ '\n\nHuman: ' + message['content'] + eos_token }}{% elif message['role'] == 'assistant' %}{{ '\n\nAssistant: ' + message['content'] + eos_token }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '\n\nAssistant: ' }}{% endif %}" + "bos_token": "<|begin_of_text|>", + "eos_token": "<|eot_id|>", + "pad_token": "<|eot_id|>", + "unk_token": None, + "chat_template": "{% for message in messages %}{% if message['role'] == 'system' %}{{ message['content'] }}{% elif message['role'] == 'user' %}{{ '\n\nHuman: ' + message['content'] + eos_token }}{% elif message['role'] == 'assistant' %}{{ '\n\nAssistant: ' + message['content'] + eos_token }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '\n\nAssistant: ' }}{% endif %}" }, "vinai/PhoGPT-4B-Chat": { - "bos_token": "", - "eos_token": "", - "pad_token": "", - "unk_token": "", - "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{% if message['role'] == 'user' and loop.first %}{{ '### C\u00e2u h\u1ecfi: ' + message['content'].strip() }}{% elif message['role'] == 'user' %}{{ '\n### C\u00e2u h\u1ecfi: ' + message['content'].strip() }}{% elif message['role'] == 'assistant' %}{{ '\n### Tr\u1ea3 l\u1eddi: ' + message['content'] + eos_token }}{% endif %}{% if loop.last %}{% if message['role'] == 'user' and add_generation_prompt %}{{ '\n### Tr\u1ea3 l\u1eddi:' }}{% endif %}{% endif %}{% endfor %}" + "bos_token": "", + "eos_token": "", + "pad_token": "", + "unk_token": "", + "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{% if message['role'] 
== 'user' and loop.first %}{{ '### C\u00e2u h\u1ecfi: ' + message['content'].strip() }}{% elif message['role'] == 'user' %}{{ '\n### C\u00e2u h\u1ecfi: ' + message['content'].strip() }}{% elif message['role'] == 'assistant' %}{{ '\n### Tr\u1ea3 l\u1eddi: ' + message['content'] + eos_token }}{% endif %}{% if loop.last %}{% if message['role'] == 'user' and add_generation_prompt %}{{ '\n### Tr\u1ea3 l\u1eddi:' }}{% endif %}{% endif %}{% endfor %}" }, "lucyknada/microsoft_WizardLM-2-7B": { - "bos_token": "", - "eos_token": "", - "pad_token": "", - "unk_token": "", - "chat_template": "{{ bos_token + (messages[0]['content'].strip() + '\n\n' if messages[0]['role'] == 'system' else '') }}{% for message in (messages[1:] if messages[0]['role'] == 'system' else messages) %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ 'USER: ' + message['content'].strip() + '\n' }}{% elif message['role'] == 'assistant' %}{{ 'ASSISTANT: ' + message['content'].strip() + eos_token + '\n' }}{% endif %}{% if loop.last and message['role'] == 'user' and add_generation_prompt %}{{ 'ASSISTANT:' }}{% endif %}{% endfor %}" + "bos_token": "", + "eos_token": "", + "pad_token": "", + "unk_token": "", + "chat_template": "{{ bos_token + (messages[0]['content'].strip() + '\n\n' if messages[0]['role'] == 'system' else '') }}{% for message in (messages[1:] if messages[0]['role'] == 'system' else messages) %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ 'USER: ' + message['content'].strip() + '\n' }}{% elif message['role'] == 'assistant' %}{{ 'ASSISTANT: ' + message['content'].strip() + eos_token + '\n' }}{% endif %}{% if loop.last and message['role'] == 'user' and add_generation_prompt %}{{ 'ASSISTANT:' }}{% endif %}{% endfor %}" }, "bigcode/starcoder2-15b-instruct-v0.1": { - "bos_token": "<|endoftext|>", - "eos_token": "<|endoftext|>", - "pad_token": None, - "unk_token": "<|endoftext|>", - "chat_template": "{{bos_token}}{{'You are an exceptionally intelligent coding assistant that consistently delivers accurate and reliable responses to user instructions.\n\n'}}\n{%- for message in messages %}\n {%- if message['role'] == 'system' %}\n {{ raise_exception('System messages are not allowed in this template.') }}\n {%- else %}\n {%- if message['role'] == 'user' %}\n{{'### Instruction\n' + message['content'] + '\n\n'}}\n {%- else %}\n{{'### Response\n' + message['content'] + eos_token + '\n\n'}}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{{'### Response\n'}}" + "bos_token": "<|endoftext|>", + "eos_token": "<|endoftext|>", + "pad_token": None, + "unk_token": "<|endoftext|>", + "chat_template": "{{bos_token}}{{'You are an exceptionally intelligent coding assistant that consistently delivers accurate and reliable responses to user instructions.\n\n'}}\n{%- for message in messages %}\n {%- if message['role'] == 'system' %}\n {{ raise_exception('System messages are not allowed in this template.') }}\n {%- else %}\n {%- if message['role'] == 'user' %}\n{{'### Instruction\n' + message['content'] + '\n\n'}}\n {%- else %}\n{{'### Response\n' + message['content'] + eos_token + '\n\n'}}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{{'### Response\n'}}" }, "AliAbdelrasheed/maqa_llama_4bit": { - "bos_token": "<|begin_of_text|>", - "eos_token": 
"<|eot_id|>", - "pad_token": "<|reserved_special_token_250|>", - "unk_token": None, - "chat_template": "{% for message in messages %}{% if message['from'] == 'human' %}{{ '<|start_header_id|>user<|end_header_id|>\n\n' + message['value'] | trim + '<|eot_id|>' }}{% elif message['from'] == 'gpt' %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' + message['value'] | trim + '<|eot_id|>' }}{% else %}{{ '<|start_header_id|>' + message['from'] + '<|end_header_id|>\n\n' + message['value'] | trim + '<|eot_id|>' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% endif %}" + "bos_token": "<|begin_of_text|>", + "eos_token": "<|eot_id|>", + "pad_token": "<|reserved_special_token_250|>", + "unk_token": None, + "chat_template": "{% for message in messages %}{% if message['from'] == 'human' %}{{ '<|start_header_id|>user<|end_header_id|>\n\n' + message['value'] | trim + '<|eot_id|>' }}{% elif message['from'] == 'gpt' %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' + message['value'] | trim + '<|eot_id|>' }}{% else %}{{ '<|start_header_id|>' + message['from'] + '<|end_header_id|>\n\n' + message['value'] | trim + '<|eot_id|>' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% endif %}" }, "lightonai/alfred-40b-1023": { - "bos_token": None, - "eos_token": "", - "pad_token": None, - "unk_token": None, - "chat_template": "{% for message in messages %}{% if message['role'] == 'user' %}{{ '' + message['content'].strip() + '' }}{% elif message['role'] == 'system' %}{{ '' + message['content'].strip() + '' }}{% elif message['role'] == 'assistant' %}{{ '' + message['content'] + '' }}{% else %}{{ raise_exception('Only system, user and assistant roles are supported.') }}{% endif %}{% if loop.last and add_generation_prompt %}{{ '' }}{% endif %}{% endfor %}" + "bos_token": None, + "eos_token": "", + "pad_token": None, + "unk_token": None, + "chat_template": "{% for message in messages %}{% if message['role'] == 'user' %}{{ '' + message['content'].strip() + '' }}{% elif message['role'] == 'system' %}{{ '' + message['content'].strip() + '' }}{% elif message['role'] == 'assistant' %}{{ '' + message['content'] + '' }}{% else %}{{ raise_exception('Only system, user and assistant roles are supported.') }}{% endif %}{% if loop.last and add_generation_prompt %}{{ '' }}{% endif %}{% endfor %}" }, "aloobun/CosmicBun-8B": { - "bos_token": "<|begin_of_text|>", - "eos_token": "<|end_of_text|>", - "pad_token": "<|end_of_text|>", - "unk_token": None, - "chat_template": "{%- set ns = namespace(found=false) -%}{%- for message in messages -%}{%- if message['role'] == 'system' -%}{%- set ns.found = true -%}{%- endif -%}{%- endfor -%}{%- for message in messages %}{%- if message['role'] == 'system' -%}{{- '<|im_start|>system\n' + message['content'].rstrip() + '<|im_end|>\n' -}}{%- else -%}{%- if message['role'] == 'user' -%}{{-'<|im_start|>user\n' + message['content'].rstrip() + '<|im_end|>\n'-}}{%- else -%}{{-'<|im_start|>assistant\n' + message['content'] + '<|im_end|>\n' -}}{%- endif -%}{%- endif -%}{%- endfor -%}{%- if add_generation_prompt -%}{{-'<|im_start|>assistant\n'-}}{%- endif -%}" + "bos_token": "<|begin_of_text|>", + "eos_token": "<|end_of_text|>", + "pad_token": "<|end_of_text|>", + "unk_token": None, + "chat_template": "{%- set ns = namespace(found=false) -%}{%- for message in messages -%}{%- if message['role'] == 'system' -%}{%- set ns.found = true -%}{%- endif -%}{%- endfor -%}{%- for 
message in messages %}{%- if message['role'] == 'system' -%}{{- '<|im_start|>system\n' + message['content'].rstrip() + '<|im_end|>\n' -}}{%- else -%}{%- if message['role'] == 'user' -%}{{-'<|im_start|>user\n' + message['content'].rstrip() + '<|im_end|>\n'-}}{%- else -%}{{-'<|im_start|>assistant\n' + message['content'] + '<|im_end|>\n' -}}{%- endif -%}{%- endif -%}{%- endfor -%}{%- if add_generation_prompt -%}{{-'<|im_start|>assistant\n'-}}{%- endif -%}" }, "Undi95/Mixtral-8x7B-MoE-RP-Story": { - "bos_token": "", - "eos_token": "", - "pad_token": None, - "unk_token": "", - "chat_template": "{%- for idx in range(0, messages|length) -%}\n{%- if messages[idx]['role'] == 'user' -%}\n{%- if idx > 1 -%}\n{{- bos_token + '[INST] ' + messages[idx]['content'] + ' [/INST]' -}}\n{%- else -%}\n{{- messages[idx]['content'] + ' [/INST]' -}}\n{%- endif -%}\n{% elif messages[idx]['role'] == 'system' %}\n{{- '[INST] <>\\n' + messages[idx]['content'] + '\\n<>\\n\\n' -}}\n{%- elif messages[idx]['role'] == 'assistant' -%}\n{{- ' ' + messages[idx]['content'] + ' ' + eos_token -}}\n{% endif %}\n{% endfor %}\n" + "bos_token": "", + "eos_token": "", + "pad_token": None, + "unk_token": "", + "chat_template": "{%- for idx in range(0, messages|length) -%}\n{%- if messages[idx]['role'] == 'user' -%}\n{%- if idx > 1 -%}\n{{- bos_token + '[INST] ' + messages[idx]['content'] + ' [/INST]' -}}\n{%- else -%}\n{{- messages[idx]['content'] + ' [/INST]' -}}\n{%- endif -%}\n{% elif messages[idx]['role'] == 'system' %}\n{{- '[INST] <>\\n' + messages[idx]['content'] + '\\n<>\\n\\n' -}}\n{%- elif messages[idx]['role'] == 'assistant' -%}\n{{- ' ' + messages[idx]['content'] + ' ' + eos_token -}}\n{% endif %}\n{% endfor %}\n" }, "TIGER-Lab/MAmmoTH2-8B-Plus": { - "bos_token": "<|begin_of_text|>", - "eos_token": "<|eot_id|>", - "pad_token": "<|eot_id|>", - "unk_token": None, - "chat_template": "{% set system_message = 'You are a helpful assistant.' %}{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ '<|begin_of_text|>' + '<|start_header_id|>system<|end_header_id|>\\n\\n' + system_message + '<|eot_id|>' }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '<|start_header_id|>user<|end_header_id|>\\n\\n' + content + '<|eot_id|><|start_header_id|>assistant<|end_header_id|>\\n\\n' }}{% elif message['role'] == 'assistant' %}{{ content + '<|eot_id|>' }}{% endif %}{% endfor %}" + "bos_token": "<|begin_of_text|>", + "eos_token": "<|eot_id|>", + "pad_token": "<|eot_id|>", + "unk_token": None, + "chat_template": "{% set system_message = 'You are a helpful assistant.' 
%}{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ '<|begin_of_text|>' + '<|start_header_id|>system<|end_header_id|>\\n\\n' + system_message + '<|eot_id|>' }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '<|start_header_id|>user<|end_header_id|>\\n\\n' + content + '<|eot_id|><|start_header_id|>assistant<|end_header_id|>\\n\\n' }}{% elif message['role'] == 'assistant' %}{{ content + '<|eot_id|>' }}{% endif %}{% endfor %}" }, "codellama/CodeLlama-70b-Instruct-hf": { - "bos_token": "", - "eos_token": "", - "pad_token": None, - "unk_token": "", - "chat_template": "{% if messages[0]['role'] == 'system' %}{% set user_index = 1 %}{% else %}{% set user_index = 0 %}{% endif %}{% for message in messages %}{% if (message['role'] == 'user') != ((loop.index0 + user_index) % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 %}{{ '' }}{% endif %}{% set content = 'Source: ' + message['role'] + '\n\n ' + message['content'] | trim %}{{ content + ' ' }}{% endfor %}{{'Source: assistant\nDestination: user\n\n '}}" + "bos_token": "", + "eos_token": "", + "pad_token": None, + "unk_token": "", + "chat_template": "{% if messages[0]['role'] == 'system' %}{% set user_index = 1 %}{% else %}{% set user_index = 0 %}{% endif %}{% for message in messages %}{% if (message['role'] == 'user') != ((loop.index0 + user_index) % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 %}{{ '' }}{% endif %}{% set content = 'Source: ' + message['role'] + '\n\n ' + message['content'] | trim %}{{ content + ' ' }}{% endfor %}{{'Source: assistant\nDestination: user\n\n '}}" }, "stephenlzc/Mistral-7B-v0.3-Chinese-Chat-uncensored": { - "bos_token": "", - "eos_token": "", - "pad_token": "[control_768]", - "unk_token": "", - "chat_template": "{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{{ '' + system_message }}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ ' [INST] ' + content + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ content + '' }}{% endif %}{% endfor %}" + "bos_token": "", + "eos_token": "", + "pad_token": "[control_768]", + "unk_token": "", + "chat_template": "{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{{ '' + system_message }}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ ' [INST] ' + content + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ content + '' }}{% endif %}{% endfor %}" }, "gorilla-llm/gorilla-openfunctions-v2": { - "bos_token": "<\uff5cbegin\u2581of\u2581sentence\uff5c>", - "eos_token": "<|EOT|>", - "pad_token": "<\uff5cend\u2581of\u2581sentence\uff5c>", - "unk_token": None, - "chat_template": "{% if not add_generation_prompt is defined %}\n{% set add_generation_prompt = false %}\n{% endif %}\n{%- set ns = namespace(found=false) -%}\n{%- for message in messages -%}\n {%- if message['role'] == 'system' -%}\n {%- set ns.found = true -%}\n {%- endif -%}\n{%- endfor -%}\n{{bos_token}}{%- if not ns.found -%}\n{{'You are an AI programming assistant, utilizing the Gorilla LLM model, developed by Gorilla LLM, and you only answer questions related to computer 
science. For politically sensitive questions, security and privacy issues, and other non-computer science questions, you will refuse to answer\\n'}}\n{%- endif %}\n{%- for message in messages %}\n {%- if message['role'] == 'system' %}\n{{ message['content'] }}\n {%- else %}\n {%- if message['role'] == 'user' %}\n{{'### Instruction:\\n' + message['content'] + '\\n'}}\n {%- else %}\n{{'### Response:\\n' + message['content'] + '\\n<|EOT|>\\n'}}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{% if add_generation_prompt %}\n{{'### Response:'}}\n{% endif %}" + "bos_token": "<\uff5cbegin\u2581of\u2581sentence\uff5c>", + "eos_token": "<|EOT|>", + "pad_token": "<\uff5cend\u2581of\u2581sentence\uff5c>", + "unk_token": None, + "chat_template": "{% if not add_generation_prompt is defined %}\n{% set add_generation_prompt = false %}\n{% endif %}\n{%- set ns = namespace(found=false) -%}\n{%- for message in messages -%}\n {%- if message['role'] == 'system' -%}\n {%- set ns.found = true -%}\n {%- endif -%}\n{%- endfor -%}\n{{bos_token}}{%- if not ns.found -%}\n{{'You are an AI programming assistant, utilizing the Gorilla LLM model, developed by Gorilla LLM, and you only answer questions related to computer science. For politically sensitive questions, security and privacy issues, and other non-computer science questions, you will refuse to answer\\n'}}\n{%- endif %}\n{%- for message in messages %}\n {%- if message['role'] == 'system' %}\n{{ message['content'] }}\n {%- else %}\n {%- if message['role'] == 'user' %}\n{{'### Instruction:\\n' + message['content'] + '\\n'}}\n {%- else %}\n{{'### Response:\\n' + message['content'] + '\\n<|EOT|>\\n'}}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{% if add_generation_prompt %}\n{{'### Response:'}}\n{% endif %}" }, "ghost-x/ghost-7b-alpha": { - "bos_token": "", - "eos_token": "", - "pad_token": "", - "unk_token": "", - "chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'plugins' %}\n{{ '<|plugins|>\n' + message['content'] + '\n\nStandards for using the tool must comply with the following syntax:\n[execute]({\"type\": string, \"function\": string, \"arguments\": object})' + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'execute' %}\n{{ '<|assistant|>\n[execute](' + message['content'] + ')' + eos_token }}\n{% elif message['role'] == 'response' %}\n{{ '<|tool|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}" + "bos_token": "", + "eos_token": "", + "pad_token": "", + "unk_token": "", + "chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'plugins' %}\n{{ '<|plugins|>\n' + message['content'] + '\n\nStandards for using the tool must comply with the following syntax:\n[execute]({\"type\": string, \"function\": string, \"arguments\": object})' + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'execute' %}\n{{ '<|assistant|>\n[execute](' + message['content'] + ')' + eos_token 
}}\n{% elif message['role'] == 'response' %}\n{{ '<|tool|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}" }, "winninghealth/WiNGPT2-Llama-3-8B-Chat": { - "bos_token": "<|begin_of_text|>", - "eos_token": "<|end_of_text|>", - "pad_token": "<|end_of_text|>", - "unk_token": None, - "chat_template": "{% for message in messages %}{% if message['role'] == 'system' %}System\uff1a{% endif %}{% if message['role'] == 'user' %}User\uff1a{% endif %}{% if message['role'] == 'assistant' %}Assistant\uff1a{% endif %}{{ message['content'] }}<|end_of_text|>\n {% endfor %}Assistant\uff1a" + "bos_token": "<|begin_of_text|>", + "eos_token": "<|end_of_text|>", + "pad_token": "<|end_of_text|>", + "unk_token": None, + "chat_template": "{% for message in messages %}{% if message['role'] == 'system' %}System\uff1a{% endif %}{% if message['role'] == 'user' %}User\uff1a{% endif %}{% if message['role'] == 'assistant' %}Assistant\uff1a{% endif %}{{ message['content'] }}<|end_of_text|>\n {% endfor %}Assistant\uff1a" }, "BramVanroy/Llama-2-13b-chat-dutch": { - "bos_token": "", - "eos_token": "", - "pad_token": None, - "unk_token": "", - "chat_template": "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% elif not '<>' in messages[0]['content'] %}{% set loop_messages = messages %}{%set system_message = 'Je bent een behulpzame, respectvolle en eerlijke assistent. Antwoord altijd zo behulpzaam mogelijk. Je antwoorden mogen geen schadelijke, onethische, racistische, seksistische, gevaarlijke of illegale inhoud bevatten. Zorg ervoor dat je antwoorden sociaal onbevooroordeeld en positief van aard zijn.\n\nAls een vraag nergens op slaat of feitelijk niet coherent is, leg dan uit waarom in plaats van iets niet correct te antwoorden. Als je het antwoord op een vraag niet weet, deel dan geen onjuiste informatie.' %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 and system_message != false %}{% set content = '<>\n' + system_message + '\n<>\n\n' + message['content'] %}{% else %}{% set content = message['content'] %}{% endif %}{% if message['role'] == 'user' %}{{ bos_token + '[INST] ' + content.strip() + ' [/INST]' }}{% elif message['role'] == 'system' %}{{ '<>\n' + content.strip() + '\n<>\n\n' }}{% elif message['role'] == 'assistant' %}{{ ' ' + content.strip() + ' ' + eos_token }}{% endif %}{% endfor %}" + "bos_token": "", + "eos_token": "", + "pad_token": None, + "unk_token": "", + "chat_template": "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% elif not '<>' in messages[0]['content'] %}{% set loop_messages = messages %}{%set system_message = 'Je bent een behulpzame, respectvolle en eerlijke assistent. Antwoord altijd zo behulpzaam mogelijk. Je antwoorden mogen geen schadelijke, onethische, racistische, seksistische, gevaarlijke of illegale inhoud bevatten. Zorg ervoor dat je antwoorden sociaal onbevooroordeeld en positief van aard zijn.\n\nAls een vraag nergens op slaat of feitelijk niet coherent is, leg dan uit waarom in plaats van iets niet correct te antwoorden. 
Als je het antwoord op een vraag niet weet, deel dan geen onjuiste informatie.' %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 and system_message != false %}{% set content = '<>\n' + system_message + '\n<>\n\n' + message['content'] %}{% else %}{% set content = message['content'] %}{% endif %}{% if message['role'] == 'user' %}{{ bos_token + '[INST] ' + content.strip() + ' [/INST]' }}{% elif message['role'] == 'system' %}{{ '<>\n' + content.strip() + '\n<>\n\n' }}{% elif message['role'] == 'assistant' %}{{ ' ' + content.strip() + ' ' + eos_token }}{% endif %}{% endfor %}" }, "THUDM/chatglm3-6b": { - "bos_token": None, - "eos_token": "", - "pad_token": "", - "unk_token": "", - "chat_template": "{% for message in messages %}{% if loop.first %}[gMASK]sop<|{{ message['role'] }}|>\n {{ message['content'] }}{% else %}<|{{ message['role'] }}|>\n {{ message['content'] }}{% endif %}{% endfor %}{% if add_generation_prompt %}<|assistant|>{% endif %}" + "bos_token": None, + "eos_token": "", + "pad_token": "", + "unk_token": "", + "chat_template": "{% for message in messages %}{% if loop.first %}[gMASK]sop<|{{ message['role'] }}|>\n {{ message['content'] }}{% else %}<|{{ message['role'] }}|>\n {{ message['content'] }}{% endif %}{% endfor %}{% if add_generation_prompt %}<|assistant|>{% endif %}" }, "microsoft/Phi-3-mini-4k-instruct": { - "bos_token": "", - "eos_token": "<|endoftext|>", - "pad_token": "<|endoftext|>", - "unk_token": "", - "chat_template": "{% for message in messages %}{% if message['role'] == 'system' %}{{'<|system|>\n' + message['content'] + '<|end|>\n'}}{% elif message['role'] == 'user' %}{{'<|user|>\n' + message['content'] + '<|end|>\n'}}{% elif message['role'] == 'assistant' %}{{'<|assistant|>\n' + message['content'] + '<|end|>\n'}}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>\n' }}{% else %}{{ eos_token }}{% endif %}" + "bos_token": "", + "eos_token": "<|endoftext|>", + "pad_token": "<|endoftext|>", + "unk_token": "", + "chat_template": "{% for message in messages %}{% if message['role'] == 'system' %}{{'<|system|>\n' + message['content'] + '<|end|>\n'}}{% elif message['role'] == 'user' %}{{'<|user|>\n' + message['content'] + '<|end|>\n'}}{% elif message['role'] == 'assistant' %}{{'<|assistant|>\n' + message['content'] + '<|end|>\n'}}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>\n' }}{% else %}{{ eos_token }}{% endif %}" }, "mistralai/Mistral-7B-Instruct-v0.1": { - "bos_token": "", - "eos_token": "", - "pad_token": None, - "unk_token": "", - "chat_template": "{%- if messages[0]['role'] == 'system' %}\n {%- set system_message = messages[0]['content'] %}\n {%- set loop_messages = messages[1:] %}\n{%- else %}\n {%- set loop_messages = messages %}\n{%- endif %}\n\n{{- bos_token }}\n{%- for message in loop_messages %}\n {%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}\n {{- raise_exception('After the optional system message, conversation roles must alternate user/assistant/user/assistant/...') }}\n {%- endif %}\n {%- if message['role'] == 'user' %}\n {%- if loop.first and system_message is defined %}\n {{- ' [INST] ' + system_message + '\\n\\n' + message['content'] + ' [/INST]' }}\n {%- else %}\n {{- ' [INST] ' + message['content'] + ' [/INST]' }}\n {%- endif %}\n 
{%- elif message['role'] == 'assistant' %}\n {{- ' ' + message['content'] + eos_token}}\n {%- else %}\n {{- raise_exception('Only user and assistant roles are supported, with the exception of an initial optional system message!') }}\n {%- endif %}\n{%- endfor %}\n" + "bos_token": "", + "eos_token": "", + "pad_token": None, + "unk_token": "", + "chat_template": "{%- if messages[0]['role'] == 'system' %}\n {%- set system_message = messages[0]['content'] %}\n {%- set loop_messages = messages[1:] %}\n{%- else %}\n {%- set loop_messages = messages %}\n{%- endif %}\n\n{{- bos_token }}\n{%- for message in loop_messages %}\n {%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}\n {{- raise_exception('After the optional system message, conversation roles must alternate user/assistant/user/assistant/...') }}\n {%- endif %}\n {%- if message['role'] == 'user' %}\n {%- if loop.first and system_message is defined %}\n {{- ' [INST] ' + system_message + '\\n\\n' + message['content'] + ' [/INST]' }}\n {%- else %}\n {{- ' [INST] ' + message['content'] + ' [/INST]' }}\n {%- endif %}\n {%- elif message['role'] == 'assistant' %}\n {{- ' ' + message['content'] + eos_token}}\n {%- else %}\n {{- raise_exception('Only user and assistant roles are supported, with the exception of an initial optional system message!') }}\n {%- endif %}\n{%- endfor %}\n" }, "meta-llama/Meta-Llama-3.1-8B-Instruct": { - "bos_token": "<|begin_of_text|>", - "eos_token": "<|eot_id|>", - "pad_token": None, - "unk_token": None, - "chat_template": "{{- bos_token }}\n{%- if custom_tools is defined %}\n {%- set tools = custom_tools %}\n{%- endif %}\n{%- if not tools_in_user_message is defined %}\n {%- set tools_in_user_message = true %}\n{%- endif %}\n{%- if not date_string is defined %}\n {%- set date_string = \"26 Jul 2024\" %}\n{%- endif %}\n{%- if not tools is defined %}\n {%- set tools = none %}\n{%- endif %}\n\n{#- This block extracts the system message, so we can slot it into the right place. #}\n{%- if messages[0]['role'] == 'system' %}\n {%- set system_message = messages[0]['content']|trim %}\n {%- set messages = messages[1:] %}\n{%- else %}\n {%- set system_message = \"\" %}\n{%- endif %}\n\n{#- System message + builtin tools #}\n{{- \"<|start_header_id|>system<|end_header_id|>\\n\\n\" }}\n{%- if builtin_tools is defined or tools is not none %}\n {{- \"Environment: ipython\\n\" }}\n{%- endif %}\n{%- if builtin_tools is defined %}\n {{- \"Tools: \" + builtin_tools | reject('equalto', 'code_interpreter') | join(\", \") + \"\\n\\n\"}}\n{%- endif %}\n{{- \"Cutting Knowledge Date: December 2023\\n\" }}\n{{- \"Today Date: \" + date_string + \"\\n\\n\" }}\n{%- if tools is not none and not tools_in_user_message %}\n {{- \"You have access to the following functions. To call a function, please respond with JSON for a function call.\" }}\n {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' 
}}\n {{- \"Do not use variables.\\n\\n\" }}\n {%- for t in tools %}\n {{- t | tojson(indent=4) }}\n {{- \"\\n\\n\" }}\n {%- endfor %}\n{%- endif %}\n{{- system_message }}\n{{- \"<|eot_id|>\" }}\n\n{#- Custom tools are passed in a user message with some extra guidance #}\n{%- if tools_in_user_message and not tools is none %}\n {#- Extract the first user message so we can plug it in here #}\n {%- if messages | length != 0 %}\n {%- set first_user_message = messages[0]['content']|trim %}\n {%- set messages = messages[1:] %}\n {%- else %}\n {{- raise_exception(\"Cannot put tools in the first user message when there's no first user message!\") }}\n{%- endif %}\n {{- '<|start_header_id|>user<|end_header_id|>\\n\\n' -}}\n {{- \"Given the following functions, please respond with a JSON for a function call \" }}\n {{- \"with its proper arguments that best answers the given prompt.\\n\\n\" }}\n {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' }}\n {{- \"Do not use variables.\\n\\n\" }}\n {%- for t in tools %}\n {{- t | tojson(indent=4) }}\n {{- \"\\n\\n\" }}\n {%- endfor %}\n {{- first_user_message + \"<|eot_id|>\"}}\n{%- endif %}\n\n{%- for message in messages %}\n {%- if not (message.role == 'ipython' or message.role == 'tool' or 'tool_calls' in message) %}\n {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\n\\n'+ message['content'] | trim + '<|eot_id|>' }}\n {%- elif 'tool_calls' in message %}\n {%- if not message.tool_calls|length == 1 %}\n {{- raise_exception(\"This model only supports single tool-calls at once!\") }}\n {%- endif %}\n {%- set tool_call = message.tool_calls[0].function %}\n {%- if builtin_tools is defined and tool_call.name in builtin_tools %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' -}}\n {{- \"<|python_tag|>\" + tool_call.name + \".call(\" }}\n {%- for arg_name, arg_val in tool_call.arguments | items %}\n {{- arg_name + '=\"' + arg_val + '\"' }}\n {%- if not loop.last %}\n {{- \", \" }}\n {%- endif %}\n {%- endfor %}\n {{- \")\" }}\n {%- else %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' -}}\n {{- '{\"name\": \"' + tool_call.name + '\", ' }}\n {{- '\"parameters\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- \"}\" }}\n {%- endif %}\n {%- if builtin_tools is defined %}\n {#- This means we're in ipython mode #}\n {{- \"<|eom_id|>\" }}\n {%- else %}\n {{- \"<|eot_id|>\" }}\n {%- endif %}\n {%- elif message.role == \"tool\" or message.role == \"ipython\" %}\n {{- \"<|start_header_id|>ipython<|end_header_id|>\\n\\n\" }}\n {%- if message.content is mapping or message.content is iterable %}\n {{- message.content | tojson }}\n {%- else %}\n {{- message.content }}\n {%- endif %}\n {{- \"<|eot_id|>\" }}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' }}\n{%- endif %}\n", + "bos_token": "<|begin_of_text|>", + "eos_token": "<|eot_id|>", + "pad_token": None, + "unk_token": None, + "chat_template": "{{- bos_token }}\n{%- if custom_tools is defined %}\n {%- set tools = custom_tools %}\n{%- endif %}\n{%- if not tools_in_user_message is defined %}\n {%- set tools_in_user_message = true %}\n{%- endif %}\n{%- if not date_string is defined %}\n {%- set date_string = \"26 Jul 2024\" %}\n{%- endif %}\n{%- if not tools is defined %}\n {%- set tools = none %}\n{%- endif %}\n\n{#- This block extracts the system message, so we can slot it into the right place. 
#}\n{%- if messages[0]['role'] == 'system' %}\n {%- set system_message = messages[0]['content']|trim %}\n {%- set messages = messages[1:] %}\n{%- else %}\n {%- set system_message = \"\" %}\n{%- endif %}\n\n{#- System message + builtin tools #}\n{{- \"<|start_header_id|>system<|end_header_id|>\\n\\n\" }}\n{%- if builtin_tools is defined or tools is not none %}\n {{- \"Environment: ipython\\n\" }}\n{%- endif %}\n{%- if builtin_tools is defined %}\n {{- \"Tools: \" + builtin_tools | reject('equalto', 'code_interpreter') | join(\", \") + \"\\n\\n\"}}\n{%- endif %}\n{{- \"Cutting Knowledge Date: December 2023\\n\" }}\n{{- \"Today Date: \" + date_string + \"\\n\\n\" }}\n{%- if tools is not none and not tools_in_user_message %}\n {{- \"You have access to the following functions. To call a function, please respond with JSON for a function call.\" }}\n {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' }}\n {{- \"Do not use variables.\\n\\n\" }}\n {%- for t in tools %}\n {{- t | tojson(indent=4) }}\n {{- \"\\n\\n\" }}\n {%- endfor %}\n{%- endif %}\n{{- system_message }}\n{{- \"<|eot_id|>\" }}\n\n{#- Custom tools are passed in a user message with some extra guidance #}\n{%- if tools_in_user_message and not tools is none %}\n {#- Extract the first user message so we can plug it in here #}\n {%- if messages | length != 0 %}\n {%- set first_user_message = messages[0]['content']|trim %}\n {%- set messages = messages[1:] %}\n {%- else %}\n {{- raise_exception(\"Cannot put tools in the first user message when there's no first user message!\") }}\n{%- endif %}\n {{- '<|start_header_id|>user<|end_header_id|>\\n\\n' -}}\n {{- \"Given the following functions, please respond with a JSON for a function call \" }}\n {{- \"with its proper arguments that best answers the given prompt.\\n\\n\" }}\n {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' 
}}\n {{- \"Do not use variables.\\n\\n\" }}\n {%- for t in tools %}\n {{- t | tojson(indent=4) }}\n {{- \"\\n\\n\" }}\n {%- endfor %}\n {{- first_user_message + \"<|eot_id|>\"}}\n{%- endif %}\n\n{%- for message in messages %}\n {%- if not (message.role == 'ipython' or message.role == 'tool' or 'tool_calls' in message) %}\n {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\n\\n'+ message['content'] | trim + '<|eot_id|>' }}\n {%- elif 'tool_calls' in message %}\n {%- if not message.tool_calls|length == 1 %}\n {{- raise_exception(\"This model only supports single tool-calls at once!\") }}\n {%- endif %}\n {%- set tool_call = message.tool_calls[0].function %}\n {%- if builtin_tools is defined and tool_call.name in builtin_tools %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' -}}\n {{- \"<|python_tag|>\" + tool_call.name + \".call(\" }}\n {%- for arg_name, arg_val in tool_call.arguments | items %}\n {{- arg_name + '=\"' + arg_val + '\"' }}\n {%- if not loop.last %}\n {{- \", \" }}\n {%- endif %}\n {%- endfor %}\n {{- \")\" }}\n {%- else %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' -}}\n {{- '{\"name\": \"' + tool_call.name + '\", ' }}\n {{- '\"parameters\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- \"}\" }}\n {%- endif %}\n {%- if builtin_tools is defined %}\n {#- This means we're in ipython mode #}\n {{- \"<|eom_id|>\" }}\n {%- else %}\n {{- \"<|eot_id|>\" }}\n {%- endif %}\n {%- elif message.role == \"tool\" or message.role == \"ipython\" %}\n {{- \"<|start_header_id|>ipython<|end_header_id|>\\n\\n\" }}\n {%- if message.content is mapping or message.content is iterable %}\n {{- message.content | tojson }}\n {%- else %}\n {{- message.content }}\n {%- endif %}\n {{- \"<|eot_id|>\" }}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' }}\n{%- endif %}\n", } }