commit bde8c4487475659c1ee7f8bea34429c0001ad0b7 Author: cruldra Date: Tue Sep 23 20:24:15 2025 +0800 Init diff --git a/.difyignore b/.difyignore new file mode 100644 index 0000000..7243aa1 --- /dev/null +++ b/.difyignore @@ -0,0 +1,2 @@ +../../.difyignore +.venv \ No newline at end of file diff --git a/.env.example b/.env.example new file mode 100644 index 0000000..c9d5a25 --- /dev/null +++ b/.env.example @@ -0,0 +1,3 @@ +INSTALL_METHOD=remote +REMOTE_INSTALL_URL=debug-plugin.dify.dev:5003 +REMOTE_INSTALL_KEY=********-****-****-****-************ diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..c66bb57 --- /dev/null +++ b/.gitignore @@ -0,0 +1,2 @@ +uv.lock +.venv \ No newline at end of file diff --git a/.python-version b/.python-version new file mode 100644 index 0000000..e4fba21 --- /dev/null +++ b/.python-version @@ -0,0 +1 @@ +3.12 diff --git a/.verification.dify.json b/.verification.dify.json new file mode 100644 index 0000000..b1d9b43 --- /dev/null +++ b/.verification.dify.json @@ -0,0 +1 @@ +{"authorized_category":"langgenius"} \ No newline at end of file diff --git a/README.md b/README.md new file mode 100644 index 0000000..a069583 --- /dev/null +++ b/README.md @@ -0,0 +1,9 @@ +## Overview + +This plugin provides access to models that are OpenAI-compatible, including LLMs, reranking, text embedding, speech-to-text (STT), and text-to-speech (TTS) models. Developers can easily add models by providing configuration parameters such as the model name and API key. + +## Configure + +Configure the OpenAI-API-compatible model by providing its core details (Type, Name, API Key, URL) and adjusting further options like completion, context, and token limits, as well as streaming and vision settings. Save when done. 
+ +![](./_assets/openai_api_compatible-01.png) diff --git a/README_zh.md b/README_zh.md new file mode 100644 index 0000000..464cf72 --- /dev/null +++ b/README_zh.md @@ -0,0 +1,9 @@ +## 概述 + +此插件提供对OpenAI兼容模型的访问,包括大语言模型(LLM)、重排序、文本嵌入、语音转文本(STT)和文本转语音(TTS)模型。开发者可以通过提供模型名称和API密钥等配置参数轻松添加模型。 + +## 配置 + +通过提供核心详细信息(类型、名称、API密钥、URL)来配置OpenAI-API兼容模型,并调整其他选项,如完成、上下文和令牌限制,以及流式传输和视觉设置。完成后保存。 + +![](./_assets/openai_api_compatible-01.png) diff --git a/_assets/audio.mp3 b/_assets/audio.mp3 new file mode 100644 index 0000000..7c86e02 Binary files /dev/null and b/_assets/audio.mp3 differ diff --git a/_assets/icon.svg b/_assets/icon.svg new file mode 100644 index 0000000..ce65af4 --- /dev/null +++ b/_assets/icon.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/_assets/openai_api_compatible-01.png b/_assets/openai_api_compatible-01.png new file mode 100644 index 0000000..4189f8d Binary files /dev/null and b/_assets/openai_api_compatible-01.png differ diff --git a/main.py b/main.py new file mode 100644 index 0000000..37eb41b --- /dev/null +++ b/main.py @@ -0,0 +1,6 @@ +from dify_plugin import Plugin, DifyPluginEnv + +plugin = Plugin(DifyPluginEnv()) + +if __name__ == "__main__": + plugin.run() diff --git a/manifest.yaml b/manifest.yaml new file mode 100644 index 0000000..9b3d6ba --- /dev/null +++ b/manifest.yaml @@ -0,0 +1,31 @@ +version: 0.0.22 +type: plugin +author: "langgenius" +name: "openai_api_compatible" +description: + en_US: Model providers compatible with OpenAI's API standard, such as LM Studio. 
+ zh_Hans: 兼容 OpenAI API 的模型供应商,例如 LM Studio 。 +label: + en_US: "OpenAI-API-compatible" +created_at: "2024-07-12T08:03:44.658609186Z" +icon: icon.svg +resource: + memory: 1048576 + permission: + tool: + enabled: true + model: + enabled: true + llm: true +plugins: + models: + - "provider/openai_api_compatible.yaml" +meta: + version: 0.0.1 + arch: + - "amd64" + - "arm64" + runner: + language: "python" + version: "3.12" + entrypoint: "main" diff --git a/models/common_openai.py b/models/common_openai.py new file mode 100644 index 0000000..ff6796e --- /dev/null +++ b/models/common_openai.py @@ -0,0 +1,60 @@ +from collections.abc import Mapping + +import openai +from httpx import Timeout + +from dify_plugin.errors.model import ( + InvokeAuthorizationError, + InvokeBadRequestError, + InvokeConnectionError, + InvokeError, + InvokeRateLimitError, + InvokeServerUnavailableError, +) + + +class _CommonOpenAI: + def _to_credential_kwargs(self, credentials: Mapping) -> dict: + """ + Transform credentials to kwargs for model instance + + :param credentials: + :return: + """ + credentials_kwargs = { + "api_key": credentials["openai_api_key"], + "timeout": Timeout(315.0, read=300.0, write=10.0, connect=5.0), + "max_retries": 1, + } + + if credentials.get("openai_api_base"): + openai_api_base = credentials["openai_api_base"].rstrip("/") + credentials_kwargs["base_url"] = openai_api_base + "/v1" + + if "openai_organization" in credentials: + credentials_kwargs["organization"] = credentials["openai_organization"] + + return credentials_kwargs + + @property + def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: + """ + Map model invoke error to unified error + The key is the error type thrown to the caller + The value is the error type thrown by the model, + which needs to be converted into a unified error type for the caller. 
+ + :return: Invoke error mapping + """ + return { + InvokeConnectionError: [openai.APIConnectionError, openai.APITimeoutError], + InvokeServerUnavailableError: [openai.InternalServerError], + InvokeRateLimitError: [openai.RateLimitError], + InvokeAuthorizationError: [openai.AuthenticationError, openai.PermissionDeniedError], + InvokeBadRequestError: [ + openai.BadRequestError, + openai.NotFoundError, + openai.UnprocessableEntityError, + openai.APIError, + ], + } diff --git a/models/llm/llm.py b/models/llm/llm.py new file mode 100644 index 0000000..05ca7b6 --- /dev/null +++ b/models/llm/llm.py @@ -0,0 +1,167 @@ +import re +from contextlib import suppress +from typing import Mapping, Optional, Union, Generator + +from dify_plugin.entities.model import ( + AIModelEntity, + DefaultParameterName, + I18nObject, + ModelFeature, + ParameterRule, + ParameterType, +) +from dify_plugin.entities.model.llm import LLMResult +from dify_plugin.entities.model.message import ( + PromptMessage, + PromptMessageRole, + PromptMessageTool, + SystemPromptMessage, + AssistantPromptMessage, +) +from dify_plugin.interfaces.model.openai_compatible.llm import OAICompatLargeLanguageModel +from typing import List + + +class OpenAILargeLanguageModel(OAICompatLargeLanguageModel): + # Pre-compiled regex for better performance + _THINK_PATTERN = re.compile(r"^.*?\s*", re.DOTALL) + + def get_customizable_model_schema( + self, model: str, credentials: Mapping | dict + ) -> AIModelEntity: + entity = super().get_customizable_model_schema(model, credentials) + + agent_though_support = credentials.get("agent_though_support", "not_supported") + if agent_though_support == "supported": + try: + entity.features.index(ModelFeature.AGENT_THOUGHT) + except ValueError: + entity.features.append(ModelFeature.AGENT_THOUGHT) + + structured_output_support = credentials.get("structured_output_support", "not_supported") + if structured_output_support == "supported": + # ---- + # The following section should be added 
after the new version of `dify-plugin-sdks` + # is released. + # Related Commit: + # https://github.com/langgenius/dify-plugin-sdks/commit/0690573a879caf43f92494bf411f45a1835d96f6 + # ---- + # try: + # entity.features.index(ModelFeature.STRUCTURED_OUTPUT) + # except ValueError: + # entity.features.append(ModelFeature.STRUCTURED_OUTPUT) + + entity.parameter_rules.append( + ParameterRule( + name=DefaultParameterName.RESPONSE_FORMAT.value, + label=I18nObject(en_US="Response Format", zh_Hans="回复格式"), + help=I18nObject( + en_US="Specifying the format that the model must output.", + zh_Hans="指定模型必须输出的格式。", + ), + type=ParameterType.STRING, + options=["text", "json_object", "json_schema"], + required=False, + ) + ) + entity.parameter_rules.append( + ParameterRule( + name=DefaultParameterName.JSON_SCHEMA.value, + use_template=DefaultParameterName.JSON_SCHEMA.value, + ) + ) + + if "display_name" in credentials and credentials["display_name"] != "": + entity.label = I18nObject( + en_US=credentials["display_name"], zh_Hans=credentials["display_name"] + ) + + entity.parameter_rules += [ + ParameterRule( + name="enable_thinking", + label=I18nObject(en_US="Thinking mode", zh_Hans="思考模式"), + help=I18nObject( + en_US="Whether to enable thinking mode, applicable to various thinking mode models deployed on reasoning frameworks such as vLLM and SGLang, for example Qwen3.", + zh_Hans="是否开启思考模式,适用于vLLM和SGLang等推理框架部署的多种思考模式模型,例如Qwen3。", + ), + type=ParameterType.BOOLEAN, + required=False, + ) + ] + return entity + + @classmethod + def _drop_analyze_channel(cls, prompt_messages: List[PromptMessage]) -> None: + """ + Remove thinking content from assistant messages for better performance. + + Uses early exit and pre-compiled regex to minimize overhead. 
+ Args: + prompt_messages: + + Returns: + + """ + for p in prompt_messages: + # Early exit conditions + if not isinstance(p, AssistantPromptMessage): + continue + if not isinstance(p.content, str): + continue + # Quick check to avoid regex if not needed + if not p.content.startswith(""): + continue + + # Only perform regex substitution when necessary + new_content = cls._THINK_PATTERN.sub("", p.content, count=1) + # Only update if changed + if new_content != p.content: + p.content = new_content + + def _invoke( + self, + model: str, + credentials: dict, + prompt_messages: list[PromptMessage], + model_parameters: dict, + tools: Optional[list[PromptMessageTool]] = None, + stop: Optional[list[str]] = None, + stream: bool = True, + user: Optional[str] = None, + ) -> Union[LLMResult, Generator]: + # Compatibility adapter for Dify's 'json_schema' structured output mode. + # The base class does not natively handle the 'json_schema' parameter. This block + # translates it into a standard OpenAI-compatible request by: + # 1. Injecting the JSON schema directly into the system prompt to guide the model. + # This ensures models like gpt-4o produce the correct structured output. 
+ if model_parameters.get("response_format") == "json_schema": + # Use .get() instead of .pop() for safety + json_schema_str = model_parameters.get("json_schema") + + if json_schema_str: + structured_output_prompt = ( + "Your response must be a JSON object that validates against the following JSON schema, and nothing else.\n" + f"JSON Schema: ```json\n{json_schema_str}\n```" + ) + + existing_system_prompt = next( + (p for p in prompt_messages if p.role == PromptMessageRole.SYSTEM), None + ) + if existing_system_prompt: + existing_system_prompt.content = ( + structured_output_prompt + "\n\n" + existing_system_prompt.content + ) + else: + prompt_messages.insert(0, SystemPromptMessage(content=structured_output_prompt)) + + enable_thinking = model_parameters.pop("enable_thinking", None) + if enable_thinking is not None: + model_parameters["chat_template_kwargs"] = {"enable_thinking": bool(enable_thinking)} + + # Remove thinking content from assistant messages for better performance. + with suppress(Exception): + self._drop_analyze_channel(prompt_messages) + + return super()._invoke( + model, credentials, prompt_messages, model_parameters, tools, stop, stream, user + ) diff --git a/models/rerank/rerank.py b/models/rerank/rerank.py new file mode 100644 index 0000000..6f17367 --- /dev/null +++ b/models/rerank/rerank.py @@ -0,0 +1,45 @@ +from typing import Mapping + +from dify_plugin.entities.model import AIModelEntity, I18nObject + +from dify_plugin.interfaces.model.openai_compatible.rerank import OAICompatRerankModel +from dify_plugin.errors.model import CredentialsValidateFailedError + + +class OpenAIRerankModel(OAICompatRerankModel): + def validate_credentials(self, model: str, credentials: dict) -> None: + """ + Validate model credentials + + :param model: model name + :param credentials: model credentials + :return: + """ + try: + self._invoke( + model=model, + credentials=credentials, + query="What is the capital of the United States?", + docs=[ + "Carson City is 
the capital city of the American state of Nevada. At the 2010 United States " + "Census, Carson City had a population of 55,274.", + "The Commonwealth of the Northern Mariana Islands is a group of islands in the Pacific Ocean that " + "are a political division controlled by the United States. Its capital is Saipan.", + ], + score_threshold=0.8, + top_n=3, + ) + except Exception as ex: + raise CredentialsValidateFailedError(str(ex)) from ex + + def get_customizable_model_schema( + self, model: str, credentials: Mapping | dict + ) -> AIModelEntity: + entity = super().get_customizable_model_schema(model, credentials) + + if "display_name" in credentials and credentials["display_name"] != "": + entity.label = I18nObject( + en_US=credentials["display_name"], zh_Hans=credentials["display_name"] + ) + + return entity diff --git a/models/speech2text/speech2text.py b/models/speech2text/speech2text.py new file mode 100644 index 0000000..8c42bd9 --- /dev/null +++ b/models/speech2text/speech2text.py @@ -0,0 +1,27 @@ +from typing import Optional +from dify_plugin.entities.model import AIModelEntity, FetchFrom, I18nObject, ModelType +from dify_plugin.interfaces.model.openai_compatible.speech2text import OAICompatSpeech2TextModel + + +class OpenAISpeech2TextModel(OAICompatSpeech2TextModel): + def get_customizable_model_schema( + self, model: str, credentials: dict + ) -> Optional[AIModelEntity]: + """ + used to define customizable model schema + """ + entity = AIModelEntity( + model=model, + label=I18nObject(en_US=model), + fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, + model_type=ModelType.SPEECH2TEXT, + model_properties={}, + parameter_rules=[], + ) + + if "display_name" in credentials and credentials["display_name"] != "": + entity.label = I18nObject( + en_US=credentials["display_name"], zh_Hans=credentials["display_name"] + ) + + return entity diff --git a/models/text_embedding/text_embedding.py b/models/text_embedding/text_embedding.py new file mode 100644 index 0000000..6cd8e85 
--- /dev/null +++ b/models/text_embedding/text_embedding.py @@ -0,0 +1,21 @@ +from typing import Mapping + +from dify_plugin.entities.model import AIModelEntity, I18nObject + +from dify_plugin.interfaces.model.openai_compatible.text_embedding import OAICompatEmbeddingModel + + +class OpenAITextEmbeddingModel(OAICompatEmbeddingModel): + + def get_customizable_model_schema( + self, model: str, credentials: Mapping | dict + ) -> AIModelEntity: + credentials = credentials or {} + entity = super().get_customizable_model_schema(model, credentials) + + if "display_name" in credentials and credentials["display_name"] != "": + entity.label = I18nObject( + en_US=credentials["display_name"], zh_Hans=credentials["display_name"] + ) + + return entity diff --git a/models/tts/tts.py b/models/tts/tts.py new file mode 100644 index 0000000..e697c9f --- /dev/null +++ b/models/tts/tts.py @@ -0,0 +1,20 @@ +from typing import Mapping + +from dify_plugin.entities.model import AIModelEntity, I18nObject + +from dify_plugin.interfaces.model.openai_compatible.tts import OAICompatText2SpeechModel + + +class OpenAIText2SpeechModel(OAICompatText2SpeechModel): + + def get_customizable_model_schema( + self, model: str, credentials: Mapping | dict + ) -> AIModelEntity: + entity = super().get_customizable_model_schema(model, credentials) + + if "display_name" in credentials and credentials["display_name"] != "": + entity.label = I18nObject( + en_US=credentials["display_name"], zh_Hans=credentials["display_name"] + ) + + return entity diff --git a/provider/openai_api_compatible.py b/provider/openai_api_compatible.py new file mode 100644 index 0000000..226994a --- /dev/null +++ b/provider/openai_api_compatible.py @@ -0,0 +1,17 @@ +import logging +from collections.abc import Mapping + +from dify_plugin import ModelProvider + +logger = logging.getLogger(__name__) + + +class OpenAIProvider(ModelProvider): + def validate_provider_credentials(self, credentials: Mapping) -> None: + """ + Validate provider 
credentials + if validate failed, raise exception + + :param credentials: provider credentials, credentials form defined in `provider_credential_schema`. + """ + pass diff --git a/provider/openai_api_compatible.yaml b/provider/openai_api_compatible.yaml new file mode 100644 index 0000000..b57e651 --- /dev/null +++ b/provider/openai_api_compatible.yaml @@ -0,0 +1,276 @@ +provider: openai_api_compatible +label: + en_US: OpenAI-API-compatible +description: + en_US: Model providers compatible with OpenAI's API standard, such as LM Studio. + zh_Hans: 兼容 OpenAI API 的模型供应商,例如 LM Studio 。 +icon_small: + en_US: icon.svg +supported_model_types: + - llm + - rerank + - text-embedding + - speech2text + - tts +configurate_methods: + - customizable-model +model_credential_schema: + model: + label: + en_US: Model Name + zh_Hans: 模型名称 + placeholder: + en_US: Enter full model name + zh_Hans: 输入模型全称 + credential_form_schemas: + - variable: display_name + label: + en_US: Model display name + zh_Hans: 模型显示名称 + type: text-input + required: false + placeholder: + zh_Hans: 模型在界面的显示名称 + en_US: The display name of the model in the interface. + - variable: api_key + label: + en_US: API Key + type: secret-input + required: false + placeholder: + zh_Hans: 在此输入您的 API Key + en_US: Enter your API Key + - variable: endpoint_url + label: + zh_Hans: API endpoint URL + en_US: API endpoint URL + type: text-input + required: true + placeholder: + zh_Hans: Base URL, e.g. https://api.openai.com/v1 + en_US: Base URL, e.g. https://api.openai.com/v1 + - variable: endpoint_model_name + label: + zh_Hans: API endpoint中的模型名称 + en_US: model name for API endpoint + type: text-input + required: false + placeholder: + zh_Hans: endpoint model name, e.g. chatgpt4.0 + en_US: endpoint model name, e.g. 
chatgpt4.0 + - variable: mode + show_on: + - variable: __model_type + value: llm + label: + en_US: Completion mode + type: select + required: false + default: chat + placeholder: + zh_Hans: 选择对话类型 + en_US: Select completion mode + options: + - value: completion + label: + en_US: Completion + zh_Hans: 补全 + - value: chat + label: + en_US: Chat + zh_Hans: 对话 + - variable: context_size + label: + zh_Hans: 模型上下文长度 + en_US: Model context size + required: true + show_on: + - variable: __model_type + value: llm + type: text-input + default: "4096" + placeholder: + zh_Hans: 在此输入您的模型上下文长度 + en_US: Enter your Model context size + - variable: context_size + label: + zh_Hans: 模型上下文长度 + en_US: Model context size + required: true + show_on: + - variable: __model_type + value: text-embedding + type: text-input + default: "4096" + placeholder: + zh_Hans: 在此输入您的模型上下文长度 + en_US: Enter your Model context size + - variable: context_size + label: + zh_Hans: 模型上下文长度 + en_US: Model context size + required: true + show_on: + - variable: __model_type + value: rerank + type: text-input + default: "4096" + placeholder: + zh_Hans: 在此输入您的模型上下文长度 + en_US: Enter your Model context size + - variable: max_tokens_to_sample + label: + zh_Hans: 最大 token 上限 + en_US: Upper bound for max tokens + show_on: + - variable: __model_type + value: llm + default: "4096" + type: text-input + - variable: agent_though_support + show_on: + - variable: __model_type + value: llm + label: + en_US: Agent Thought + type: select + required: false + default: not_supported + options: + - value: supported + label: + en_US: Support + zh_Hans: 支持 + - value: not_supported + label: + en_US: Not Support + zh_Hans: 不支持 + - variable: function_calling_type + show_on: + - variable: __model_type + value: llm + label: + en_US: Function calling + type: select + required: false + default: no_call + options: + - value: function_call + label: + en_US: Function Call + zh_Hans: Function Call + - value: tool_call + label: + en_US: Tool Call + 
zh_Hans: Tool Call + - value: no_call + label: + en_US: Not Support + zh_Hans: 不支持 + - variable: stream_function_calling + show_on: + - variable: __model_type + value: llm + label: + en_US: Stream function calling + type: select + required: false + default: not_supported + options: + - value: supported + label: + en_US: Support + zh_Hans: 支持 + - value: not_supported + label: + en_US: Not Support + zh_Hans: 不支持 + - variable: vision_support + show_on: + - variable: __model_type + value: llm + label: + zh_Hans: Vision 支持 + en_US: Vision Support + type: select + required: false + default: no_support + options: + - value: support + label: + en_US: Support + zh_Hans: 支持 + - value: no_support + label: + en_US: Not Support + zh_Hans: 不支持 + - variable: structured_output_support + show_on: + - variable: __model_type + value: llm + label: + en_US: Structured Output + type: select + required: false + default: not_supported + options: + - value: supported + label: + en_US: Support + zh_Hans: 支持 + - value: not_supported + label: + en_US: Not Support + zh_Hans: 不支持 + - variable: stream_mode_auth + show_on: + - variable: __model_type + value: llm + label: + en_US: Stream mode auth + type: select + required: false + default: not_use + options: + - value: use + label: + en_US: Use + zh_Hans: 使用 + - value: not_use + label: + en_US: Not Use + zh_Hans: 不使用 + - variable: stream_mode_delimiter + label: + zh_Hans: 流模式返回结果的分隔符 + en_US: Delimiter for streaming results + show_on: + - variable: __model_type + value: llm + default: '\n\n' + type: text-input + - variable: voices + show_on: + - variable: __model_type + value: tts + label: + en_US: Available Voices (comma-separated) + zh_Hans: 可用声音(用英文逗号分隔) + type: text-input + required: false + default: "alloy" + placeholder: + en_US: "alloy,echo,fable,onyx,nova,shimmer" + zh_Hans: "alloy,echo,fable,onyx,nova,shimmer" + help: + en_US: "List voice names separated by commas. First voice will be used as default." 
+ zh_Hans: "用英文逗号分隔的声音列表。第一个声音将作为默认值。" +extra: + python: + provider_source: provider/openai_api_compatible.py + model_sources: + - "models/llm/llm.py" + - "models/text_embedding/text_embedding.py" + - "models/rerank/rerank.py" + - "models/speech2text/speech2text.py" + - "models/tts/tts.py" diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..0b96199 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,21 @@ +[project] +name = "openai-api-compatible-GnRzR6cEy" +version = "0.1.0" +description = "Add your description here" +readme = "README.md" +requires-python = ">=3.12" + +# uv pip compile pyproject.toml -o ./requirements.txt +dependencies = [ + "dify-plugin>=0.4.3", + "openai>=1.99.9", +] + +# uv run black . -C -l 100 +# uv run ruff check --fix +[dependency-groups] +dev = [ + "black>=25.1.0", + "pytest>=8.4.1", + "ruff>=0.12.5", +] diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..0663aba --- /dev/null +++ b/requirements.txt @@ -0,0 +1,127 @@ +# This file was autogenerated by uv via the following command: +# uv pip compile pyproject.toml -o ./requirements.txt +annotated-types==0.7.0 + # via pydantic +anyio==4.10.0 + # via + # httpx + # openai +blinker==1.9.0 + # via flask +certifi==2025.8.3 + # via + # httpcore + # httpx + # requests +cffi==1.17.1 + # via gevent +charset-normalizer==3.4.3 + # via requests +click==8.2.1 + # via flask +colorama==0.4.6 + # via + # click + # tqdm +dify-plugin==0.4.3 + # via openai-api-compatible-gnrzr6cey (pyproject.toml) +distro==1.9.0 + # via openai +dpkt==1.9.8 + # via dify-plugin +flask==3.0.3 + # via dify-plugin +gevent==25.5.1 + # via dify-plugin +greenlet==3.2.4 + # via gevent +h11==0.16.0 + # via httpcore +httpcore==1.0.9 + # via httpx +httpx==0.28.1 + # via + # dify-plugin + # openai +idna==3.10 + # via + # anyio + # httpx + # requests + # yarl +itsdangerous==2.2.0 + # via flask +jinja2==3.1.6 + # via flask +jiter==0.10.0 + # via openai +markupsafe==3.0.2 + # via + # 
jinja2 + # werkzeug +multidict==6.6.4 + # via yarl +openai==1.99.9 + # via openai-api-compatible-gnrzr6cey (pyproject.toml) +packaging==25.0 + # via dify-plugin +propcache==0.3.2 + # via yarl +pycparser==2.22 + # via cffi +pydantic==2.11.7 + # via + # dify-plugin + # openai + # pydantic-settings +pydantic-core==2.33.2 + # via pydantic +pydantic-settings==2.10.1 + # via dify-plugin +python-dotenv==1.1.1 + # via pydantic-settings +pyyaml==6.0.2 + # via dify-plugin +regex==2025.7.34 + # via tiktoken +requests==2.32.4 + # via + # dify-plugin + # tiktoken +setuptools==80.9.0 + # via + # zope-event + # zope-interface +sniffio==1.3.1 + # via + # anyio + # openai +socksio==1.0.0 + # via dify-plugin +tiktoken==0.8.0 + # via dify-plugin +tqdm==4.67.1 + # via openai +typing-extensions==4.14.1 + # via + # anyio + # openai + # pydantic + # pydantic-core + # typing-inspection +typing-inspection==0.4.1 + # via + # pydantic + # pydantic-settings +urllib3==2.5.0 + # via requests +werkzeug==3.0.6 + # via + # dify-plugin + # flask +yarl==1.20.1 + # via dify-plugin +zope-event==5.1.1 + # via gevent +zope-interface==7.2 + # via gevent