This commit is contained in:
2025-09-23 20:24:15 +08:00
commit bde8c44874
22 changed files with 846 additions and 0 deletions

2
.difyignore Normal file
View File

@@ -0,0 +1,2 @@
../../.difyignore
.venv

3
.env.example Normal file
View File

@@ -0,0 +1,3 @@
INSTALL_METHOD=remote
REMOTE_INSTALL_URL=debug-plugin.dify.dev:5003
REMOTE_INSTALL_KEY=********-****-****-****-************

2
.gitignore vendored Normal file
View File

@@ -0,0 +1,2 @@
uv.lock
.venv

1
.python-version Normal file
View File

@@ -0,0 +1 @@
3.12

1
.verification.dify.json Normal file
View File

@@ -0,0 +1 @@
{"authorized_category":"langgenius"}

9
README.md Normal file
View File

@@ -0,0 +1,9 @@
## Overview
This plugin provides access to models that are OpenAI-compatible, including LLMs, reranking, text embedding, speech-to-text (STT), and text-to-speech (TTS) models. Developers can easily add models by providing configuration parameters such as the model name and API key.
## Configure
Configure the OpenAI-API-compatible model by providing its core details (Type, Name, API Key, URL) and adjusting further options like completion, context, and token limits, as well as streaming and vision settings. Save when done.
![](./_assets/openai_api_compatible-01.png)

9
README_zh.md Normal file
View File

@@ -0,0 +1,9 @@
## 概述
此插件提供对OpenAI兼容模型的访问,包括大语言模型(LLM)、重排序、文本嵌入、语音转文本(STT)和文本转语音(TTS)模型。开发者可以通过提供模型名称和API密钥等配置参数轻松添加模型。
## 配置
通过提供核心详细信息(类型、名称、API密钥、URL)来配置OpenAI-API兼容模型,并调整其他选项,如完成、上下文和令牌限制,以及流式传输和视觉设置。完成后保存。
![](./_assets/openai_api_compatible-01.png)

BIN
_assets/audio.mp3 Normal file

Binary file not shown.

1
_assets/icon.svg Normal file
View File

@@ -0,0 +1 @@
<svg width="16" height="17" viewBox="0 0 16 17" fill="none" xmlns="http://www.w3.org/2000/svg" class="w-4 h-4 text-gray-400" data-icon="CubeOutline" aria-hidden="true"><g id="cube-outline"><g id="Solid"><path d="M8.26865 1.29003C8.09143 1.25358 7.90866 1.25358 7.73144 1.29003C7.52659 1.33216 7.3435 1.43471 7.19794 1.51624L7.15826 1.53841L6.17628 2.08395C5.85443 2.26276 5.73846 2.66863 5.91727 2.99049C6.09608 3.31234 6.50195 3.4283 6.82381 3.24949L7.80579 2.70395C7.90681 2.64782 7.95839 2.61946 7.99686 2.60091L8.00004 2.59938L8.00323 2.60091C8.0417 2.61946 8.09327 2.64782 8.1943 2.70395L9.17628 3.24949C9.49814 3.4283 9.90401 3.31234 10.0828 2.99048C10.2616 2.66863 10.1457 2.26276 9.82381 2.08395L8.84183 1.53841L8.80215 1.51624C8.65659 1.43471 8.4735 1.33216 8.26865 1.29003Z" fill="currentColor"></path><path d="M12.8238 3.75062C12.5019 3.57181 12.0961 3.68777 11.9173 4.00963C11.7385 4.33148 11.8544 4.73735 12.1763 4.91616L12.6272 5.16668L12.1763 5.41719C11.8545 5.596 11.7385 6.00186 11.9173 6.32372C12.0961 6.64558 12.502 6.76154 12.8238 6.58273L13.3334 6.29966V6.83339C13.3334 7.20158 13.6319 7.50006 14 7.50006C14.3682 7.50006 14.6667 7.20158 14.6667 6.83339V5.79435L14.6668 5.74627C14.6673 5.62441 14.6678 5.48084 14.6452 5.33482C14.6869 5.17472 14.6696 4.99892 14.5829 4.84286C14.4904 4.6764 14.3371 4.56501 14.1662 4.52099C14.0496 4.43038 13.9239 4.36116 13.8173 4.3024L13.7752 4.27915L12.8238 3.75062Z" fill="currentColor"></path><path d="M3.8238 4.91616C4.14566 4.73735 4.26162 4.33148 4.08281 4.00963C3.90401 3.68777 3.49814 3.57181 3.17628 3.75062L2.22493 4.27915L2.18284 4.3024C2.07615 4.36116 1.95045 4.4304 1.83382 4.52102C1.66295 4.56506 1.50977 4.67643 1.41731 4.84286C1.33065 4.99886 1.31323 5.17459 1.35493 5.33464C1.33229 5.48072 1.33281 5.62436 1.33326 5.74627L1.33338 5.79435V6.83339C1.33338 7.20158 1.63185 7.50006 2.00004 7.50006C2.36823 7.50006 2.66671 7.20158 2.66671 6.83339V6.29961L3.17632 6.58273C3.49817 6.76154 3.90404 6.64558 4.08285 6.32372C4.26166 6.00186 
4.1457 5.596 3.82384 5.41719L3.3729 5.16666L3.8238 4.91616Z" fill="currentColor"></path><path d="M2.66671 10.1667C2.66671 9.79853 2.36823 9.50006 2.00004 9.50006C1.63185 9.50006 1.33338 9.79853 1.33338 10.1667V11.2058L1.33326 11.2538C1.33262 11.4298 1.33181 11.6509 1.40069 11.8594C1.46024 12.0397 1.55759 12.2051 1.68622 12.3447C1.835 12.5061 2.02873 12.6128 2.18281 12.6977L2.22493 12.721L3.17628 13.2495C3.49814 13.4283 3.90401 13.3123 4.08281 12.9905C4.26162 12.6686 4.14566 12.2628 3.8238 12.084L2.87245 11.5554C2.76582 11.4962 2.71137 11.4656 2.67318 11.4413L2.66995 11.4392L2.66971 11.4354C2.66699 11.3902 2.66671 11.3277 2.66671 11.2058V10.1667Z" fill="currentColor"></path><path d="M14.6667 10.1667C14.6667 9.79853 14.3682 9.50006 14 9.50006C13.6319 9.50006 13.3334 9.79853 13.3334 10.1667V11.2058C13.3334 11.3277 13.3331 11.3902 13.3304 11.4354L13.3301 11.4392L13.3269 11.4413C13.2887 11.4656 13.2343 11.4962 13.1276 11.5554L12.1763 12.084C11.8544 12.2628 11.7385 12.6686 11.9173 12.9905C12.0961 13.3123 12.5019 13.4283 12.8238 13.2495L13.7752 12.721L13.8172 12.6977C13.9713 12.6128 14.1651 12.5061 14.3139 12.3447C14.4425 12.2051 14.5398 12.0397 14.5994 11.8594C14.6683 11.6509 14.6675 11.4298 14.6668 11.2538L14.6667 11.2058V10.1667Z" fill="currentColor"></path><path d="M6.82381 13.7506C6.50195 13.5718 6.09608 13.6878 5.91727 14.0096C5.73846 14.3315 5.85443 14.7374 6.17628 14.9162L7.15826 15.4617L7.19793 15.4839C7.29819 15.54 7.41625 15.6061 7.54696 15.6556C7.66589 15.7659 7.82512 15.8333 8.00008 15.8333C8.17507 15.8333 8.33431 15.7659 8.45324 15.6556C8.58391 15.6061 8.70193 15.54 8.80215 15.4839L8.84183 15.4617L9.82381 14.9162C10.1457 14.7374 10.2616 14.3315 10.0828 14.0096C9.90401 13.6878 9.49814 13.5718 9.17628 13.7506L8.66675 14.0337V13.5C8.66675 13.1318 8.36827 12.8333 8.00008 12.8333C7.63189 12.8333 7.33341 13.1318 7.33341 13.5V14.0337L6.82381 13.7506Z" fill="currentColor"></path><path d="M6.82384 7.08385C6.50199 6.90505 6.09612 7.02101 5.91731 7.34286C5.7385 7.66472 
5.85446 8.07059 6.17632 8.2494L7.33341 8.89223V10.1666C7.33341 10.5348 7.63189 10.8333 8.00008 10.8333C8.36827 10.8333 8.66675 10.5348 8.66675 10.1666V8.89223L9.82384 8.2494C10.1457 8.07059 10.2617 7.66472 10.0829 7.34286C9.90404 7.02101 9.49817 6.90505 9.17632 7.08385L8.00008 7.73732L6.82384 7.08385Z" fill="currentColor"></path></g></g></svg>

After

Width:  |  Height:  |  Size: 4.2 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 156 KiB

6
main.py Normal file
View File

@@ -0,0 +1,6 @@
from dify_plugin import Plugin, DifyPluginEnv

# Plugin entrypoint: build the plugin with default environment settings
# (install method, remote debug host, etc. are read from the environment).
plugin = Plugin(DifyPluginEnv())

if __name__ == "__main__":
    # Start the plugin event loop and serve model requests.
    plugin.run()

31
manifest.yaml Normal file
View File

@@ -0,0 +1,31 @@
version: 0.0.22
type: plugin
author: "langgenius"
name: "openai_api_compatible"
description:
en_US: Model providers compatible with OpenAI's API standard, such as LM Studio.
zh_Hans: 兼容 OpenAI API 的模型供应商,例如 LM Studio 。
label:
en_US: "OpenAI-API-compatible"
created_at: "2024-07-12T08:03:44.658609186Z"
icon: icon.svg
resource:
memory: 1048576
permission:
tool:
enabled: true
model:
enabled: true
llm: true
plugins:
models:
- "provider/openai_api_compatible.yaml"
meta:
version: 0.0.1
arch:
- "amd64"
- "arm64"
runner:
language: "python"
version: "3.12"
entrypoint: "main"

60
models/common_openai.py Normal file
View File

@@ -0,0 +1,60 @@
from collections.abc import Mapping
import openai
from httpx import Timeout
from dify_plugin.errors.model import (
InvokeAuthorizationError,
InvokeBadRequestError,
InvokeConnectionError,
InvokeError,
InvokeRateLimitError,
InvokeServerUnavailableError,
)
class _CommonOpenAI:
    """Shared helpers for the OpenAI-compatible model implementations."""

    def _to_credential_kwargs(self, credentials: Mapping) -> dict:
        """
        Transform provider credentials into keyword arguments for the
        OpenAI client constructor.

        :param credentials: credentials mapping; must contain
            ``openai_api_key``, may contain ``openai_api_base`` and
            ``openai_organization``.
        :return: kwargs suitable for ``openai.OpenAI(**kwargs)``.
        """
        credentials_kwargs = {
            "api_key": credentials["openai_api_key"],
            # Long overall/read timeouts for slow model backends; a single
            # retry keeps hard failures from stalling callers.
            "timeout": Timeout(315.0, read=300.0, write=10.0, connect=5.0),
            "max_retries": 1,
        }

        if credentials.get("openai_api_base"):
            # NOTE(review): "/v1" is appended unconditionally; a base URL that
            # already ends in "/v1" would be doubled — confirm expected input.
            openai_api_base = credentials["openai_api_base"].rstrip("/")
            credentials_kwargs["base_url"] = openai_api_base + "/v1"

        # Truthiness check (consistent with openai_api_base above) so that a
        # present-but-empty organization is not forwarded to the client.
        if credentials.get("openai_organization"):
            credentials_kwargs["organization"] = credentials["openai_organization"]

        return credentials_kwargs

    @property
    def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]:
        """
        Map model invoke error to unified error.

        The key is the error type thrown to the caller; the value lists the
        OpenAI SDK error types that are converted into that unified type.

        :return: Invoke error mapping
        """
        return {
            InvokeConnectionError: [openai.APIConnectionError, openai.APITimeoutError],
            InvokeServerUnavailableError: [openai.InternalServerError],
            InvokeRateLimitError: [openai.RateLimitError],
            InvokeAuthorizationError: [openai.AuthenticationError, openai.PermissionDeniedError],
            # openai.APIError is the SDK's base class, so it must stay last in
            # this list to act as the catch-all bad-request mapping.
            InvokeBadRequestError: [
                openai.BadRequestError,
                openai.NotFoundError,
                openai.UnprocessableEntityError,
                openai.APIError,
            ],
        }

167
models/llm/llm.py Normal file
View File

@@ -0,0 +1,167 @@
import re
from contextlib import suppress
from typing import Mapping, Optional, Union, Generator
from dify_plugin.entities.model import (
AIModelEntity,
DefaultParameterName,
I18nObject,
ModelFeature,
ParameterRule,
ParameterType,
)
from dify_plugin.entities.model.llm import LLMResult
from dify_plugin.entities.model.message import (
PromptMessage,
PromptMessageRole,
PromptMessageTool,
SystemPromptMessage,
AssistantPromptMessage,
)
from dify_plugin.interfaces.model.openai_compatible.llm import OAICompatLargeLanguageModel
from typing import List
class OpenAILargeLanguageModel(OAICompatLargeLanguageModel):
    # Pre-compiled regex matching a leading <think>...</think> section
    # (and trailing whitespace) at the start of a message.
    _THINK_PATTERN = re.compile(r"^<think>.*?</think>\s*", re.DOTALL)

    def get_customizable_model_schema(
        self, model: str, credentials: Mapping | dict
    ) -> AIModelEntity:
        """
        Build the customizable model schema, extending the base schema with
        feature flags and parameter rules driven by the credential form.

        :param model: model name
        :param credentials: credential form values (agent_though_support,
            structured_output_support, display_name, ...)
        :return: the adjusted AIModelEntity
        """
        entity = super().get_customizable_model_schema(model, credentials)

        # Advertise agent-thought support when the user enabled it.
        # NOTE(review): assumes entity.features is a list here — confirm the
        # base schema always populates it.
        if credentials.get("agent_though_support", "not_supported") == "supported":
            if ModelFeature.AGENT_THOUGHT not in entity.features:
                entity.features.append(ModelFeature.AGENT_THOUGHT)

        if credentials.get("structured_output_support", "not_supported") == "supported":
            # ----
            # The following section should be added after the new version of
            # `dify-plugin-sdks` is released.
            # Related Commit:
            # https://github.com/langgenius/dify-plugin-sdks/commit/0690573a879caf43f92494bf411f45a1835d96f6
            # ----
            # try:
            #     entity.features.index(ModelFeature.STRUCTURED_OUTPUT)
            # except ValueError:
            #     entity.features.append(ModelFeature.STRUCTURED_OUTPUT)
            response_format_rule = ParameterRule(
                name=DefaultParameterName.RESPONSE_FORMAT.value,
                label=I18nObject(en_US="Response Format", zh_Hans="回复格式"),
                help=I18nObject(
                    en_US="Specifying the format that the model must output.",
                    zh_Hans="指定模型必须输出的格式。",
                ),
                type=ParameterType.STRING,
                options=["text", "json_object", "json_schema"],
                required=False,
            )
            json_schema_rule = ParameterRule(
                name=DefaultParameterName.JSON_SCHEMA.value,
                use_template=DefaultParameterName.JSON_SCHEMA.value,
            )
            entity.parameter_rules.append(response_format_rule)
            entity.parameter_rules.append(json_schema_rule)

        # Optional UI display name override.
        if "display_name" in credentials and credentials["display_name"] != "":
            entity.label = I18nObject(
                en_US=credentials["display_name"], zh_Hans=credentials["display_name"]
            )

        # Always expose the thinking-mode toggle (vLLM/SGLang style models).
        entity.parameter_rules += [
            ParameterRule(
                name="enable_thinking",
                label=I18nObject(en_US="Thinking mode", zh_Hans="思考模式"),
                help=I18nObject(
                    en_US="Whether to enable thinking mode, applicable to various thinking mode models deployed on reasoning frameworks such as vLLM and SGLang, for example Qwen3.",
                    zh_Hans="是否开启思考模式,适用于vLLM和SGLang等推理框架部署的多种思考模式模型,例如Qwen3。",
                ),
                type=ParameterType.BOOLEAN,
                required=False,
            )
        ]
        return entity

    @classmethod
    def _drop_analyze_channel(cls, prompt_messages: list[PromptMessage]) -> None:
        """
        Strip a leading <think>...</think> block from assistant messages,
        in place. Cheap guards avoid running the regex unless the message
        actually starts with a think tag.

        :param prompt_messages: conversation history to sanitize (mutated)
        """
        for message in prompt_messages:
            if (
                isinstance(message, AssistantPromptMessage)
                and isinstance(message.content, str)
                and message.content.startswith("<think>")
            ):
                stripped = cls._THINK_PATTERN.sub("", message.content, count=1)
                # Only write back when something was actually removed.
                if stripped != message.content:
                    message.content = stripped

    def _invoke(
        self,
        model: str,
        credentials: dict,
        prompt_messages: list[PromptMessage],
        model_parameters: dict,
        tools: Optional[list[PromptMessageTool]] = None,
        stop: Optional[list[str]] = None,
        stream: bool = True,
        user: Optional[str] = None,
    ) -> Union[LLMResult, Generator]:
        """
        Invoke the model, adapting Dify's 'json_schema' structured-output
        mode and the 'enable_thinking' toggle before delegating to the base
        OpenAI-compatible implementation.
        """
        # Compatibility adapter for 'json_schema' structured output: the base
        # class does not handle it natively, so the schema is injected into
        # the system prompt to guide the model.
        if model_parameters.get("response_format") == "json_schema":
            # .get() rather than .pop(): leave the parameter untouched.
            json_schema_str = model_parameters.get("json_schema")
            if json_schema_str:
                schema_prompt = (
                    "Your response must be a JSON object that validates against the following JSON schema, and nothing else.\n"
                    f"JSON Schema: ```json\n{json_schema_str}\n```"
                )
                system_message = next(
                    (m for m in prompt_messages if m.role == PromptMessageRole.SYSTEM),
                    None,
                )
                if system_message is None:
                    prompt_messages.insert(0, SystemPromptMessage(content=schema_prompt))
                else:
                    # Prepend so the schema instruction takes precedence.
                    system_message.content = (
                        schema_prompt + "\n\n" + system_message.content
                    )

        # Translate the boolean toggle into the chat-template kwarg understood
        # by vLLM/SGLang-style backends.
        enable_thinking = model_parameters.pop("enable_thinking", None)
        if enable_thinking is not None:
            model_parameters["chat_template_kwargs"] = {"enable_thinking": bool(enable_thinking)}

        # Best-effort cleanup of historical thinking content; never fatal.
        with suppress(Exception):
            self._drop_analyze_channel(prompt_messages)

        return super()._invoke(
            model, credentials, prompt_messages, model_parameters, tools, stop, stream, user
        )

45
models/rerank/rerank.py Normal file
View File

@@ -0,0 +1,45 @@
from typing import Mapping
from dify_plugin.entities.model import AIModelEntity, I18nObject
from dify_plugin.interfaces.model.openai_compatible.rerank import OAICompatRerankModel
from dify_plugin.errors.model import CredentialsValidateFailedError
class OpenAIRerankModel(OAICompatRerankModel):
    def validate_credentials(self, model: str, credentials: dict) -> None:
        """
        Validate model credentials by issuing a small rerank request.

        :param model: model name
        :param credentials: model credentials
        :raises CredentialsValidateFailedError: if the probe invocation fails
        """
        sample_docs = [
            "Carson City is the capital city of the American state of Nevada. At the 2010 United States "
            "Census, Carson City had a population of 55,274.",
            "The Commonwealth of the Northern Mariana Islands is a group of islands in the Pacific Ocean that "
            "are a political division controlled by the United States. Its capital is Saipan.",
        ]
        try:
            self._invoke(
                model=model,
                credentials=credentials,
                query="What is the capital of the United States?",
                docs=sample_docs,
                score_threshold=0.8,
                top_n=3,
            )
        except Exception as ex:
            # Surface any failure as a credential-validation error.
            raise CredentialsValidateFailedError(str(ex)) from ex

    def get_customizable_model_schema(
        self, model: str, credentials: Mapping | dict
    ) -> AIModelEntity:
        """Return the base schema, applying the optional display-name override."""
        entity = super().get_customizable_model_schema(model, credentials)
        display_name = credentials.get("display_name") if isinstance(credentials, Mapping) else None
        if "display_name" in credentials and credentials["display_name"] != "":
            entity.label = I18nObject(
                en_US=credentials["display_name"], zh_Hans=credentials["display_name"]
            )
        return entity

View File

@@ -0,0 +1,27 @@
from typing import Optional
from dify_plugin.entities.model import AIModelEntity, FetchFrom, I18nObject, ModelType
from dify_plugin.interfaces.model.openai_compatible.speech2text import OAICompatSpeech2TextModel
class OpenAISpeech2TextModel(OAICompatSpeech2TextModel):
    def get_customizable_model_schema(
        self, model: str, credentials: dict
    ) -> Optional[AIModelEntity]:
        """
        Define the customizable speech-to-text model schema.

        Builds a minimal entity (no properties or parameter rules) and applies
        the optional display-name override from the credential form.
        """
        label = I18nObject(en_US=model)
        if "display_name" in credentials and credentials["display_name"] != "":
            label = I18nObject(
                en_US=credentials["display_name"], zh_Hans=credentials["display_name"]
            )
        return AIModelEntity(
            model=model,
            label=label,
            fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
            model_type=ModelType.SPEECH2TEXT,
            model_properties={},
            parameter_rules=[],
        )

View File

@@ -0,0 +1,21 @@
from typing import Mapping
from dify_plugin.entities.model import AIModelEntity, I18nObject
from dify_plugin.interfaces.model.openai_compatible.text_embedding import OAICompatEmbeddingModel
class OpenAITextEmbeddingModel(OAICompatEmbeddingModel):
    def get_customizable_model_schema(
        self, model: str, credentials: Mapping | dict
    ) -> AIModelEntity:
        """
        Return the base embedding schema, applying the optional
        display-name override from the credential form.
        """
        # Guard against a None credentials mapping.
        credentials = credentials or {}
        entity = super().get_customizable_model_schema(model, credentials)
        if "display_name" in credentials and credentials["display_name"] != "":
            display_name = credentials["display_name"]
            entity.label = I18nObject(en_US=display_name, zh_Hans=display_name)
        return entity

20
models/tts/tts.py Normal file
View File

@@ -0,0 +1,20 @@
from typing import Mapping
from dify_plugin.entities.model import AIModelEntity, I18nObject
from dify_plugin.interfaces.model.openai_compatible.tts import OAICompatText2SpeechModel
class OpenAIText2SpeechModel(OAICompatText2SpeechModel):
    def get_customizable_model_schema(
        self, model: str, credentials: Mapping | dict
    ) -> AIModelEntity:
        """
        Return the base TTS schema, applying the optional display-name
        override from the credential form.
        """
        entity = super().get_customizable_model_schema(model, credentials)
        if "display_name" in credentials and credentials["display_name"] != "":
            display_name = credentials["display_name"]
            entity.label = I18nObject(en_US=display_name, zh_Hans=display_name)
        return entity

View File

@@ -0,0 +1,17 @@
import logging
from collections.abc import Mapping
from dify_plugin import ModelProvider
logger = logging.getLogger(__name__)
class OpenAIProvider(ModelProvider):
    def validate_provider_credentials(self, credentials: Mapping) -> None:
        """
        Validate provider credentials; raise an exception on failure.

        :param credentials: provider credentials, credentials form defined in
            `provider_credential_schema`.
        """
        # Intentionally a no-op: this provider only supports customizable
        # models, so credentials are validated per-model instead.
        return None

View File

@@ -0,0 +1,276 @@
provider: openai_api_compatible
label:
en_US: OpenAI-API-compatible
description:
en_US: Model providers compatible with OpenAI's API standard, such as LM Studio.
zh_Hans: 兼容 OpenAI API 的模型供应商,例如 LM Studio 。
icon_small:
en_US: icon.svg
supported_model_types:
- llm
- rerank
- text-embedding
- speech2text
- tts
configurate_methods:
- customizable-model
model_credential_schema:
model:
label:
en_US: Model Name
zh_Hans: 模型名称
placeholder:
en_US: Enter full model name
zh_Hans: 输入模型全称
credential_form_schemas:
- variable: display_name
label:
en_US: Model display name
zh_Hans: 模型显示名称
type: text-input
required: false
placeholder:
zh_Hans: 模型在界面的显示名称
en_US: The display name of the model in the interface.
- variable: api_key
label:
en_US: API Key
type: secret-input
required: false
placeholder:
zh_Hans: 在此输入您的 API Key
en_US: Enter your API Key
- variable: endpoint_url
label:
zh_Hans: API endpoint URL
en_US: API endpoint URL
type: text-input
required: true
placeholder:
zh_Hans: Base URL, e.g. https://api.openai.com/v1
en_US: Base URL, e.g. https://api.openai.com/v1
- variable: endpoint_model_name
label:
zh_Hans: API endpoint中的模型名称
en_US: model name for API endpoint
type: text-input
required: false
placeholder:
zh_Hans: endpoint model name, e.g. chatgpt4.0
en_US: endpoint model name, e.g. chatgpt4.0
- variable: mode
show_on:
- variable: __model_type
value: llm
label:
en_US: Completion mode
type: select
required: false
default: chat
placeholder:
zh_Hans: 选择对话类型
en_US: Select completion mode
options:
- value: completion
label:
en_US: Completion
zh_Hans: 补全
- value: chat
label:
en_US: Chat
zh_Hans: 对话
- variable: context_size
label:
zh_Hans: 模型上下文长度
en_US: Model context size
required: true
show_on:
- variable: __model_type
value: llm
type: text-input
default: "4096"
placeholder:
zh_Hans: 在此输入您的模型上下文长度
en_US: Enter your Model context size
- variable: context_size
label:
zh_Hans: 模型上下文长度
en_US: Model context size
required: true
show_on:
- variable: __model_type
value: text-embedding
type: text-input
default: "4096"
placeholder:
zh_Hans: 在此输入您的模型上下文长度
en_US: Enter your Model context size
- variable: context_size
label:
zh_Hans: 模型上下文长度
en_US: Model context size
required: true
show_on:
- variable: __model_type
value: rerank
type: text-input
default: "4096"
placeholder:
zh_Hans: 在此输入您的模型上下文长度
en_US: Enter your Model context size
- variable: max_tokens_to_sample
label:
zh_Hans: 最大 token 上限
en_US: Upper bound for max tokens
show_on:
- variable: __model_type
value: llm
default: "4096"
type: text-input
- variable: agent_though_support
show_on:
- variable: __model_type
value: llm
label:
en_US: Agent Thought
type: select
required: false
default: not_supported
options:
- value: supported
label:
en_US: Support
zh_Hans: 支持
- value: not_supported
label:
en_US: Not Support
zh_Hans: 不支持
- variable: function_calling_type
show_on:
- variable: __model_type
value: llm
label:
en_US: Function calling
type: select
required: false
default: no_call
options:
- value: function_call
label:
en_US: Function Call
zh_Hans: Function Call
- value: tool_call
label:
en_US: Tool Call
zh_Hans: Tool Call
- value: no_call
label:
en_US: Not Support
zh_Hans: 不支持
- variable: stream_function_calling
show_on:
- variable: __model_type
value: llm
label:
en_US: Stream function calling
type: select
required: false
default: not_supported
options:
- value: supported
label:
en_US: Support
zh_Hans: 支持
- value: not_supported
label:
en_US: Not Support
zh_Hans: 不支持
- variable: vision_support
show_on:
- variable: __model_type
value: llm
label:
zh_Hans: Vision 支持
en_US: Vision Support
type: select
required: false
default: no_support
options:
- value: support
label:
en_US: Support
zh_Hans: 支持
- value: no_support
label:
en_US: Not Support
zh_Hans: 不支持
- variable: structured_output_support
show_on:
- variable: __model_type
value: llm
label:
en_US: Structured Output
type: select
required: false
default: not_supported
options:
- value: supported
label:
en_US: Support
zh_Hans: 支持
- value: not_supported
label:
en_US: Not Support
zh_Hans: 不支持
- variable: stream_mode_auth
show_on:
- variable: __model_type
value: llm
label:
en_US: Stream mode auth
type: select
required: false
default: not_use
options:
- value: use
label:
en_US: Use
zh_Hans: 使用
- value: not_use
label:
en_US: Not Use
zh_Hans: 不使用
- variable: stream_mode_delimiter
label:
zh_Hans: 流模式返回结果的分隔符
en_US: Delimiter for streaming results
show_on:
- variable: __model_type
value: llm
default: '\n\n'
type: text-input
- variable: voices
show_on:
- variable: __model_type
value: tts
label:
en_US: Available Voices (comma-separated)
zh_Hans: 可用声音(用英文逗号分隔)
type: text-input
required: false
default: "alloy"
placeholder:
en_US: "alloy,echo,fable,onyx,nova,shimmer"
zh_Hans: "alloy,echo,fable,onyx,nova,shimmer"
help:
en_US: "List voice names separated by commas. First voice will be used as default."
zh_Hans: "用英文逗号分隔的声音列表。第一个声音将作为默认值。"
extra:
python:
provider_source: provider/openai_api_compatible.py
model_sources:
- "models/llm/llm.py"
- "models/text_embedding/text_embedding.py"
- "models/rerank/rerank.py"
- "models/speech2text/speech2text.py"
- "models/tts/tts.py"

21
pyproject.toml Normal file
View File

@@ -0,0 +1,21 @@
[project]
name = "openai-api-compatible-GnRzR6cEy"
version = "0.1.0"
description = "Dify plugin providing access to OpenAI-API-compatible model providers (LLM, rerank, embedding, STT, TTS)"
readme = "README.md"
requires-python = ">=3.12"
# uv pip compile pyproject.toml -o ./requirements.txt
dependencies = [
"dify-plugin>=0.4.3",
"openai>=1.99.9",
]
# uv run black . -C -l 100
# uv run ruff check --fix
[dependency-groups]
dev = [
"black>=25.1.0",
"pytest>=8.4.1",
"ruff>=0.12.5",
]

127
requirements.txt Normal file
View File

@@ -0,0 +1,127 @@
# This file was autogenerated by uv via the following command:
# uv pip compile pyproject.toml -o ./requirements.txt
annotated-types==0.7.0
# via pydantic
anyio==4.10.0
# via
# httpx
# openai
blinker==1.9.0
# via flask
certifi==2025.8.3
# via
# httpcore
# httpx
# requests
cffi==1.17.1
# via gevent
charset-normalizer==3.4.3
# via requests
click==8.2.1
# via flask
colorama==0.4.6
# via
# click
# tqdm
dify-plugin==0.4.3
# via openai-api-compatible-gnrzr6cey (pyproject.toml)
distro==1.9.0
# via openai
dpkt==1.9.8
# via dify-plugin
flask==3.0.3
# via dify-plugin
gevent==25.5.1
# via dify-plugin
greenlet==3.2.4
# via gevent
h11==0.16.0
# via httpcore
httpcore==1.0.9
# via httpx
httpx==0.28.1
# via
# dify-plugin
# openai
idna==3.10
# via
# anyio
# httpx
# requests
# yarl
itsdangerous==2.2.0
# via flask
jinja2==3.1.6
# via flask
jiter==0.10.0
# via openai
markupsafe==3.0.2
# via
# jinja2
# werkzeug
multidict==6.6.4
# via yarl
openai==1.99.9
# via openai-api-compatible-gnrzr6cey (pyproject.toml)
packaging==25.0
# via dify-plugin
propcache==0.3.2
# via yarl
pycparser==2.22
# via cffi
pydantic==2.11.7
# via
# dify-plugin
# openai
# pydantic-settings
pydantic-core==2.33.2
# via pydantic
pydantic-settings==2.10.1
# via dify-plugin
python-dotenv==1.1.1
# via pydantic-settings
pyyaml==6.0.2
# via dify-plugin
regex==2025.7.34
# via tiktoken
requests==2.32.4
# via
# dify-plugin
# tiktoken
setuptools==80.9.0
# via
# zope-event
# zope-interface
sniffio==1.3.1
# via
# anyio
# openai
socksio==1.0.0
# via dify-plugin
tiktoken==0.8.0
# via dify-plugin
tqdm==4.67.1
# via openai
typing-extensions==4.14.1
# via
# anyio
# openai
# pydantic
# pydantic-core
# typing-inspection
typing-inspection==0.4.1
# via
# pydantic
# pydantic-settings
urllib3==2.5.0
# via requests
werkzeug==3.0.6
# via
# dify-plugin
# flask
yarl==1.20.1
# via dify-plugin
zope-event==5.1.1
# via gevent
zope-interface==7.2
# via gevent