Fix linting
@@ -512,9 +512,7 @@ def create_app(args):
         return optimized_azure_openai_model_complete
 
 
-    def create_optimized_gemini_llm_func(
-        config_cache: LLMConfigCache, args
-    ):
+    def create_optimized_gemini_llm_func(config_cache: LLMConfigCache, args):
         """Create optimized Gemini LLM function with cached configuration"""
 
         async def optimized_gemini_model_complete(
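Note on the pattern above: `create_optimized_gemini_llm_func` is a closure factory that captures a pre-built `LLMConfigCache` so each completion call reuses the cached configuration. A minimal sketch, assuming a simplified cache shape (the real fields and request logic are not shown in this diff):

```python
from dataclasses import dataclass


@dataclass
class LLMConfigCache:
    """Assumed shape; stands in for the project's cached LLM config."""
    model: str = "gemini-2.0-flash"
    api_key: str = ""


def create_optimized_gemini_llm_func(config_cache: LLMConfigCache, args):
    """Create optimized Gemini LLM function with cached configuration."""

    async def optimized_gemini_model_complete(prompt: str, **kwargs) -> str:
        # Reuses config_cache captured by the closure instead of rebuilding
        # client configuration on every call; real request logic elided.
        return f"[{config_cache.model}] completion for: {prompt}"

    return optimized_gemini_model_complete
```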
@@ -228,7 +228,7 @@ class BindingOptions:
             argdef = {
                 "argname": f"{args_prefix}-{field.name}",
                 "env_name": f"{env_var_prefix}{field.name.upper()}",
-                "type": _resolve_optional_type(field.type),
+                "type": _resolve_optional_type(field.type),
                 "default": default_value,
                 "help": f"{cls._binding_name} -- " + help.get(field.name, ""),
             }
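The diff only shows the call site of `_resolve_optional_type`; a plausible (assumed) implementation unwraps `Optional[X]` annotations so argparse receives a plain constructor rather than a `typing` object:

```python
import types
from typing import Optional, Union, get_args, get_origin


def _resolve_optional_type(tp):
    """Unwrap Optional[X] / X | None to X; pass other annotations through."""
    if get_origin(tp) in (Union, types.UnionType):
        non_none = [a for a in get_args(tp) if a is not type(None)]
        # Optional[X] is Union[X, None]; keep X only when it is unambiguous.
        if len(non_none) == 1:
            return non_none[0]
    return tp


assert _resolve_optional_type(Optional[int]) is int
assert _resolve_optional_type(str) is str
```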
@@ -498,7 +498,7 @@ class GeminiLLMOptions(BindingOptions):
         "candidate_count": "Number of candidates returned per request",
         "presence_penalty": "Penalty for token presence (-2.0 to 2.0)",
         "frequency_penalty": "Penalty for token frequency (-2.0 to 2.0)",
-        "stop_sequences": 'Stop sequences (JSON array of strings, e.g., \'["END"]\')',
+        "stop_sequences": "Stop sequences (JSON array of strings, e.g., '[\"END\"]')",
         "response_mime_type": "Desired MIME type for the response (e.g., application/json)",
         "safety_settings": "JSON object with Gemini safety settings overrides",
         "system_instruction": "Default system instruction applied to every request",
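The reworded help string documents `stop_sequences` as a JSON-encoded array; on the consuming side, a value such as `'["END"]'` would presumably be decoded with `json.loads` (illustrative only, not shown in the diff):

```python
import json

# A CLI/env value such as '["END"]' decodes to a Python list of stop strings.
stop_sequences = json.loads('["END"]')
assert stop_sequences == ["END"]
```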
@@ -191,7 +191,9 @@ async def gemini_complete_if_cache(
                 usage = getattr(chunk, "usage_metadata", None)
                 if usage is not None:
                     usage_container["usage"] = usage
-                text_piece = getattr(chunk, "text", None) or _extract_response_text(chunk)
+                text_piece = getattr(chunk, "text", None) or _extract_response_text(
+                    chunk
+                )
                 if text_piece:
                     loop.call_soon_threadsafe(queue.put_nowait, text_piece)
             loop.call_soon_threadsafe(queue.put_nowait, None)
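The surrounding code bridges a synchronous SDK stream running in a worker thread into an `asyncio.Queue`, using `loop.call_soon_threadsafe(queue.put_nowait, ...)` for thread-safe handoff and `None` as the end-of-stream sentinel. A self-contained sketch of that pattern (names illustrative):

```python
import asyncio
import threading


async def stream_demo():
    loop = asyncio.get_running_loop()
    queue: asyncio.Queue = asyncio.Queue()

    def worker():
        # Runs in a plain thread; put_nowait must be scheduled onto the
        # event loop thread via call_soon_threadsafe, never called directly.
        for text_piece in ("Hel", "lo"):  # stands in for SDK stream chunks
            loop.call_soon_threadsafe(queue.put_nowait, text_piece)
        loop.call_soon_threadsafe(queue.put_nowait, None)  # end-of-stream

    threading.Thread(target=worker).start()
    while (piece := await queue.get()) is not None:
        print(piece, end="")


asyncio.run(stream_demo())
```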
@@ -10,9 +10,9 @@
 # LLM provider dependencies (with version constraints matching pyproject.toml)
 aioboto3>=12.0.0,<16.0.0
 anthropic>=0.18.0,<1.0.0
+google-genai>=1.0.0,<2.0.0
 llama-index>=0.9.0,<1.0.0
 ollama>=0.1.0,<1.0.0
 openai>=1.0.0,<3.0.0
-google-genai>=1.0.0,<2.0.0
 voyageai>=0.2.0,<1.0.0
 zhipuai>=2.0.0,<3.0.0
@@ -13,13 +13,13 @@ anthropic>=0.18.0,<1.0.0
 
 # Storage backend dependencies
 asyncpg>=0.29.0,<1.0.0
+google-genai>=1.0.0,<2.0.0
 
 # Document processing dependencies
 llama-index>=0.9.0,<1.0.0
 neo4j>=5.0.0,<7.0.0
 ollama>=0.1.0,<1.0.0
 openai>=1.0.0,<3.0.0
-google-genai>=1.0.0,<2.0.0
 openpyxl>=3.0.0,<4.0.0
 pycryptodome>=3.0.0,<4.0.0
 pymilvus>=2.6.2,<3.0.0