From 6e36ff41e16dbe7549074b6329404cd8d25c90cd Mon Sep 17 00:00:00 2001
From: yangdx
Date: Thu, 6 Nov 2025 16:01:24 +0800
Subject: [PATCH] Fix linting

---
 lightrag/api/lightrag_server.py | 4 +---
 lightrag/llm/binding_options.py | 4 ++--
 lightrag/llm/gemini.py          | 4 +++-
 requirements-offline-llm.txt    | 2 +-
 requirements-offline.txt        | 2 +-
 5 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/lightrag/api/lightrag_server.py b/lightrag/api/lightrag_server.py
index 24d590d9..70e17bb6 100644
--- a/lightrag/api/lightrag_server.py
+++ b/lightrag/api/lightrag_server.py
@@ -512,9 +512,7 @@ def create_app(args):
 
         return optimized_azure_openai_model_complete
 
-    def create_optimized_gemini_llm_func(
-        config_cache: LLMConfigCache, args
-    ):
+    def create_optimized_gemini_llm_func(config_cache: LLMConfigCache, args):
         """Create optimized Gemini LLM function with cached configuration"""
 
         async def optimized_gemini_model_complete(
diff --git a/lightrag/llm/binding_options.py b/lightrag/llm/binding_options.py
index e2f94649..44ab5d2f 100644
--- a/lightrag/llm/binding_options.py
+++ b/lightrag/llm/binding_options.py
@@ -228,7 +228,7 @@ class BindingOptions:
             argdef = {
                 "argname": f"{args_prefix}-{field.name}",
                 "env_name": f"{env_var_prefix}{field.name.upper()}",
-                "type": _resolve_optional_type(field.type),
+                "type": _resolve_optional_type(field.type),
                 "default": default_value,
                 "help": f"{cls._binding_name} -- " + help.get(field.name, ""),
             }
@@ -498,7 +498,7 @@ class GeminiLLMOptions(BindingOptions):
         "candidate_count": "Number of candidates returned per request",
         "presence_penalty": "Penalty for token presence (-2.0 to 2.0)",
         "frequency_penalty": "Penalty for token frequency (-2.0 to 2.0)",
-        "stop_sequences": 'Stop sequences (JSON array of strings, e.g., \'["END"]\')',
+        "stop_sequences": "Stop sequences (JSON array of strings, e.g., '[\"END\"]')",
         "response_mime_type": "Desired MIME type for the response (e.g., application/json)",
         "safety_settings": "JSON object with Gemini safety settings overrides",
         "system_instruction": "Default system instruction applied to every request",
diff --git a/lightrag/llm/gemini.py b/lightrag/llm/gemini.py
index 14a1b238..b8c64b31 100644
--- a/lightrag/llm/gemini.py
+++ b/lightrag/llm/gemini.py
@@ -191,7 +191,9 @@ async def gemini_complete_if_cache(
                 usage = getattr(chunk, "usage_metadata", None)
                 if usage is not None:
                     usage_container["usage"] = usage
-                text_piece = getattr(chunk, "text", None) or _extract_response_text(chunk)
+                text_piece = getattr(chunk, "text", None) or _extract_response_text(
+                    chunk
+                )
                 if text_piece:
                     loop.call_soon_threadsafe(queue.put_nowait, text_piece)
             loop.call_soon_threadsafe(queue.put_nowait, None)
diff --git a/requirements-offline-llm.txt b/requirements-offline-llm.txt
index b4d68b6b..4e8b7168 100644
--- a/requirements-offline-llm.txt
+++ b/requirements-offline-llm.txt
@@ -10,9 +10,9 @@
 # LLM provider dependencies (with version constraints matching pyproject.toml)
 aioboto3>=12.0.0,<16.0.0
 anthropic>=0.18.0,<1.0.0
+google-genai>=1.0.0,<2.0.0
 llama-index>=0.9.0,<1.0.0
 ollama>=0.1.0,<1.0.0
 openai>=1.0.0,<3.0.0
-google-genai>=1.0.0,<2.0.0
 voyageai>=0.2.0,<1.0.0
 zhipuai>=2.0.0,<3.0.0
diff --git a/requirements-offline.txt b/requirements-offline.txt
index b9e2563e..8dfb1b01 100644
--- a/requirements-offline.txt
+++ b/requirements-offline.txt
@@ -13,13 +13,13 @@
 anthropic>=0.18.0,<1.0.0
 
 # Storage backend dependencies
 asyncpg>=0.29.0,<1.0.0
+google-genai>=1.0.0,<2.0.0
 # Document processing dependencies
 llama-index>=0.9.0,<1.0.0
 neo4j>=5.0.0,<7.0.0
 ollama>=0.1.0,<1.0.0
 openai>=1.0.0,<3.0.0
-google-genai>=1.0.0,<2.0.0
 openpyxl>=3.0.0,<4.0.0
 pycryptodome>=3.0.0,<4.0.0
 pymilvus>=2.6.2,<3.0.0