From 995858733531734e15778a25a4d2ebd0ca5421d4 Mon Sep 17 00:00:00 2001 From: Vishva R <44122284+vishvaRam@users.noreply.github.com> Date: Thu, 25 Dec 2025 15:11:37 +0530 Subject: [PATCH] Update import and model names in README MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This update fixes several critical issues in the Gemini integration example: 1. Corrected import: Changed from gemini_complete to gemini_model_complete (the correct function name per lightrag/llm/gemini.py) 2. Fixed parameter name: Changed 'model' to 'model_name' in the gemini_model_complete() call to match the function signature 3. Added llm_model_name to LightRAG initialization: This is required for gemini_model_complete to retrieve the model name from hashing_kv.global_config 4. Updated to the latest model: gemini-1.5-flash → gemini-2.0-flash Without these changes, users get "404 NOT_FOUND" errors because the code defaults to gpt-4o-mini when model_name is not properly configured. Tested and verified working with Gemini 2.0 Flash API. 
--- README.md | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 0ab21759..090b2d99 100644 --- a/README.md +++ b/README.md @@ -718,18 +718,18 @@ If you want to use Google Gemini models, you only need to set up LightRAG as fol import os import numpy as np from lightrag.utils import wrap_embedding_func_with_attrs -from lightrag.llm.gemini import gemini_complete, gemini_embed +from lightrag.llm.gemini import gemini_model_complete, gemini_embed # Configure the generation model async def llm_model_func( prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs ) -> str: - return await gemini_complete( + return await gemini_model_complete( prompt, system_prompt=system_prompt, history_messages=history_messages, api_key=os.getenv("GEMINI_API_KEY"), - model="gemini-1.5-flash", + model_name="gemini-2.0-flash", **kwargs ) @@ -749,6 +749,7 @@ async def embedding_func(texts: list[str]) -> np.ndarray: rag = LightRAG( working_dir=WORKING_DIR, llm_model_func=llm_model_func, + llm_model_name="gemini-2.0-flash", embedding_func=embedding_func ) ```