Merge pull request #2537 from vishvaRam/patch-1

Fixes the Gemini integration example in the README: the snippet imported and called gemini_complete instead of gemini_model_complete, passed the model via model= instead of model_name=, and omitted llm_model_name in the LightRAG constructor. The example now uses gemini-2.0-flash in place of gemini-1.5-flash.
Authored by Daniel.y on 2025-12-26 14:20:54 +08:00; committed by GitHub.


@@ -718,18 +718,18 @@ If you want to use Google Gemini models, you only need to set up LightRAG as follows:
 import os
 import numpy as np
 from lightrag.utils import wrap_embedding_func_with_attrs
-from lightrag.llm.gemini import gemini_complete, gemini_embed
+from lightrag.llm.gemini import gemini_model_complete, gemini_embed
 
 # Configure the generation model
 async def llm_model_func(
     prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
 ) -> str:
-    return await gemini_complete(
+    return await gemini_model_complete(
         prompt,
         system_prompt=system_prompt,
         history_messages=history_messages,
         api_key=os.getenv("GEMINI_API_KEY"),
-        model="gemini-1.5-flash",
+        model_name="gemini-2.0-flash",
         **kwargs
     )
 
@@ -749,6 +749,7 @@ async def embedding_func(texts: list[str]) -> np.ndarray:
 rag = LightRAG(
     working_dir=WORKING_DIR,
     llm_model_func=llm_model_func,
+    llm_model_name="gemini-2.0-flash",
     embedding_func=embedding_func
 )
 ```
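
For reference, the corrected README snippet reads roughly as follows once both hunks are applied. This is a sketch, not a verbatim copy of the README: the WORKING_DIR value, the gemini_embed call signature, and the wrap_embedding_func_with_attrs arguments (embedding_dim, max_token_size) are assumptions, since the embedding block between the two hunks is elided from this diff.

```python
import os
import numpy as np

from lightrag import LightRAG
from lightrag.utils import wrap_embedding_func_with_attrs
from lightrag.llm.gemini import gemini_model_complete, gemini_embed

WORKING_DIR = "./rag_storage"  # assumption: the README's value is not shown in this diff

# Generation model: gemini_model_complete expects model_name, not model.
async def llm_model_func(
    prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
) -> str:
    return await gemini_model_complete(
        prompt,
        system_prompt=system_prompt,
        history_messages=history_messages,
        api_key=os.getenv("GEMINI_API_KEY"),
        model_name="gemini-2.0-flash",
        **kwargs
    )

# Embedding model: the decorator arguments and the gemini_embed signature are
# assumptions; the README's embedding block sits between the two hunks above
# and is not part of this change.
@wrap_embedding_func_with_attrs(embedding_dim=768, max_token_size=8192)
async def embedding_func(texts: list[str]) -> np.ndarray:
    return await gemini_embed(texts, api_key=os.getenv("GEMINI_API_KEY"))

rag = LightRAG(
    working_dir=WORKING_DIR,
    llm_model_func=llm_model_func,
    llm_model_name="gemini-2.0-flash",  # added by this commit
    embedding_func=embedding_func
)
```

GEMINI_API_KEY must be set in the environment before running; both the completion and embedding helpers read it via os.getenv.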