# Dependencies (assumed install names): pip install llama-index-core spark_ai_python
from typing import Any

from llama_index.core.llms import (
    CustomLLM,
    CompletionResponse,
    CompletionResponseGen,
    LLMMetadata,
)
from llama_index.core.llms.callbacks import llm_completion_callback
from sparkai.llm.llm import ChatSparkLLM, ChunkPrintHandler
from sparkai.core.messages import ChatMessage

# iFlytek Spark endpoint and credentials (replace with your own app's values).
# The 'generalv3' domain pairs with the v3.1 websocket endpoint.
SPARKAI_URL = 'wss://spark-api.xf-yun.com/v3.1/chat'
SPARKAI_APP_ID = '9829fafe'
SPARKAI_API_SECRET = 'MmY2ZmM3ZjVjZWY4OTc3MWZiMWQyMTc2'
SPARKAI_API_KEY = 'a5491fb8cd0637837e2e0016b85cd448'
SPARKAI_DOMAIN = 'generalv3'
class SparkLLM(CustomLLM):
    """llama-index CustomLLM wrapper around the iFlytek Spark chat API."""

    # Fallback text; overwritten with the latest Spark reply by complete().
    dummy_response: str = "My response"
    @property
    def metadata(self) -> LLMMetadata:
        """Get LLM metadata."""
        return LLMMetadata()
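    # Assumption: if downstream llama-index components need real limits,
    # LLMMetadata also accepts fields such as context_window, num_output and
    # model_name. The values below are illustrative, not official Spark v3
    # limits:
    #
    #     return LLMMetadata(
    #         context_window=8192,
    #         num_output=1024,
    #         model_name="spark-v3",
    #     )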
    @llm_completion_callback()
    def complete(self, prompt: str, **kwargs: Any) -> CompletionResponse:
        spark = ChatSparkLLM(
            spark_api_url=SPARKAI_URL,
            spark_app_id=SPARKAI_APP_ID,
            spark_api_key=SPARKAI_API_KEY,
            spark_api_secret=SPARKAI_API_SECRET,
            spark_llm_domain=SPARKAI_DOMAIN,
            streaming=False,
        )
        messages = [ChatMessage(role="user", content=prompt)]
        handler = ChunkPrintHandler()
        # generate() returns an LLMResult; the reply text sits at generations[0][0].
        result = spark.generate([messages], callbacks=[handler])
        self.dummy_response = result.generations[0][0].text
        return CompletionResponse(text=self.dummy_response)
    @llm_completion_callback()
    def stream_complete(
        self, prompt: str, **kwargs: Any
    ) -> CompletionResponseGen:
        # Replays the text cached by the last complete() call one character at
        # a time, rather than streaming from the Spark API itself.
        response = ""
        for token in self.dummy_response:
            response += token
            yield CompletionResponse(text=response, delta=token)
llm = SparkLLM()
print(llm.complete("hello").text)
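# A minimal sketch of plugging this LLM into other llama-index components.
# Assumption: you want it as the global default; Settings is the standard
# llama_index.core entry point for that.
from llama_index.core import Settings

Settings.llm = llm

# stream_complete() replays the last complete() result character by character:
for chunk in llm.stream_complete("hello"):
    print(chunk.delta, end="")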