diff --git a/df-llm-agent/database/init.sql b/df-llm-agent/database/init.sql
index f831eaa..2151501 100644
--- a/df-llm-agent/database/init.sql
+++ b/df-llm-agent/database/init.sql
@@ -93,10 +93,3 @@ INSERT INTO llm_config (`user_id`,`platform`,`model`,`key`,`value`) VALUES (1,'b
 INSERT INTO llm_config (`user_id`,`platform`,`model`,`key`,`value`) VALUES (1,'zhipu','zhipuai','enable',0);
 INSERT INTO llm_config (`user_id`,`platform`,`model`,`key`,`value`) VALUES (1,'zhipu','zhipuai','api_key','');
 INSERT INTO llm_config (`user_id`,`platform`,`model`,`key`,`value`) VALUES (1,'zhipu','zhipuai','engine_name','chatglm_turbo');
-
-
-INSERT INTO llm_config (`user_id`,`platform`,`model`,`key`,`value`) VALUES (1,'lixiang','gpt','enable',0);
-INSERT INTO llm_config (`user_id`,`platform`,`model`,`key`,`value`) VALUES (1,'lixiang','gpt','api_key','');
-INSERT INTO llm_config (`user_id`,`platform`,`model`,`key`,`value`) VALUES (1,'lixiang','gpt','api_type','azure');
-INSERT INTO llm_config (`user_id`,`platform`,`model`,`key`,`value`) VALUES (1,'lixiang','gpt','api_base','');
-INSERT INTO llm_config (`user_id`,`platform`,`model`,`key`,`value`) VALUES (1,'lixiang','gpt','api_version','');
\ No newline at end of file
diff --git a/df-llm-agent/llm_agent_app/llm_agent.py b/df-llm-agent/llm_agent_app/llm_agent.py
index 2f1c6ac..bdd5d55 100644
--- a/df-llm-agent/llm_agent_app/llm_agent.py
+++ b/df-llm-agent/llm_agent_app/llm_agent.py
@@ -104,7 +104,7 @@ class llmAgentWorker(object):
     # llm返回的原生数据或请求异常信息
     output_all = []
 
-    lixiang = {}
+    custom_llm = {}
 
     # agent
     azure_client = None
@@ -132,13 +132,21 @@ async def num_tokens_from_messages(cls, messages, model="gpt-3.5-turbo-0613"):
             tokens_per_message = 3
             tokens_per_name = 1
         elif model == "gpt-3.5-turbo-0301":
-            tokens_per_message = 4  # every message follows <|start|>{role/name}\n{content}<|end|>\n
+            tokens_per_message = (
+                4  # every message follows <|start|>{role/name}\n{content}<|end|>\n
+            )
             tokens_per_name = -1  # if there's a name, the role is omitted
         elif "gpt-3.5-turbo" in model:
-            print("Warning: gpt-3.5-turbo may update over time. Returning num tokens assuming gpt-3.5-turbo-0613.")
-            return await cls.num_tokens_from_messages(messages, model="gpt-3.5-turbo-0613")
+            print(
+                "Warning: gpt-3.5-turbo may update over time. Returning num tokens assuming gpt-3.5-turbo-0613."
+            )
+            return await cls.num_tokens_from_messages(
+                messages, model="gpt-3.5-turbo-0613"
+            )
         elif "gpt-4" in model:
-            print("Warning: gpt-4 may update over time. Returning num tokens assuming gpt-4-0613.")
+            print(
+                "Warning: gpt-4 may update over time. Returning num tokens assuming gpt-4-0613."
+            )
             return await cls.num_tokens_from_messages(messages, model="gpt-4-0613")
         else:
             raise NotImplementedError(
@@ -164,7 +172,9 @@ async def chat_add(cls):
         chat_data["input"] = cls.user_question
         # chat_data["output"] = ""
 
-        res = await chat_record_worker.chat_record_add(user_info=cls.user_info, args={}, data=chat_data)
+        res = await chat_record_worker.chat_record_add(
+            user_info=cls.user_info, args={}, data=chat_data
+        )
 
         cls.res_chat_id = res.get("res_chat_id", 0)
@@ -175,18 +185,29 @@ async def chat_up(cls):
         chat_data = {}
-        chat_data["output"] = ''.join(cls.output)
+        chat_data["output"] = "".join(cls.output)
         chat_data["output_all"] = cls.output_all
 
-        await chat_record_worker.chat_record_update(user_info=cls.user_info, res_chat_id=cls.res_chat_id, data=chat_data)
+        await chat_record_worker.chat_record_update(
+            user_info=cls.user_info, res_chat_id=cls.res_chat_id, data=chat_data
+        )
 
     # 基础参数配置
     @classmethod
-    async def assistant_base(cls, request, user_info, platform, engine_name, prompt_type, args, data):
+    async def assistant_base(
+        cls, request, user_info, platform, engine_name, prompt_type, args, data
+    ):
         # user_id = user_info.get("ID", 0)
-        if not isinstance(data, dict) or "user_content" not in data or "system_content" not in data:
-            raise BadRequestException("INVALID_POST_DATA", f"{const.INVALID_PARAMETERS}, 缺失user_content或system_content参数")
+        if (
+            not isinstance(data, dict)
+            or "user_content" not in data
+            or "system_content" not in data
+        ):
+            raise BadRequestException(
+                "INVALID_POST_DATA",
+                f"{const.INVALID_PARAMETERS}, 缺失user_content或system_content参数",
+            )
 
         # cls.query = [{"role": "user", "content": data["user_content"]}]
@@ -194,7 +215,14 @@ async def assistant_base(cls, request, user_info, platform, engine_name, prompt_
         cls.system_content = data["system_content"]
 
         if platform == "baidu" or platform == "zhipu":
-            cls.query = [{"role": "user", "content": cls.system_content}, {"role": "assistant", "content": "好的,后面的回复将按照你给我的角色和要求来解答"}, {"role": "user", "content": data["question"]}]
+            cls.query = [
+                {"role": "user", "content": cls.system_content},
+                {
+                    "role": "assistant",
+                    "content": "好的,后面的回复将按照你给我的角色和要求来解答",
+                },
+                {"role": "user", "content": data["question"]},
+            ]
 
         # 获取配置
         data_info = {}
@@ -206,17 +234,20 @@ async def assistant_base(cls, request, user_info, platform, engine_name, prompt_
         if hasattr(config, "platforms"):
             res_config = config.platforms
             for _info in res_config:
-                if _info.get('platform', '') == platform and _info.get('enable', False):
-                    _engine_name = _info.get('engine_name', [])
+                if _info.get("platform", "") == platform and _info.get("enable", False):
+                    _engine_name = _info.get("engine_name", [])
                     if engine_name in _engine_name:
-                        _info['engine_name'] = f"{engine_name}"
+                        _info["engine_name"] = f"{engine_name}"
                     else:
-                        _info['engine_name'] = ''
+                        _info["engine_name"] = ""
                     engine_config = _info
 
             if not engine_config.get("enable", False):
-                raise BadRequestException("INVALID_PARAMETERS", f"{const.INVALID_PARAMETERS}, 平台: {platform} 未启用")
+                raise BadRequestException(
+                    "INVALID_PARAMETERS",
+                    f"{const.INVALID_PARAMETERS}, 平台: {platform} 未启用",
+                )
 
         else:
             try:
@@ -237,18 +268,33 @@ async def assistant_base(cls, request, user_info, platform, engine_name, prompt_
                 engine_config[_key] = _value
 
             if engine_config.get("enable", "") != "1":
-                raise BadRequestException("INVALID_PARAMETERS", f"{const.INVALID_PARAMETERS}, 平台: {platform} 未启用")
+                raise BadRequestException(
+                    "INVALID_PARAMETERS",
+                    f"{const.INVALID_PARAMETERS}, 平台: {platform} 未启用",
+                )
 
         # print(engine_config, engine_config.get("engine_name"), engine_name)
         if engine_config.get("engine_name", "") != engine_name:
-            raise BadRequestException("INVALID_PARAMETERS", f"{const.INVALID_PARAMETERS}, 引用的引擎错误: {engine_name}")
+            raise BadRequestException(
+                "INVALID_PARAMETERS",
+                f"{const.INVALID_PARAMETERS}, 引用的引擎错误: {engine_name}",
+            )
 
         # 检查配置项目
         if platform == "azure":
-            for key in ("api_key", "api_type", "api_base", "api_version", "engine_name"):
+            for key in (
+                "api_key",
+                "api_type",
+                "api_base",
+                "api_version",
+                "engine_name",
+            ):
                 if key not in engine_config or engine_config.get(f"{key}", "") == "":
-                    raise BadRequestException("DATA_NOT_FOUND", f"{const.DATA_NOT_FOUND}: 请确认{key}已经正确配置")
+                    raise BadRequestException(
+                        "DATA_NOT_FOUND",
+                        f"{const.DATA_NOT_FOUND}: 请确认{key}已经正确配置",
+                    )
 
             # 组件
             if prompt_type == "langchain":
@@ -257,13 +303,13 @@ async def assistant_base(cls, request, user_info, platform, engine_name, prompt_
                     deployment_name=engine_config.get("engine_name"),
                     openai_api_version=engine_config.get("api_version"),
                     openai_api_type=engine_config.get("api_type"),
-                    openai_api_key=engine_config.get("api_key")
+                    openai_api_key=engine_config.get("api_key"),
                 )
             else:
                 cls.azure_client = AsyncAzureOpenAI(
                     api_key=engine_config.get("api_key"),
                     api_version=engine_config.get("api_version"),
-                    azure_endpoint=engine_config.get("api_base")
+                    azure_endpoint=engine_config.get("api_base"),
                 )
 
             cls.engine_name = engine_config.get("engine_name")
@@ -271,7 +317,10 @@ async def assistant_base(cls, request, user_info, platform, engine_name, prompt_
         elif platform == "openai":
             for key in ("api_key", "engine_name"):
                 if key not in engine_config or engine_config.get(f"{key}", "") == "":
-                    raise BadRequestException("DATA_NOT_FOUND", f"{const.DATA_NOT_FOUND}: 请确认{key}已经正确配置")
+                    raise BadRequestException(
+                        "DATA_NOT_FOUND",
+                        f"{const.DATA_NOT_FOUND}: 请确认{key}已经正确配置",
+                    )
 
             openai.api_key = engine_config.get("api_key")
             cls.engine_name = engine_config.get("engine_name")
@@ -279,7 +328,10 @@ async def assistant_base(cls, request, user_info, platform, engine_name, prompt_
         elif platform == "aliyun":
             for key in ("api_key", "engine_name"):
                 if key not in engine_config or engine_config.get(f"{key}", "") == "":
-                    raise BadRequestException("DATA_NOT_FOUND", f"{const.DATA_NOT_FOUND}: 请确认{key}已经正确配置")
+                    raise BadRequestException(
+                        "DATA_NOT_FOUND",
+                        f"{const.DATA_NOT_FOUND}: 请确认{key}已经正确配置",
+                    )
 
             dashscope.api_key = engine_config.get("api_key")
             cls.engine_name = engine_config.get("engine_name")
@@ -287,7 +339,10 @@ async def assistant_base(cls, request, user_info, platform, engine_name, prompt_
         elif platform == "baidu":
             for key in ("api_key", "api_secre", "engine_name"):
                 if key not in engine_config or engine_config.get(f"{key}", "") == "":
-                    raise BadRequestException("DATA_NOT_FOUND", f"{const.DATA_NOT_FOUND}: 请确认{key}已经正确配置")
+                    raise BadRequestException(
+                        "DATA_NOT_FOUND",
+                        f"{const.DATA_NOT_FOUND}: 请确认{key}已经正确配置",
+                    )
 
             qianfan.AK(engine_config.get("api_key"))
             qianfan.SK(engine_config.get("api_secre"))
@@ -296,25 +351,40 @@ async def assistant_base(cls, request, user_info, platform, engine_name, prompt_
         elif platform == "zhipu":
             for key in ("api_key", "engine_name"):
                 if key not in engine_config or engine_config.get(f"{key}", "") == "":
-                    raise BadRequestException("DATA_NOT_FOUND", f"{const.DATA_NOT_FOUND}: 请确认{key}已经正确配置")
+                    raise BadRequestException(
+                        "DATA_NOT_FOUND",
+                        f"{const.DATA_NOT_FOUND}: 请确认{key}已经正确配置",
+                    )
 
             zhipuai.api_key = engine_config.get("api_key")
             cls.engine_name = engine_config.get("engine_name")
 
-        elif platform == "lixiang":
-            for key in ("api_key", "api_type", "api_base", "api_version", "engine_name"):
+        elif platform == "custom_llm":
+            for key in (
+                "api_key",
+                "api_type",
+                "api_base",
+                "api_version",
+                "engine_name",
+            ):
                 if key not in engine_config or engine_config.get(f"{key}", "") == "":
-                    raise BadRequestException("DATA_NOT_FOUND", f"{const.DATA_NOT_FOUND}: 请确认{key}已经正确配置")
-
-            lixiang_config = {}
-            lixiang_config["api_key"] = engine_config.get("api_key")
-            lixiang_config["api_type"] = engine_config.get("api_type")
-            lixiang_config["api_base"] = engine_config.get("api_base")
-            lixiang_config["api_version"] = engine_config.get("api_version")
-            cls.lixiang = lixiang_config
+                    raise BadRequestException(
+                        "DATA_NOT_FOUND",
+                        f"{const.DATA_NOT_FOUND}: 请确认{key}已经正确配置",
+                    )
+
+            custom_llm_config = {}
+            custom_llm_config["api_key"] = engine_config.get("api_key")
+            custom_llm_config["api_type"] = engine_config.get("api_type")
+            custom_llm_config["api_base"] = engine_config.get("api_base")
+            custom_llm_config["api_version"] = engine_config.get("api_version")
+            cls.custom_llm = custom_llm_config
             cls.engine_name = engine_config.get("engine_name")
 
         else:
-            raise BadRequestException("INVALID_PARAMETERS", f"{const.INVALID_PARAMETERS}, 模型所在平台名称错误")
+            raise BadRequestException(
+                "INVALID_PARAMETERS",
+                f"{const.INVALID_PARAMETERS}, 模型所在平台名称错误",
+            )
 
         # cls.user_info = user_info
         cls.request = request
@@ -329,29 +399,31 @@ async def assistant_base(cls, request, user_info, platform, engine_name, prompt_
         if platform == "baidu" or platform == "zhipu":
             cls.messages = [*cls.query]
 
-        if platform == "lixiang":
-            cls.messages = {
-                "messages": cls.messages
-            }
+        if platform == "custom_llm":
+            cls.messages = {"messages": cls.messages}
 
         conv_history_tokens = 0
         if platform == "azure" or platform == "openai":
            try:
                 conv_history_tokens = await cls.num_tokens_from_messages(cls.messages)
             except Exception as e:
-                raise BadRequestException("FAIL", f"{const.FAIL}: 计算token数量错误: {e}")
+                raise BadRequestException(
+                    "FAIL", f"{const.FAIL}: 计算token数量错误: {e}"
+                )
 
         elif platform == "aliyun":
-            response_token = dashscope.Tokenization.call(model=cls.engine_name,
-                                                         messages=cls.messages
-                                                         )
+            response_token = dashscope.Tokenization.call(
+                model=cls.engine_name, messages=cls.messages
+            )
             if response_token.status_code != HTTPStatus.OK:
-                raise BadRequestException("FAIL", f"{const.FAIL}: 计算token数量错误: {response_token.message}")
+                raise BadRequestException(
+                    "FAIL", f"{const.FAIL}: 计算token数量错误: {response_token.message}"
+                )
 
             usage = response_token.usage
-            conv_history_tokens = usage.get('input_tokens', 0)
+            conv_history_tokens = usage.get("input_tokens", 0)
 
         print(conv_history_tokens)
         # 记录会话
@@ -360,10 +432,14 @@ async def assistant_base(cls, request, user_info, platform, engine_name, prompt_
 
     # 流处理
     @classmethod
-    async def assistant_stream(cls, request, user_info, platform, engine_name, prompt_type, args, data):
+    async def assistant_stream(
+        cls, request, user_info, platform, engine_name, prompt_type, args, data
+    ):
         # 校验
-        await cls.assistant_base(request, user_info, platform, engine_name, prompt_type, args, data)
+        await cls.assistant_base(
+            request, user_info, platform, engine_name, prompt_type, args, data
+        )
 
         # 开始时间
         working_start_time = datetime.datetime.now()
@@ -371,9 +447,13 @@ async def assistant_stream(cls, request, user_info, platform, engine_name, promp
 
         if platform == "azure" or platform == "openai":
             try:
                 if platform == "azure":
-                    response = await cls.azure_client.chat.completions.create(model=cls.engine_name, messages=cls.messages, stream=True)
+                    response = await cls.azure_client.chat.completions.create(
+                        model=cls.engine_name, messages=cls.messages, stream=True
+                    )
                 else:
-                    response = await openai.ChatCompletion.acreate(engine=cls.engine_name, messages=cls.messages, stream=True)
+                    response = await openai.ChatCompletion.acreate(
+                        engine=cls.engine_name, messages=cls.messages, stream=True
+                    )
             except Exception as e:
                 raise BadRequestException("APP_ERROR", const.APP_ERROR, f"{e}")
@@ -389,11 +469,13 @@ async def generate_data(output, output_all):
 
                     # 结束时间
                     working_end_time = datetime.datetime.now()
-                    all_time = working_end_time.timestamp() - working_start_time.timestamp()
+                    all_time = (
+                        working_end_time.timestamp() - working_start_time.timestamp()
+                    )
 
                     # msg = f"用户: {cls.user_info.get('ID', 0)} 请求gpt开始时间: {working_start_time}, 结束时间: {working_end_time}, 共耗时: {all_time} 秒,返回信息: {item}"
                     msg = {}
-                    msg["user_id"] = cls.user_info.get('ID', 0)
+                    msg["user_id"] = cls.user_info.get("ID", 0)
                     msg["start_time"] = f"{working_start_time}"
                     msg["end_time"] = f"{working_end_time}"
                     msg["all_time"] = all_time
@@ -406,7 +488,7 @@ async def generate_data(output, output_all):
                     choices = item_json["choices"]
                     if choices:
                         delta = choices[0].get("delta", {})
-                        if 'content' in delta:
+                        if "content" in delta:
                             delta_content = delta.get("content", None)
                             if delta_content is not None:
                                 content = delta_content
@@ -428,9 +510,9 @@ async def generate_data(output, output_all):
                 responses = dashscope.Generation.call(
                     model=cls.engine_name,  # Generation.Models.qwen_turbo,
                     messages=cls.messages,
-                    result_format='message',
+                    result_format="message",
                     stream=True,
-                    incremental_output=True
+                    incremental_output=True,
                 )
             except Exception as e:
                 raise BadRequestException("APP_ERROR", const.APP_ERROR, f"{e}")
@@ -445,11 +527,13 @@ async def generate_data(output, output_all):
 
                    # 结束时间
                    working_end_time = datetime.datetime.now()
-                    all_time = working_end_time.timestamp() - working_start_time.timestamp()
+                    all_time = (
+                        working_end_time.timestamp() - working_start_time.timestamp()
+                    )
 
                    # msg = f"用户: {cls.user_info.get('ID', 0)} 请求gpt开始时间: {working_start_time}, 结束时间: {working_end_time}, 共耗时: {all_time} 秒,返回信息: {response}"
                    msg = {}
-                    msg["user_id"] = cls.user_info.get('ID', 0)
+                    msg["user_id"] = cls.user_info.get("ID", 0)
                    msg["start_time"] = f"{working_start_time}"
                    msg["end_time"] = f"{working_end_time}"
                    msg["all_time"] = all_time
@@ -461,9 +545,9 @@ async def generate_data(output, output_all):
                    if response.status_code == HTTPStatus.OK:
                        # print(response)
                        item = response.output
-                        if item['choices']:
+                        if item["choices"]:
                            delta = item["choices"][0].get("message", {})
-                            if 'content' in delta:
+                            if "content" in delta:
                                content = delta.get("content", "")
                    else:
                        content = response.message
@@ -484,7 +568,9 @@ async def generate_data(output, output_all):
 
             try:
                 chat_comp = qianfan.ChatCompletion()
                 # 指定特定模型
-                response = await chat_comp.ado(model=cls.engine_name, messages=cls.messages, stream=True)
+                response = await chat_comp.ado(
+                    model=cls.engine_name, messages=cls.messages, stream=True
+                )
             except Exception as e:
                 raise BadRequestException("APP_ERROR", const.APP_ERROR, f"{e}")
@@ -498,12 +584,14 @@ async def generate_data(output, output_all):
 
                    # 结束时间
                    working_end_time = datetime.datetime.now()
-                    all_time = working_end_time.timestamp() - working_start_time.timestamp()
+                    all_time = (
+                        working_end_time.timestamp() - working_start_time.timestamp()
+                    )
 
                    # msg = f"用户: {cls.user_info.get('ID', 0)} 请求gpt开始时间: {working_start_time}, 结束时间: {working_end_time}, 共耗时: {all_time} 秒,返回信息: {item}"
                    msg = {}
-                    msg["user_id"] = cls.user_info.get('ID', 0)
+                    msg["user_id"] = cls.user_info.get("ID", 0)
                    msg["start_time"] = f"{working_start_time}"
                    msg["end_time"] = f"{working_end_time}"
                    msg["all_time"] = all_time
@@ -513,12 +601,12 @@ async def generate_data(output, output_all):
 
                    content = ""
 
-                    if item.get('code', 0) == HTTPStatus.OK:
+                    if item.get("code", 0) == HTTPStatus.OK:
 
-                        content = item.get('result', '')
+                        content = item.get("result", "")
                    else:
                        # msg = '返回错误: 请求id: %s, 状态码: %s, 错误信息: %s' % (item.get('id', ''), item.get('code', 0), item.get('result', ''))
-                        content = item.get('result', '')
+                        content = item.get("result", "")
 
                    output.append(f"{content}")
                    yield content
@@ -534,8 +622,7 @@ async def generate_data(output, output_all):
 
            try:
                response = zhipuai.model_api.sse_invoke(
-                    model=cls.engine_name,
-                    prompt=cls.messages
+                    model=cls.engine_name, prompt=cls.messages
                )
 
            except Exception as e:
@@ -552,11 +639,13 @@ async def generate_data(output, output_all):
 
                    # 结束时间
                    working_end_time = datetime.datetime.now()
-                    all_time = working_end_time.timestamp() - working_start_time.timestamp()
+                    all_time = (
+                        working_end_time.timestamp() - working_start_time.timestamp()
+                    )
 
                    # msg = f"用户: {cls.user_info.get('ID', 0)} 请求gpt开始时间: {working_start_time}, 结束时间: {working_end_time}, 共耗时: {all_time} 秒,返回信息: "
                    msg = {}
-                    msg["user_id"] = cls.user_info.get('ID', 0)
+                    msg["user_id"] = cls.user_info.get("ID", 0)
                    msg["start_time"] = f"{working_start_time}"
                    msg["end_time"] = f"{working_end_time}"
                    msg["all_time"] = all_time
@@ -592,27 +681,34 @@ async def generate_data(output, output_all):
 
            return generate_data(output, output_all)
 
-        elif platform == "lixiang":
+        elif platform == "custom_llm":
            lcuuid = generate_uuid()
-            headers = {"BCS-APIHub-RequestId": lcuuid, "X-CHJ-GWToken": cls.lixiang["api_key"]}
-            url = f"{cls.lixiang['api_base']}/bcs-apihub-ai-proxy-service/apihub/openai/{cls.lixiang['api_version']}/{cls.lixiang['api_type']}/models/{cls.engine_name}?stream=true"
+            headers = {
+                "BCS-APIHub-RequestId": lcuuid,
+                "X-CHJ-GWToken": cls.custom_llm["api_key"],
+            }
+            url = f"{cls.custom_llm['api_base']}/bcs-apihub-ai-proxy-service/apihub/openai/{cls.custom_llm['api_version']}/{cls.custom_llm['api_type']}/models/{cls.engine_name}?stream=true"
            try:
-                response = curl_tools.curl_app_stream("post", url, headers, json.dumps(cls.messages))
+                response = curl_tools.curl_app_stream(
+                    "post", url, headers, json.dumps(cls.messages)
+                )
            except BadRequestException as e:
-                raise BadRequestException("APP_ERROR", f"{const.APP_ERROR}:{e.message}", f"{e}")
+                raise BadRequestException(
+                    "APP_ERROR", f"{const.APP_ERROR}:{e.message}", f"{e}"
+                )
 
            output = []
            output_all = []
 
            async def generate_data(output, output_all):
                async for chunked in response:
-                    line = chunked.decode('utf-8').strip()
+                    line = chunked.decode("utf-8").strip()
                    # 找到包含"data:"的部分并提取"data"字段的值
                    event_data = ""
-                    if line.startswith('data:'):
-                        data_start = len('data:')
+                    if line.startswith("data:"):
+                        data_start = len("data:")
                        data_value = line[data_start:].strip()
                        if data_value != "[DONE]":
                            # 解析JSON格式的数据
@@ -625,11 +721,13 @@ async def generate_data(output, output_all):
 
                            # 结束时间
                            working_end_time = datetime.datetime.now()
-                            all_time = working_end_time.timestamp() - working_start_time.timestamp()
+                            all_time = (
+                                working_end_time.timestamp() - working_start_time.timestamp()
+                            )
 
                            # msg = f"用户: {cls.user_info.get('ID', 0)} 请求gpt开始时间: {working_start_time}, 结束时间: {working_end_time}, 共耗时: {all_time} 秒,返回信息: {item}"
                            msg = {}
-                            msg["user_id"] = cls.user_info.get('ID', 0)
+                            msg["user_id"] = cls.user_info.get("ID", 0)
                            msg["start_time"] = f"{working_start_time}"
                            msg["end_time"] = f"{working_end_time}"
                            msg["all_time"] = all_time
@@ -640,11 +738,11 @@ async def generate_data(output, output_all):
 
                    content = ""
                    if event_data or isinstance(event_data, dict):
-                        if event_data.get('code', 0) != 0 or 'data' not in event_data:
-                            content = event_data.get('msg', '')
+                        if event_data.get("code", 0) != 0 or "data" not in event_data:
+                            content = event_data.get("msg", "")
                        else:
-                            data = event_data['data']
-                            if data['choices']:
+                            data = event_data["data"]
+                            if data["choices"]:
                                choices = data["choices"][0]
                                if isinstance(choices, dict):
                                    content = choices.get("content", "")
@@ -664,7 +762,9 @@ async def generate_data(output, output_all):
     @classmethod
     async def module(cls, request, user_info, platform, engine_name, args, data):
         # 校验
-        await cls.assistant_base(request, user_info, platform, engine_name, 'langchain', args, data)
+        await cls.assistant_base(
+            request, user_info, platform, engine_name, "langchain", args, data
+        )
 
         # 开始时间
         working_start_time = datetime.datetime.now()
@@ -686,11 +786,7 @@ async def module(cls, request, user_info, platform, engine_name, args, data):
 分类:"""
         )
 
-        chain = (
-            prompt
-            | llm
-            | output_parser
-        )
+        chain = prompt | llm | output_parser
 
         # res = chain.invoke({"question": "如何使用llm?"})
         # res = chain.invoke({"question": "如何使用langchain?"})
@@ -748,7 +844,7 @@ async def module(cls, request, user_info, platform, engine_name, args, data):
         full_chain = {"topic": chain, "question": lambda x: x["question"]} | branch
 
         # 问题
-        question = cls.query[0]['content']
+        question = cls.query[0]["content"]
 
         try:
             # 异步一次性返回
@@ -758,7 +854,7 @@ async def module(cls, request, user_info, platform, engine_name, args, data):
         working_end_time = datetime.datetime.now()
         all_time = working_end_time.timestamp() - working_start_time.timestamp()
         msg = {}
-        msg["user_id"] = cls.user_info.get('ID', 0)
+        msg["user_id"] = cls.user_info.get("ID", 0)
         msg["start_time"] = f"{working_start_time}"
         msg["end_time"] = f"{working_end_time}"
         msg["all_time"] = all_time