diff --git a/dbgpt/agent/agents/agent.py b/dbgpt/agent/agents/agent.py
index 432e351db..f1d82abde 100644
--- a/dbgpt/agent/agents/agent.py
+++ b/dbgpt/agent/agents/agent.py
@@ -8,6 +8,7 @@
 from ..memory.gpts_memory import GptsMemory
 from dbgpt.core.interface.llm import ModelMetadata
 
+
 class Agent:
     """
     An interface for AI agent.
@@ -166,7 +167,7 @@ def to_dict(self) -> Dict[str, Any]:
 
 @dataclass
 class AgentContext:
     conv_id: str
-    llm_provider: Optional['LLMClient']
+    llm_provider: Optional["LLMClient"]
     gpts_name: Optional[str] = None
     resource_db: Optional[AgentResource] = None
diff --git a/dbgpt/agent/agents/base_agent.py b/dbgpt/agent/agents/base_agent.py
index ec58c00be..b1d238acd 100644
--- a/dbgpt/agent/agents/base_agent.py
+++ b/dbgpt/agent/agents/base_agent.py
@@ -636,7 +636,6 @@ def _get_model_priority(self):
         else:
             return None
 
-
     def _filter_health_models(self, need_uses: Optional[list]):
         all_modes = self.agent_context.llm_models
         can_uses = []
@@ -645,7 +644,6 @@ def _filter_health_models(self, need_uses: Optional[list]):
                 can_uses.append(item)
         return can_uses
 
-
     def _select_llm_model(self, old_model: str = None):
         """
         LLM model selector, currently only supports manual selection, more strategies will be opened in the future
@@ -662,7 +660,7 @@ def _select_llm_model(self, old_model: str = None):
         if old_model:
             filtered_list = [item for item in all_modes if item.model != old_model]
             if filtered_list and len(filtered_list) >= 1:
-                now_model= filtered_list[0]
+                now_model = filtered_list[0]
         return now_model.model
 
     async def a_reasoning_reply(
diff --git a/dbgpt/agent/agents/expand/code_assistant_agent.py b/dbgpt/agent/agents/expand/code_assistant_agent.py
index aab78254b..0525b43c9 100644
--- a/dbgpt/agent/agents/expand/code_assistant_agent.py
+++ b/dbgpt/agent/agents/expand/code_assistant_agent.py
@@ -171,7 +171,7 @@ async def a_verify(self, message: Optional[Dict]):
         check_reult, model = await self.a_reasoning_reply(
             [
                 {
-                    "role": ModelMessageRoleType.HUMAN, 
+                    "role": ModelMessageRoleType.HUMAN,
                     "content": f"""Please understand the following task objectives and results and give your judgment:
                         Task Gogal: {task_gogal}
                         Execution Result: {task_result}
diff --git a/dbgpt/agent/agents/expand/plugin_assistant_agent.py b/dbgpt/agent/agents/expand/plugin_assistant_agent.py
index d59bbff21..5bb857176 100644
--- a/dbgpt/agent/agents/expand/plugin_assistant_agent.py
+++ b/dbgpt/agent/agents/expand/plugin_assistant_agent.py
@@ -113,9 +113,7 @@ async def tool_call(
                 ### Answer failed, turn on automatic repair
                 rensponse_succ = False
         else:
-            try:
-
             view = ""
         except Exception as e:
             view = f"```vis-convert-error\n{content}\n```"
 
diff --git a/dbgpt/agent/agents/llm/llm_client.py b/dbgpt/agent/agents/llm/llm_client.py
index 17a0cfdd5..941894944 100644
--- a/dbgpt/agent/agents/llm/llm_client.py
+++ b/dbgpt/agent/agents/llm/llm_client.py
@@ -181,7 +181,9 @@ async def _completions_create(self, llm_model, params):
         try:
             model_request = _build_model_request(payload)
             model_output = await self._llm_client.generate(model_request)
-            parsed_output = self._output_parser.parse_model_nostream_resp(model_output, "###")
+            parsed_output = self._output_parser.parse_model_nostream_resp(
+                model_output, "###"
+            )
             return parsed_output
         except Exception as e:
             logger.error(
diff --git a/dbgpt/agent/agents/plan_group_chat.py b/dbgpt/agent/agents/plan_group_chat.py
index 164882a3c..7d4a33638 100644
--- a/dbgpt/agent/agents/plan_group_chat.py
+++ b/dbgpt/agent/agents/plan_group_chat.py
@@ -226,7 +226,6 @@ def __init__(
         max_consecutive_auto_reply: Optional[int] = sys.maxsize,
         human_input_mode: Optional[str] = "NEVER",
         describe: Optional[str] = "Plan chat manager.",
-
         **kwargs,
     ):
         super().__init__(
@@ -431,7 +430,6 @@ async def a_run_chat(
                         plan_result,
                     )
             except Exception as e:
-
                 logger.exception(
                     f"An exception was encountered during the execution of the current plan step.{str(e)}"
                 )
diff --git a/dbgpt/agent/agents/planner_agent.py b/dbgpt/agent/agents/planner_agent.py
index eb84a745e..435aef2a8 100644
--- a/dbgpt/agent/agents/planner_agent.py
+++ b/dbgpt/agent/agents/planner_agent.py
@@ -86,7 +86,6 @@ def __init__(
         is_termination_msg: Optional[Callable[[Dict], bool]] = None,
         max_consecutive_auto_reply: Optional[int] = None,
         human_input_mode: Optional[str] = "NEVER",
-
         **kwargs,
     ):
         super().__init__(
diff --git a/dbgpt/agent/plugin/commands/built_in/disply_type/__init__.py b/dbgpt/agent/plugin/commands/built_in/disply_type/__init__.py
index 4a7d79f7d..a13495925 100644
--- a/dbgpt/agent/plugin/commands/built_in/disply_type/__init__.py
+++ b/dbgpt/agent/plugin/commands/built_in/disply_type/__init__.py
@@ -1 +1 @@
-from .show_chart_gen import static_message_img_path
\ No newline at end of file
+from .show_chart_gen import static_message_img_path
diff --git a/dbgpt/agent/plugin/plugin.py b/dbgpt/agent/plugin/plugin.py
index 9e96f41d7..bc8bafe89 100644
--- a/dbgpt/agent/plugin/plugin.py
+++ b/dbgpt/agent/plugin/plugin.py
@@ -4,10 +4,8 @@
 
 logger = logging.getLogger(__name__)
 
 
-class PluginLoader: 
-
-
+class PluginLoader:
     def load_plugins(
         self, generator: PluginPromptGenerator, my_plugins: List[str]
     ) -> PluginPromptGenerator:
@@ -18,4 +16,4 @@ def load_plugins(
             if not plugin.can_handle_post_prompt():
                 continue
             generator = plugin.post_prompt(generator)
-        return generator
\ No newline at end of file
+        return generator
diff --git a/dbgpt/serve/agent/agents/controller.py b/dbgpt/serve/agent/agents/controller.py
index da1a55e31..a6bc7d706 100644
--- a/dbgpt/serve/agent/agents/controller.py
+++ b/dbgpt/serve/agent/agents/controller.py
@@ -104,17 +104,15 @@ async def plan_chat(
         worker_manager = CFG.SYSTEM_APP.get_component(
             ComponentType.WORKER_MANAGER_FACTORY, WorkerManagerFactory
         ).create()
-        llm_task = DefaultLLMClient(worker_manager) 
-        context: AgentContext = AgentContext(
-            conv_id=conv_id, llm_provider=llm_task
-        )
+        llm_task = DefaultLLMClient(worker_manager)
+        context: AgentContext = AgentContext(conv_id=conv_id, llm_provider=llm_task)
         context.gpts_name = gpts_instance.gpts_name
         context.resource_db = resource_db
         context.resource_internet = resource_internet
         context.resource_knowledge = resource_knowledge
         context.agents = agents_names
 
-        context.llm_models = await llm_task.models() 
+        context.llm_models = await llm_task.models()
         context.model_priority = llm_models_priority
 
         agent_map = defaultdict()
@@ -130,9 +128,6 @@ async def plan_chat(
             agents.append(agent)
             agent_map[name] = agent
 
-
-
-
         groupchat = PlanChat(agents=agents, messages=[], max_round=50)
         planner = PlannerAgent(
             agent_context=context,
diff --git a/dbgpt/util/code_utils.py b/dbgpt/util/code_utils.py
index 9f7903388..b9e5a59af 100644
--- a/dbgpt/util/code_utils.py
+++ b/dbgpt/util/code_utils.py
@@ -133,7 +133,6 @@ def extract_code(
 }
 
 
-
 def timeout_handler(signum, frame):
     raise TimeoutError("Timed out!")
 
@@ -202,6 +201,7 @@ def execute_code(
     try:
         import docker
+
         try:
             docker.version
         except AttributeError:
             docker = None
@@ -209,7 +209,6 @@ def execute_code(
     except ImportError:
         docker = None
 
-
    if use_docker is None:
        if docker is None:
            use_docker = False
@@ -371,7 +370,6 @@ def execute_code(
 }
 
 
-
 def _remove_check(response):
     """Remove the check function from the response."""
     # find the position of the check function
diff --git a/examples/agents/auto_plan_agent_dialogue_example.py b/examples/agents/auto_plan_agent_dialogue_example.py
index 59c05b830..992a57ed8 100644
--- a/examples/agents/auto_plan_agent_dialogue_example.py
+++ b/examples/agents/auto_plan_agent_dialogue_example.py
@@ -35,7 +35,7 @@
 
     llm_client = OpenAILLMClient()
     context: AgentContext = AgentContext(conv_id="test456", llm_provider=llm_client)
-    context.llm_models = [ModelMetadata(model="gpt-3.5-turbo")] 
+    context.llm_models = [ModelMetadata(model="gpt-3.5-turbo")]
     context.gpts_name = "代码分析助手"
 
     default_memory = GptsMemory()
@@ -62,8 +62,8 @@
         user_proxy.a_initiate_chat(
             recipient=manager,
             reviewer=user_proxy,
-            message="download data from https://raw.githubusercontent.com/uwdata/draco/master/data/cars.csv and plot a visualization that tells us about the relationship between weight and horsepower. Save the plot to a file. Print the fields in a dataset before visualizing it.",
-            # message="find papers on LLM applications from arxiv in the last week, create a markdown table of different domains.",
+            # message="load data from https://raw.githubusercontent.com/uwdata/draco/master/data/cars.csv and plot a visualization that tells us about the relationship between weight and horsepower. Save the plot to a file. Print the fields in a dataset before visualizing it.",
+            message="find papers on LLM applications from arxiv in the last week, create a markdown table of different domains.",
         )
     )
 
diff --git a/examples/agents/single_agent_dialogue_example.py b/examples/agents/single_agent_dialogue_example.py
index 71418c351..5fb32da1e 100644
--- a/examples/agents/single_agent_dialogue_example.py
+++ b/examples/agents/single_agent_dialogue_example.py
@@ -24,7 +24,6 @@
 
 
 if __name__ == "__main__":
-
     from dbgpt.model import OpenAILLMClient
 
     llm_client = OpenAILLMClient()
@@ -40,7 +39,7 @@
         user_proxy.a_initiate_chat(
             recipient=coder,
             reviewer=user_proxy,
-            message="式计算下321 * 123等于多少", #用python代码的方式计算下321 * 123等于多少
+            message="式计算下321 * 123等于多少",  # 用python代码的方式计算下321 * 123等于多少
            # message="download data from https://raw.githubusercontent.com/uwdata/draco/master/data/cars.csv and plot a visualization that tells us about the relationship between weight and horsepower. Save the plot to a file. Print the fields in a dataset before visualizing it.",
         )
     )
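---
For reference, a minimal sketch of the context wiring these hunks converge on, mirroring the patched example scripts: the LLM client is passed to AgentContext as llm_provider, and the usable models are declared explicitly via llm_models. The conv_id and model name below are placeholders, not values from the patch.

    from dbgpt.agent.agents.agent import AgentContext
    from dbgpt.core.interface.llm import ModelMetadata
    from dbgpt.model import OpenAILLMClient

    # Client goes in as llm_provider (the field renamed to double quotes above);
    # llm_models is set explicitly, as in auto_plan_agent_dialogue_example.py.
    llm_client = OpenAILLMClient()
    context: AgentContext = AgentContext(conv_id="test123", llm_provider=llm_client)
    context.llm_models = [ModelMetadata(model="gpt-3.5-turbo")]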