feat(Agent): format code style
format code style
yhjun1026 committed Dec 26, 2023
1 parent 46eaff8 commit 9d48078
Showing 13 changed files with 18 additions and 32 deletions.
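Note: taken together, the hunks below are consistent with running a black-style auto-formatter over the code (an assumption; the commit message only says "format code style"): strings normalized to double quotes, two blank lines between top-level definitions, single spaces around `=`, long calls wrapped, and stray blank lines removed. A minimal before/after sketch of those rules on hypothetical code, not code from this commit:

```python
# Before (hypothetical): single quotes, no space around "=", a stray blank
# line inside the function, and an over-long call on one line.
provider='default'


def build(client, model='gpt-3.5-turbo'):

    return client.create(model=model, temperature=0.5, max_tokens=1024, stream=False)


# After black-style formatting: double quotes, normalized spacing, the stray
# blank line dropped, and the long call wrapped one argument per line.
provider = "default"


def build(client, model="gpt-3.5-turbo"):
    return client.create(
        model=model,
        temperature=0.5,
        max_tokens=1024,
        stream=False,
    )
```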
3 changes: 2 additions & 1 deletion dbgpt/agent/agents/agent.py
@@ -8,6 +8,7 @@
 from ..memory.gpts_memory import GptsMemory
 from dbgpt.core.interface.llm import ModelMetadata

+
 class Agent:
     """
     An interface for AI agent.
@@ -166,7 +167,7 @@ def to_dict(self) -> Dict[str, Any]:
 @dataclass
 class AgentContext:
     conv_id: str
-    llm_provider: Optional['LLMClient']
+    llm_provider: Optional["LLMClient"]

     gpts_name: Optional[str] = None
     resource_db: Optional[AgentResource] = None
4 changes: 1 addition & 3 deletions dbgpt/agent/agents/base_agent.py
@@ -636,7 +636,6 @@ def _get_model_priority(self):
         else:
             return None

-
     def _filter_health_models(self, need_uses: Optional[list]):
         all_modes = self.agent_context.llm_models
         can_uses = []
@@ -645,7 +644,6 @@ def _filter_health_models(self, need_uses: Optional[list]):
                 can_uses.append(item)
         return can_uses

-
     def _select_llm_model(self, old_model: str = None):
         """
         LLM model selector, currently only supports manual selection, more strategies will be opened in the future
@@ -662,7 +660,7 @@ def _select_llm_model(self, old_model: str = None):
         if old_model:
             filtered_list = [item for item in all_modes if item.model != old_model]
             if filtered_list and len(filtered_list) >= 1:
-                now_model= filtered_list[0]
+                now_model = filtered_list[0]
         return now_model.model

     async def a_reasoning_reply(
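The `_select_llm_model` hunk above is a simple failover rule: when `old_model` has just failed, return the first available model whose name differs from it. The same pattern in isolation (illustrative names, not the project's API):

```python
from dataclasses import dataclass
from typing import List, Optional


@dataclass
class ModelMetadata:
    model: str


def select_llm_model(
    all_models: List[ModelMetadata], old_model: Optional[str] = None
) -> str:
    """Pick a model, skipping the one that just failed."""
    if old_model:
        # Drop the failed model and fall back to the next candidate.
        candidates = [m for m in all_models if m.model != old_model]
        if candidates:
            return candidates[0].model
    return all_models[0].model


models = [ModelMetadata("gpt-3.5-turbo"), ModelMetadata("gpt-4")]
print(select_llm_model(models, old_model="gpt-3.5-turbo"))  # -> gpt-4
```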
2 changes: 1 addition & 1 deletion dbgpt/agent/agents/expand/code_assistant_agent.py
@@ -171,7 +171,7 @@ async def a_verify(self, message: Optional[Dict]):
         check_reult, model = await self.a_reasoning_reply(
             [
                 {
-                    "role": ModelMessageRoleType.HUMAN,
+                    "role": ModelMessageRoleType.HUMAN,
                     "content": f"""Please understand the following task objectives and results and give your judgment:
                         Task Gogal: {task_gogal}
                         Execution Result: {task_result}
2 changes: 0 additions & 2 deletions dbgpt/agent/agents/expand/plugin_assistant_agent.py
@@ -113,9 +113,7 @@ async def tool_call(
             ### Answer failed, turn on automatic repair
             rensponse_succ = False
         else:
-
             try:
-
                 view = ""
             except Exception as e:
                 view = f"```vis-convert-error\n{content}\n```"
4 changes: 3 additions & 1 deletion dbgpt/agent/agents/llm/llm_client.py
@@ -181,7 +181,9 @@ async def _completions_create(self, llm_model, params):
         try:
             model_request = _build_model_request(payload)
             model_output = await self._llm_client.generate(model_request)
-            parsed_output = self._output_parser.parse_model_nostream_resp(model_output, "###")
+            parsed_output = self._output_parser.parse_model_nostream_resp(
+                model_output, "###"
+            )
             return parsed_output
         except Exception as e:
             logger.error(
2 changes: 0 additions & 2 deletions dbgpt/agent/agents/plan_group_chat.py
@@ -226,7 +226,6 @@ def __init__(
         max_consecutive_auto_reply: Optional[int] = sys.maxsize,
         human_input_mode: Optional[str] = "NEVER",
         describe: Optional[str] = "Plan chat manager.",
-
         **kwargs,
     ):
         super().__init__(
@@ -431,7 +430,6 @@ async def a_run_chat(
                     plan_result,
                 )
             except Exception as e:
-
                 logger.exception(
                     f"An exception was encountered during the execution of the current plan step.{str(e)}"
                 )
1 change: 0 additions & 1 deletion dbgpt/agent/agents/planner_agent.py
@@ -86,7 +86,6 @@ def __init__(
         is_termination_msg: Optional[Callable[[Dict], bool]] = None,
         max_consecutive_auto_reply: Optional[int] = None,
         human_input_mode: Optional[str] = "NEVER",
-
         **kwargs,
     ):
         super().__init__(
@@ -1 +1 @@
-from .show_chart_gen import static_message_img_path
+from .show_chart_gen import static_message_img_path
6 changes: 2 additions & 4 deletions dbgpt/agent/plugin/plugin.py
@@ -4,10 +4,8 @@

 logger = logging.getLogger(__name__)

-class PluginLoader:
-
-
-
+
+class PluginLoader:
     def load_plugins(
         self, generator: PluginPromptGenerator, my_plugins: List[str]
     ) -> PluginPromptGenerator:
@@ -18,4 +16,4 @@ def load_plugins(
             if not plugin.can_handle_post_prompt():
                 continue
             generator = plugin.post_prompt(generator)
-        return generator
+        return generator
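The tail of `load_plugins` shown above threads the prompt generator through each plugin's `post_prompt` hook, skipping plugins whose `can_handle_post_prompt()` returns false. The same accumulator pattern in isolation (stub plugin classes, with a plain string standing in for `PluginPromptGenerator`):

```python
class UpperPlugin:
    def can_handle_post_prompt(self) -> bool:
        return True

    def post_prompt(self, generator: str) -> str:
        return generator.upper()


class SilentPlugin:
    def can_handle_post_prompt(self) -> bool:
        return False

    def post_prompt(self, generator: str) -> str:  # never reached
        return generator


def apply_post_prompts(generator: str, plugins) -> str:
    for plugin in plugins:
        if not plugin.can_handle_post_prompt():
            continue
        # Each participating plugin transforms the generator and hands it on.
        generator = plugin.post_prompt(generator)
    return generator


print(apply_post_prompts("base prompt", [UpperPlugin(), SilentPlugin()]))
```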
11 changes: 3 additions & 8 deletions dbgpt/serve/agent/agents/controller.py
@@ -104,17 +104,15 @@ async def plan_chat(
     worker_manager = CFG.SYSTEM_APP.get_component(
         ComponentType.WORKER_MANAGER_FACTORY, WorkerManagerFactory
     ).create()
-    llm_task = DefaultLLMClient(worker_manager)
-    context: AgentContext = AgentContext(
-        conv_id=conv_id, llm_provider=llm_task
-    )
+    llm_task = DefaultLLMClient(worker_manager)
+    context: AgentContext = AgentContext(conv_id=conv_id, llm_provider=llm_task)
     context.gpts_name = gpts_instance.gpts_name
     context.resource_db = resource_db
     context.resource_internet = resource_internet
     context.resource_knowledge = resource_knowledge
     context.agents = agents_names

-    context.llm_models = await llm_task.models()
+    context.llm_models = await llm_task.models()
     context.model_priority = llm_models_priority

     agent_map = defaultdict()
@@ -130,9 +128,6 @@ async def plan_chat(
         agents.append(agent)
         agent_map[name] = agent

-
-
-
     groupchat = PlanChat(agents=agents, messages=[], max_round=50)
     planner = PlannerAgent(
         agent_context=context,
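The controller hunks above wire up the planning chat: one shared `AgentContext`, one agent per requested name collected into both a list and a name map, then a `PlanChat` capped at 50 rounds. A simplified sketch of that wiring (stub classes; the real constructors take more arguments):

```python
from collections import defaultdict


class StubAgent:
    """Stand-in for dbgpt's agent classes."""

    def __init__(self, name, agent_context):
        self.name = name
        self.agent_context = agent_context


class PlanChat:
    def __init__(self, agents, messages, max_round):
        self.agents, self.messages, self.max_round = agents, messages, max_round


def build_plan_chat(context, agents_names):
    agent_map = defaultdict()
    agents = []
    for name in agents_names:
        agent = StubAgent(name, agent_context=context)
        agents.append(agent)
        agent_map[name] = agent  # keep a name -> agent index beside the list
    # Cap the group chat at 50 rounds, as in the hunk above.
    return PlanChat(agents=agents, messages=[], max_round=50), agent_map


chat, agent_map = build_plan_chat({"conv_id": "test123"}, ["coder", "planner"])
print(len(chat.agents), sorted(agent_map))
```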
4 changes: 1 addition & 3 deletions dbgpt/util/code_utils.py
@@ -133,7 +133,6 @@ def extract_code(
     }


-
 def timeout_handler(signum, frame):
     raise TimeoutError("Timed out!")

@@ -202,14 +201,14 @@ def execute_code(

     try:
         import docker
+
         try:
             docker.version
         except AttributeError:
             docker = None
     except ImportError:
         docker = None

-
     if use_docker is None:
         if docker is None:
             use_docker = False
@@ -371,7 +370,6 @@ def execute_code(
     }


-
 def _remove_check(response):
     """Remove the check function from the response."""
     # find the position of the check function
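The `execute_code` hunk above keeps a guarded import from the original code: if the docker package is missing, or an unrelated distribution named `docker` shadows it (detected by the missing `version` attribute), the module falls back to `docker = None` and later disables container execution. The same check as a standalone helper (a sketch; the project inlines it):

```python
def detect_docker():
    """Return the docker module if it is usable, else None."""
    try:
        import docker

        try:
            # An unrelated "docker" distribution lacks this attribute.
            docker.version
        except AttributeError:
            docker = None
    except ImportError:
        docker = None
    return docker


# Mirrors the hunk's fallback: only use containers when the SDK is present.
use_docker = detect_docker() is not None
print(f"docker available: {use_docker}")
```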
6 changes: 3 additions & 3 deletions examples/agents/auto_plan_agent_dialogue_example.py
@@ -35,7 +35,7 @@

 llm_client = OpenAILLMClient()
 context: AgentContext = AgentContext(conv_id="test456", llm_provider=llm_client)
-context.llm_models = [ModelMetadata(model="gpt-3.5-turbo")]
+context.llm_models = [ModelMetadata(model="gpt-3.5-turbo")]
 context.gpts_name = "代码分析助手"

 default_memory = GptsMemory()
@@ -62,8 +62,8 @@
     user_proxy.a_initiate_chat(
         recipient=manager,
         reviewer=user_proxy,
-        message="download data from https://raw.githubusercontent.com/uwdata/draco/master/data/cars.csv and plot a visualization that tells us about the relationship between weight and horsepower. Save the plot to a file. Print the fields in a dataset before visualizing it.",
-        # message="find papers on LLM applications from arxiv in the last week, create a markdown table of different domains.",
+        # message="load data from https://raw.githubusercontent.com/uwdata/draco/master/data/cars.csv and plot a visualization that tells us about the relationship between weight and horsepower. Save the plot to a file. Print the fields in a dataset before visualizing it.",
+        message="find papers on LLM applications from arxiv in the last week, create a markdown table of different domains.",
     )
 )
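The bare closing `)` lines in this example suggest the `a_initiate_chat` coroutine is wrapped in `asyncio.run(...)` (an assumption; the wrapper line sits outside the visible hunk). A self-contained sketch of that calling pattern with stub agents:

```python
import asyncio


class StubAgent:
    """Minimal stand-in for the example's user_proxy and manager agents."""

    async def a_initiate_chat(self, recipient, reviewer, message):
        # A real agent would start the multi-agent dialogue loop here.
        print(f"to {type(recipient).__name__}: {message}")


user_proxy, manager = StubAgent(), StubAgent()

# a_-prefixed methods are coroutines, so the call is driven by asyncio.run.
asyncio.run(
    user_proxy.a_initiate_chat(
        recipient=manager,
        reviewer=user_proxy,
        message="find papers on LLM applications from arxiv in the last week, "
        "create a markdown table of different domains.",
    )
)
```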
3 changes: 1 addition & 2 deletions examples/agents/single_agent_dialogue_example.py
@@ -24,7 +24,6 @@


 if __name__ == "__main__":
-
     from dbgpt.model import OpenAILLMClient

     llm_client = OpenAILLMClient()
@@ -40,7 +39,7 @@
     user_proxy.a_initiate_chat(
         recipient=coder,
         reviewer=user_proxy,
-        message="式计算下321 * 123等于多少", #用python代码的方式计算下321 * 123等于多少
+        message="式计算下321 * 123等于多少",  # 用python代码的方式计算下321 * 123等于多少
         # message="download data from https://raw.githubusercontent.com/uwdata/draco/master/data/cars.csv and plot a visualization that tells us about the relationship between weight and horsepower. Save the plot to a file. Print the fields in a dataset before visualizing it.",
     )
 )
