Commit b40894b

Refactor logging in pr_reviewer.py, litellm_ai_handler.py, and pr_description.py for improved debugging
1 parent: df3a463

File tree: 9 files changed, +86 −115 lines

pr_agent/algo/ai_handlers/litellm_ai_handler.py

+9 −3
@@ -113,6 +113,10 @@ async def chat_completion(self, model: str, system: str, user: str, temperature:
             }
             if self.aws_bedrock_client:
                 kwargs["aws_bedrock_client"] = self.aws_bedrock_client
+
+            get_logger().debug(f"\nSystem prompt:\n{system}")
+            get_logger().debug(f"\nUser prompt:\n{user}")
+
             response = await acompletion(**kwargs)
         except (APIError, Timeout, TryAgain) as e:
             get_logger().error("Error during OpenAI inference: ", e)
@@ -127,7 +131,9 @@ async def chat_completion(self, model: str, system: str, user: str, temperature:
             raise TryAgain
         resp = response["choices"][0]['message']['content']
         finish_reason = response["choices"][0]["finish_reason"]
-        usage = response.get("usage")
-        get_logger().info("AI response", response=resp, messages=messages, finish_reason=finish_reason,
-                          model=model, usage=usage)
+
+        # usage = response.get("usage")
+        get_logger().debug(f"\nAI response:\n{resp}")
+        get_logger().debug("\nAI response extra details", response=response)
+
        return resp, finish_reason
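
Note: the prompt and response dumps are now logged at debug level, so they stay silent unless the logger is configured for DEBUG. A minimal sketch of how they surface, using the project's own setup_logger/get_logger helpers and assuming setup_logger resolves the level string as its signature suggests:

    from pr_agent.log import setup_logger, get_logger

    setup_logger("DEBUG")  # with the default "INFO", the debug() calls above emit nothing
    get_logger().debug("\nSystem prompt:\nexample system prompt")  # placeholder prompt text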

pr_agent/algo/pr_processing.py

+4 −5
@@ -225,11 +225,10 @@ async def retry_with_fallback_models(f: Callable, model_type: ModelType = ModelT
     # try each (model, deployment_id) pair until one is successful, otherwise raise exception
     for i, (model, deployment_id) in enumerate(zip(all_models, all_deployments)):
         try:
-            if get_settings().config.verbosity_level >= 2:
-                get_logger().debug(
-                    f"Generating prediction with {model}"
-                    f"{(' from deployment ' + deployment_id) if deployment_id else ''}"
-                )
+            get_logger().debug(
+                f"Generating prediction with {model}"
+                f"{(' from deployment ' + deployment_id) if deployment_id else ''}"
+            )
             get_settings().set("openai.deployment_id", deployment_id)
             return await f(model)
         except Exception as e:
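
Note: with the verbosity_level gate removed, whether this message prints is now governed solely by the logger's configured level, which the LOG_LEVEL handling in cli.py below controls.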

pr_agent/cli.py

+2 −1
@@ -6,7 +6,8 @@
 from pr_agent.config_loader import get_settings
 from pr_agent.log import setup_logger
 
-setup_logger()
+log_level = os.environ.get("LOG_LEVEL", "INFO")
+setup_logger(log_level)
 
 
 
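Note: the hunk calls os.environ without showing an import, so cli.py presumably already imports os. A self-contained sketch of the environment-driven setup:

    import os

    from pr_agent.log import setup_logger

    # LOG_LEVEL=DEBUG surfaces the new debug-level prompt and diff dumps;
    # when the variable is unset, logging stays at INFO as before
    setup_logger(os.environ.get("LOG_LEVEL", "INFO"))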

pr_agent/git_providers/git_provider.py

+5 −1
@@ -57,9 +57,12 @@ def get_pr_description(self, *, full: bool = True) -> str:
         return description
 
     def get_user_description(self) -> str:
+        if hasattr(self, 'user_description') and not (self.user_description is None):
+            return self.user_description
+
         description = (self.get_pr_description_full() or "").strip()
         description_lowercase = description.lower()
-        get_logger().info(f"Existing description:\n{description_lowercase}")
+        get_logger().debug(f"Existing description:\n{description_lowercase}")
 
         # if the existing description wasn't generated by the pr-agent, just return it as-is
         if not self._is_generated_by_pr_agent(description_lowercase):
@@ -94,6 +97,7 @@ def get_user_description(self) -> str:
             original_user_description = original_user_description[len(user_description_header):].strip()
 
         get_logger().info(f"Extracted user description from existing description:\n{original_user_description}")
+        self.user_description = original_user_description
         return original_user_description
 
     def _possible_headers(self):
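
Note: get_user_description() now memoizes its result on the provider instance, so repeated calls in the same run skip re-parsing the PR body. A stripped-down sketch of the pattern, where _parse_user_description is a hypothetical stand-in for the extraction logic above:

    def get_user_description(self) -> str:
        # serve the cached value when a previous call already extracted it
        if getattr(self, 'user_description', None) is not None:
            return self.user_description
        original_user_description = self._parse_user_description()  # hypothetical helper
        self.user_description = original_user_description  # cache for later calls
        return original_user_description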

pr_agent/git_providers/utils.py

+1 −1
@@ -38,7 +38,7 @@ def apply_repo_settings(pr_url):
                     section_dict[key] = value
                 get_settings().unset(section)
                 get_settings().set(section, section_dict, merge=False)
-            get_logger().info(f"Applying repo settings for section {section}, contents: {contents}")
+            get_logger().info(f"Applying repo settings:\n{new_settings.as_dict()}")
         except Exception as e:
             get_logger().exception("Failed to apply repo settings", e)
         finally:
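
Note: new_settings is presumably the settings object loaded earlier in apply_repo_settings, outside this hunk; logging its as_dict() once replaces the old per-section message that referenced {section} and {contents}.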

pr_agent/log/__init__.py

+2 −1
@@ -1,5 +1,6 @@
 import json
 import logging
+import os
 import sys
 from enum import Enum
 
@@ -20,7 +21,7 @@ def setup_logger(level: str = "INFO", fmt: LoggingFormat = LoggingFormat.CONSOLE
     if type(level) is not int:
         level = logging.INFO
 
-    if fmt == LoggingFormat.JSON:
+    if fmt == LoggingFormat.JSON and os.getenv("LOG_SANE", "0").lower() == "0":  # better debugging github_app
         logger.remove(None)
         logger.add(
             sys.stdout,
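
Note: the LOG_SANE variable lets a deployment (e.g. the GitHub App, per the inline comment) opt out of JSON-formatted logs even when LoggingFormat.JSON is requested. A minimal sketch of the gate on its own:

    import os

    # JSON output is kept only while LOG_SANE is unset or "0";
    # setting LOG_SANE=1 falls through to the plain console sink instead
    use_json = os.getenv("LOG_SANE", "0").lower() == "0"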

pr_agent/tools/pr_code_suggestions.py

+36 −44
@@ -72,14 +72,16 @@ def __init__(self, pr_url: str, cli_mode=False, args: list = None,
     async def run(self):
         try:
             get_logger().info('Generating code suggestions for PR...')
+            relevant_configs = {'pr_code_suggestions': dict(get_settings().pr_code_suggestions),
+                                'config': dict(get_settings().config)}
+            get_logger().debug(f"Relevant configs:\n{relevant_configs}")
 
             if get_settings().config.publish_output:
                 if self.git_provider.is_supported("gfm_markdown"):
                     self.progress_response = self.git_provider.publish_comment(self.progress)
                 else:
                     self.git_provider.publish_comment("Preparing suggestions...", is_temporary=True)
 
-            get_logger().info('Preparing PR code suggestions...')
             if not self.is_extended:
                 await retry_with_fallback_models(self._prepare_prediction, ModelType.TURBO)
                 data = self._prepare_pr_code_suggestions()
@@ -97,10 +99,8 @@ async def run(self):
                     data['code_suggestions'] = await self.rank_suggestions(data['code_suggestions'])
 
             if get_settings().config.publish_output:
-                get_logger().info('Pushing PR code suggestions...')
                 self.git_provider.remove_initial_comment()
                 if get_settings().pr_code_suggestions.summarize and self.git_provider.is_supported("gfm_markdown"):
-                    get_logger().info('Pushing summarize code suggestions...')
 
                     # generate summarized suggestions
                     pr_body = self.generate_summarized_suggestions(data)
@@ -117,7 +117,6 @@ async def run(self):
                         self.git_provider.publish_comment(pr_body)
 
                 else:
-                    get_logger().info('Pushing inline code suggestions...')
                     self.push_inline_code_suggestions(data)
                     if self.progress_response:
                         self.progress_response.delete()
@@ -133,25 +132,23 @@ async def _prepare_prediction(self, model: str):
                                           model,
                                           add_line_numbers_to_hunks=True,
                                           disable_extra_lines=True)
-
-        get_logger().info('Getting AI prediction...')
-        self.prediction = await self._get_prediction(model, patches_diff)
+        if self.patches_diff:
+            get_logger().debug(f"PR diff:\n{self.patches_diff}")
+            self.prediction = await self._get_prediction(model, patches_diff)
+        else:
+            get_logger().error(f"Error getting PR diff")
+            self.prediction = None
 
     async def _get_prediction(self, model: str, patches_diff: str):
         variables = copy.deepcopy(self.vars)
         variables["diff"] = patches_diff  # update diff
         environment = Environment(undefined=StrictUndefined)
         system_prompt = environment.from_string(get_settings().pr_code_suggestions_prompt.system).render(variables)
         user_prompt = environment.from_string(get_settings().pr_code_suggestions_prompt.user).render(variables)
-        if get_settings().config.verbosity_level >= 2:
-            get_logger().info(f"\nSystem prompt:\n{system_prompt}")
-            get_logger().info(f"\nUser prompt:\n{user_prompt}")
+
         response, finish_reason = await self.ai_handler.chat_completion(model=model, temperature=0.2,
                                                                         system=system_prompt, user=user_prompt)
 
-        if get_settings().config.verbosity_level >= 2:
-            get_logger().info(f"\nAI response:\n{response}")
-
         return response
 
     def _prepare_pr_code_suggestions(self) -> Dict:
@@ -185,8 +182,6 @@ def push_inline_code_suggestions(self, data):
 
         for d in data['code_suggestions']:
             try:
-                if get_settings().config.verbosity_level >= 2:
-                    get_logger().info(f"suggestion: {d}")
                 relevant_file = d['relevant_file'].strip()
                 relevant_lines_start = int(d['relevant_lines_start'])  # absolute position
                 relevant_lines_end = int(d['relevant_lines_end'])
@@ -202,8 +197,7 @@ def push_inline_code_suggestions(self, data):
                                          'relevant_lines_start': relevant_lines_start,
                                          'relevant_lines_end': relevant_lines_end})
             except Exception:
-                if get_settings().config.verbosity_level >= 2:
-                    get_logger().info(f"Could not parse suggestion: {d}")
+                get_logger().info(f"Could not parse suggestion: {d}")
 
         is_successful = self.git_provider.publish_code_suggestions(code_suggestions)
         if not is_successful:
@@ -229,8 +223,7 @@ def dedent_code(self, relevant_file, relevant_lines_start, new_code_snippet):
             if delta_spaces > 0:
                 new_code_snippet = textwrap.indent(new_code_snippet, delta_spaces * " ").rstrip('\n')
         except Exception as e:
-            if get_settings().config.verbosity_level >= 2:
-                get_logger().info(f"Could not dedent code snippet for file {relevant_file}, error: {e}")
+            get_logger().error(f"Could not dedent code snippet for file {relevant_file}, error: {e}")
 
         return new_code_snippet
 
@@ -245,32 +238,33 @@ def _get_is_extended(self, args: list[str]) -> bool:
         return False
 
     async def _prepare_prediction_extended(self, model: str) -> dict:
-        get_logger().info('Getting PR diff...')
         patches_diff_list = get_pr_multi_diffs(self.git_provider, self.token_handler, model,
                                                max_calls=get_settings().pr_code_suggestions.max_number_of_calls)
+        if patches_diff_list:
+            get_logger().debug(f"PR diff:\n{patches_diff_list}")
 
-        # parallelize calls to AI:
-        if get_settings().pr_code_suggestions.parallel_calls:
-            get_logger().info('Getting multi AI predictions in parallel...')
-            prediction_list = await asyncio.gather(*[self._get_prediction(model, patches_diff) for patches_diff in patches_diff_list])
-            self.prediction_list = prediction_list
-        else:
-            get_logger().info('Getting multi AI predictions...')
-            prediction_list = []
-            for i, patches_diff in enumerate(patches_diff_list):
-                get_logger().info(f"Processing chunk {i + 1} of {len(patches_diff_list)}")
-                prediction = await self._get_prediction(model, patches_diff)
-                prediction_list.append(prediction)
-
-        data = {}
-        for prediction in prediction_list:
-            self.prediction = prediction
-            data_per_chunk = self._prepare_pr_code_suggestions()
-            if "code_suggestions" in data:
-                data["code_suggestions"].extend(data_per_chunk["code_suggestions"])
+            # parallelize calls to AI:
+            if get_settings().pr_code_suggestions.parallel_calls:
+                prediction_list = await asyncio.gather(*[self._get_prediction(model, patches_diff) for patches_diff in patches_diff_list])
+                self.prediction_list = prediction_list
             else:
-                data.update(data_per_chunk)
-        self.data = data
+                prediction_list = []
+                for i, patches_diff in enumerate(patches_diff_list):
+                    prediction = await self._get_prediction(model, patches_diff)
+                    prediction_list.append(prediction)
+
+            data = {}
+            for prediction in prediction_list:
+                self.prediction = prediction
+                data_per_chunk = self._prepare_pr_code_suggestions()
+                if "code_suggestions" in data:
+                    data["code_suggestions"].extend(data_per_chunk["code_suggestions"])
+                else:
+                    data.update(data_per_chunk)
+            self.data = data
+        else:
+            get_logger().error(f"Error getting PR diff")
+            self.data = data = None
         return data
 
     async def rank_suggestions(self, data: List) -> List:
@@ -305,9 +299,7 @@ async def rank_suggestions(self, data: List) -> List:
         system_prompt = environment.from_string(get_settings().pr_sort_code_suggestions_prompt.system).render(
             variables)
         user_prompt = environment.from_string(get_settings().pr_sort_code_suggestions_prompt.user).render(variables)
-        if get_settings().config.verbosity_level >= 2:
-            get_logger().info(f"\nSystem prompt:\n{system_prompt}")
-            get_logger().info(f"\nUser prompt:\n{user_prompt}")
+
         response, finish_reason = await self.ai_handler.chat_completion(model=model, system=system_prompt,
                                                                         user=user_prompt)
 