@@ -72,14 +72,16 @@ def __init__(self, pr_url: str, cli_mode=False, args: list = None,
     async def run(self):
         try:
             get_logger().info('Generating code suggestions for PR...')
+            relevant_configs = {'pr_code_suggestions': dict(get_settings().pr_code_suggestions),
+                                'config': dict(get_settings().config)}
+            get_logger().debug(f"Relevant configs:\n{relevant_configs}")
 
             if get_settings().config.publish_output:
                 if self.git_provider.is_supported("gfm_markdown"):
                     self.progress_response = self.git_provider.publish_comment(self.progress)
                 else:
                     self.git_provider.publish_comment("Preparing suggestions...", is_temporary=True)
 
-            get_logger().info('Preparing PR code suggestions...')
             if not self.is_extended:
                 await retry_with_fallback_models(self._prepare_prediction, ModelType.TURBO)
                 data = self._prepare_pr_code_suggestions()
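The non-extended path above funnels `_prepare_prediction` through `retry_with_fallback_models`. As an editorial aside, here is a minimal sketch of that fallback pattern; the name, signature, and `main` driver are illustrative, not pr-agent's actual helper:

```python
# Illustrative sketch only -- not pr-agent's actual implementation.
import asyncio
from typing import Awaitable, Callable

async def retry_with_fallbacks(task: Callable[[str], Awaitable[None]],
                               models: list[str]) -> None:
    """Try `task` with each model in order until one succeeds."""
    last_exc: Exception | None = None
    for model in models:  # ordered from most to least preferred
        try:
            await task(model)
            return
        except Exception as exc:
            last_exc = exc  # remember the failure and fall through
    raise last_exc or RuntimeError("no models configured")

async def main() -> None:
    async def fake_call(model: str) -> None:
        print(f"calling {model}")
    await retry_with_fallbacks(fake_call, ["gpt-4", "gpt-3.5-turbo"])

asyncio.run(main())
```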
@@ -97,10 +99,8 @@ async def run(self):
                 data['code_suggestions'] = await self.rank_suggestions(data['code_suggestions'])
 
             if get_settings().config.publish_output:
-                get_logger().info('Pushing PR code suggestions...')
                 self.git_provider.remove_initial_comment()
                 if get_settings().pr_code_suggestions.summarize and self.git_provider.is_supported("gfm_markdown"):
-                    get_logger().info('Pushing summarize code suggestions...')
 
                     # generate summarized suggestions
                     pr_body = self.generate_summarized_suggestions(data)
@@ -117,7 +117,6 @@ async def run(self):
                     self.git_provider.publish_comment(pr_body)
 
                 else:
-                    get_logger().info('Pushing inline code suggestions...')
                     self.push_inline_code_suggestions(data)
                     if self.progress_response:
                         self.progress_response.delete()
@@ -133,25 +132,23 @@ async def _prepare_prediction(self, model: str):
                                              model,
                                              add_line_numbers_to_hunks=True,
                                              disable_extra_lines=True)
-
-        get_logger().info('Getting AI prediction...')
-        self.prediction = await self._get_prediction(model, patches_diff)
+        if self.patches_diff:
+            get_logger().debug(f"PR diff:\n{self.patches_diff}")
+            self.prediction = await self._get_prediction(model, patches_diff)
+        else:
+            get_logger().error(f"Error getting PR diff")
+            self.prediction = None
 
     async def _get_prediction(self, model: str, patches_diff: str):
         variables = copy.deepcopy(self.vars)
         variables["diff"] = patches_diff  # update diff
         environment = Environment(undefined=StrictUndefined)
         system_prompt = environment.from_string(get_settings().pr_code_suggestions_prompt.system).render(variables)
         user_prompt = environment.from_string(get_settings().pr_code_suggestions_prompt.user).render(variables)
-        if get_settings().config.verbosity_level >= 2:
-            get_logger().info(f"\nSystem prompt:\n{system_prompt}")
-            get_logger().info(f"\nUser prompt:\n{user_prompt}")
+
         response, finish_reason = await self.ai_handler.chat_completion(model=model, temperature=0.2,
                                                                         system=system_prompt, user=user_prompt)
 
-        if get_settings().config.verbosity_level >= 2:
-            get_logger().info(f"\nAI response:\n{response}")
-
         return response
 
     def _prepare_pr_code_suggestions(self) -> Dict:
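`_get_prediction` renders its prompts with Jinja2's `StrictUndefined`, so a template variable that was never supplied fails loudly instead of silently rendering as an empty string. A self-contained demo of that behavior (the template text here is made up):

```python
# Self-contained demo of the prompt-rendering step in _get_prediction.
from jinja2 import Environment, StrictUndefined

environment = Environment(undefined=StrictUndefined)
template = environment.from_string("Suggest improvements for:\n{{ diff }}")

print(template.render(diff="- old\n+ new"))  # renders normally
# template.render() without 'diff' raises jinja2.exceptions.UndefinedError,
# whereas the default Undefined would silently render an empty string.
```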
@@ -185,8 +182,6 @@ def push_inline_code_suggestions(self, data):
 
         for d in data['code_suggestions']:
             try:
-                if get_settings().config.verbosity_level >= 2:
-                    get_logger().info(f"suggestion: {d}")
                 relevant_file = d['relevant_file'].strip()
                 relevant_lines_start = int(d['relevant_lines_start'])  # absolute position
                 relevant_lines_end = int(d['relevant_lines_end'])
@@ -202,8 +197,7 @@ def push_inline_code_suggestions(self, data):
                                          'relevant_lines_start': relevant_lines_start,
                                          'relevant_lines_end': relevant_lines_end})
             except Exception:
-                if get_settings().config.verbosity_level >= 2:
-                    get_logger().info(f"Could not parse suggestion: {d}")
+                get_logger().info(f"Could not parse suggestion: {d}")
 
         is_successful = self.git_provider.publish_code_suggestions(code_suggestions)
         if not is_successful:
@@ -229,8 +223,7 @@ def dedent_code(self, relevant_file, relevant_lines_start, new_code_snippet):
             if delta_spaces > 0:
                 new_code_snippet = textwrap.indent(new_code_snippet, delta_spaces * " ").rstrip('\n')
         except Exception as e:
-            if get_settings().config.verbosity_level >= 2:
-                get_logger().info(f"Could not dedent code snippet for file {relevant_file}, error: {e}")
+            get_logger().error(f"Could not dedent code snippet for file {relevant_file}, error: {e}")
 
         return new_code_snippet
 
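`dedent_code` leans on `textwrap.indent` to re-align an AI-generated snippet with the indentation of the original file. A standalone sketch of that trick; the `delta_spaces` value is hypothetical:

```python
# Sketch of the re-indentation trick inside dedent_code.
import textwrap

suggestion = "if ok:\n    do_work()"
delta_spaces = 4  # hypothetical indent gap between file and suggestion

# textwrap.indent prefixes every line; rstrip drops a trailing newline,
# mirroring the call in the diff above.
aligned = textwrap.indent(suggestion, delta_spaces * " ").rstrip('\n')
print(aligned)
#     if ok:
#         do_work()
```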
@@ -245,32 +238,33 @@ def _get_is_extended(self, args: list[str]) -> bool:
         return False
 
     async def _prepare_prediction_extended(self, model: str) -> dict:
-        get_logger().info('Getting PR diff...')
         patches_diff_list = get_pr_multi_diffs(self.git_provider, self.token_handler, model,
                                                max_calls=get_settings().pr_code_suggestions.max_number_of_calls)
+        if patches_diff_list:
+            get_logger().debug(f"PR diff:\n{patches_diff_list}")
 
-        # parallelize calls to AI:
-        if get_settings().pr_code_suggestions.parallel_calls:
-            get_logger().info('Getting multi AI predictions in parallel...')
-            prediction_list = await asyncio.gather(*[self._get_prediction(model, patches_diff) for patches_diff in patches_diff_list])
-            self.prediction_list = prediction_list
-        else:
-            get_logger().info('Getting multi AI predictions...')
-            prediction_list = []
-            for i, patches_diff in enumerate(patches_diff_list):
-                get_logger().info(f"Processing chunk {i + 1} of {len(patches_diff_list)}")
-                prediction = await self._get_prediction(model, patches_diff)
-                prediction_list.append(prediction)
-
-        data = {}
-        for prediction in prediction_list:
-            self.prediction = prediction
-            data_per_chunk = self._prepare_pr_code_suggestions()
-            if "code_suggestions" in data:
-                data["code_suggestions"].extend(data_per_chunk["code_suggestions"])
+            # parallelize calls to AI:
+            if get_settings().pr_code_suggestions.parallel_calls:
+                prediction_list = await asyncio.gather(*[self._get_prediction(model, patches_diff) for patches_diff in patches_diff_list])
+                self.prediction_list = prediction_list
             else:
-                data.update(data_per_chunk)
-        self.data = data
+                prediction_list = []
+                for i, patches_diff in enumerate(patches_diff_list):
+                    prediction = await self._get_prediction(model, patches_diff)
+                    prediction_list.append(prediction)
+
+            data = {}
+            for prediction in prediction_list:
+                self.prediction = prediction
+                data_per_chunk = self._prepare_pr_code_suggestions()
+                if "code_suggestions" in data:
+                    data["code_suggestions"].extend(data_per_chunk["code_suggestions"])
+                else:
+                    data.update(data_per_chunk)
+            self.data = data
+        else:
+            get_logger().error(f"Error getting PR diff")
+            self.data = data = None
         return data
 
     async def rank_suggestions(self, data: List) -> List:
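The `parallel_calls` branch above fans out one `_get_prediction` call per diff chunk via `asyncio.gather`, which runs the coroutines concurrently and returns their results in input order. A runnable toy version, with a sleep standing in for the real chat-completion request:

```python
# Toy version of the parallel_calls branch; asyncio.sleep stands in
# for the real chat-completion request.
import asyncio

async def get_prediction(model: str, chunk: str) -> str:
    await asyncio.sleep(0.1)
    return f"suggestions for {chunk!r}"

async def main() -> None:
    chunks = ["diff chunk 1", "diff chunk 2", "diff chunk 3"]
    # gather() schedules all calls concurrently and preserves input order
    predictions = await asyncio.gather(
        *[get_prediction("some-model", chunk) for chunk in chunks])
    print(predictions)

asyncio.run(main())
```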
@@ -305,9 +299,7 @@ async def rank_suggestions(self, data: List) -> List:
         system_prompt = environment.from_string(get_settings().pr_sort_code_suggestions_prompt.system).render(
             variables)
         user_prompt = environment.from_string(get_settings().pr_sort_code_suggestions_prompt.user).render(variables)
-        if get_settings().config.verbosity_level >= 2:
-            get_logger().info(f"\nSystem prompt:\n{system_prompt}")
-            get_logger().info(f"\nUser prompt:\n{user_prompt}")
+
         response, finish_reason = await self.ai_handler.chat_completion(model=model, system=system_prompt,
                                                                         user=user_prompt)
 
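The through-line of this diff is swapping verbosity-gated `info` calls for `debug`-level logging, so the logging configuration, rather than inline `if` checks, decides what gets emitted. With the standard library the same idea looks like this (pr-agent's own `get_logger` wrapper may differ in detail):

```python
# Standard-library analogue of the logging change in this diff.
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("pr_code_suggestions")

logger.debug("System prompt:\n%s", "...")              # hidden at INFO
logger.info("Generating code suggestions for PR...")   # emitted
# Raising verbosity is now a config change (level=logging.DEBUG),
# not an inline `if verbosity_level >= 2:` guard at every call site.
```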