@@ -25,7 +25,6 @@
 from paddle import io

 from ppsci.solver import printer
-from ppsci.utils import logger
 from ppsci.utils import misc

 if TYPE_CHECKING:
@@ -91,11 +90,6 @@ def _eval_by_dataset(
             input_dict, label_dict, weight_dict = batch
             reader_cost = time.perf_counter() - reader_tic

-            # NOTE: eliminate first 5 step for warmup
-            if iter_id == 5:
-                for key in solver.eval_time_info:
-                    solver.eval_time_info[key].reset()
-
             for v in input_dict.values():
                 if hasattr(v, "stop_gradient"):
                     v.stop_gradient = False
@@ -168,11 +162,6 @@ def _eval_by_dataset(
         for metric_name, metric_func in _validator.metric.items():
             # NOTE: compute metric with entire output and label
             metric_dict = metric_func(all_output, all_label)
-            if metric_name in metric_dict_group:
-                logger.warning(
-                    f"Metric name({metric_name}) already exists, please ensure "
-                    "all metric names are unique over all validators."
-                )
             metric_dict_group[metric_name] = {
                 k: float(v) for k, v in metric_dict.items()
             }
@@ -227,11 +216,6 @@ def _eval_by_batch(
             input_dict, label_dict, weight_dict = batch
             reader_cost = time.perf_counter() - reader_tic

-            # NOTE: eliminate first 5 step for warmup
-            if iter_id == 5:
-                for key in solver.eval_time_info:
-                    solver.eval_time_info[key].reset()
-
             batch_size = next(iter(input_dict.values())).shape[0]
             for v in input_dict.values():
                 if hasattr(v, "stop_gradient"):
@@ -287,11 +271,6 @@ def _eval_by_batch(

         # concatenate all metric and discard metric of padded sample(s)
         for metric_name, metric_dict in metric_dict_group.items():
-            # if metric_name in metric_dict_group:
-            #     logger.warning(
-            #         f"Metric name({metric_name}) already exists, please ensure "
-            #         "all metric names are unique over all validators."
-            #     )
             for var_name, metric_value in metric_dict.items():
                 # NOTE: concat single metric(scalar) list into metric vector
                 metric_value = paddle.concat(metric_value)[:num_samples]
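For context on the timing logic touched above: `solver.eval_time_info` maps names to timing accumulators that expose a `reset()`, and clearing them once `iter_id` reaches 5 discards the first few warmup iterations so that one-off startup costs (dataloader spin-up, kernel compilation) do not skew the averaged reader/batch timings. Below is a minimal standalone sketch of that warmup-reset pattern, assuming a simple averaging meter; `AverageMeter`, `WARMUP_STEPS`, and the toy loop are illustrative stand-ins, not the PaddleScience API.

```python
import time


class AverageMeter:
    """Tiny running-average accumulator with a reset() (illustrative stand-in)."""

    def __init__(self) -> None:
        self.reset()

    def reset(self) -> None:
        self.sum = 0.0
        self.count = 0

    def update(self, value: float) -> None:
        self.sum += value
        self.count += 1

    @property
    def avg(self) -> float:
        return self.sum / max(self.count, 1)


time_info = {"reader_cost": AverageMeter(), "batch_cost": AverageMeter()}
WARMUP_STEPS = 5  # hypothetical constant mirroring the `iter_id == 5` check

for iter_id in range(1, 21):
    tic = time.perf_counter()
    # ... one batch of evaluation work would run here ...
    time_info["batch_cost"].update(time.perf_counter() - tic)

    # Discard statistics gathered during the warmup iterations so the final
    # average reflects steady-state cost only.
    if iter_id == WARMUP_STEPS:
        for meter in time_info.values():
            meter.reset()

print(f"steady-state batch cost: {time_info['batch_cost'].avg:.6f}s")
```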
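The warning removed from the two metric-merging hunks guarded against one validator's results silently overwriting another's when two validators reuse the same metric name. Here is a hedged sketch of that guard as a standalone helper, using the standard `logging` module instead of `ppsci.utils.logger`; `merge_metrics` is a hypothetical name introduced purely for illustration.

```python
import logging
from typing import Dict

logger = logging.getLogger(__name__)


def merge_metrics(
    metric_dict_group: Dict[str, Dict[str, float]],
    metric_name: str,
    metric_dict: Dict[str, float],
) -> None:
    """Insert one validator's metrics, warning if the name would be overwritten."""
    if metric_name in metric_dict_group:
        logger.warning(
            "Metric name(%s) already exists, please ensure "
            "all metric names are unique over all validators.",
            metric_name,
        )
    metric_dict_group[metric_name] = {k: float(v) for k, v in metric_dict.items()}


# Example: the second call logs the warning and then overwrites the first entry.
group: Dict[str, Dict[str, float]] = {}
merge_metrics(group, "L2Rel", {"u": 0.01})
merge_metrics(group, "L2Rel", {"v": 0.02})
```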