Commit

feat(metrics): add flush_metrics() method to allow manual flushing of metrics (#2171)

Co-authored-by: Heitor Lessa <lessa@amazon.co.uk>
Co-authored-by: Leandro Damascena <leandro.damascena@gmail.com>
3 people authored Apr 28, 2023
1 parent 14147cb commit d4607f3
Showing 4 changed files with 59 additions and 20 deletions.
33 changes: 23 additions & 10 deletions aws_lambda_powertools/metrics/base.py
@@ -328,6 +328,28 @@ def clear_metrics(self) -> None:
         self.dimension_set.clear()
         self.metadata_set.clear()
 
+    def flush_metrics(self, raise_on_empty_metrics: bool = False) -> None:
+        """Manually flushes the metrics. This is normally not necessary,
+        unless you're running on other runtimes besides Lambda, where the @log_metrics
+        decorator already handles things for you.
+
+        Parameters
+        ----------
+        raise_on_empty_metrics : bool, optional
+            raise exception if no metrics are emitted, by default False
+        """
+        if not raise_on_empty_metrics and not self.metric_set:
+            warnings.warn(
+                "No application metrics to publish. The cold-start metric may be published if enabled. "
+                "If application metrics should never be empty, consider using 'raise_on_empty_metrics'",
+                stacklevel=2,
+            )
+        else:
+            logger.debug("Flushing existing metrics")
+            metrics = self.serialize_metric_set()
+            print(json.dumps(metrics, separators=(",", ":")))
+            self.clear_metrics()
+
     def log_metrics(
         self,
         lambda_handler: Union[Callable[[Dict, Any], Any], Optional[Callable[[Dict, Any, Optional[Dict]], Any]]] = None,
@@ -390,16 +412,7 @@ def decorate(event, context):
                 if capture_cold_start_metric:
                     self._add_cold_start_metric(context=context)
             finally:
-                if not raise_on_empty_metrics and not self.metric_set:
-                    warnings.warn(
-                        "No application metrics to publish. The cold-start metric may be published if enabled. "
-                        "If application metrics should never be empty, consider using 'raise_on_empty_metrics'",
-                        stacklevel=2,
-                    )
-                else:
-                    metrics = self.serialize_metric_set()
-                    self.clear_metrics()
-                    print(json.dumps(metrics, separators=(",", ":")))
+                self.flush_metrics(raise_on_empty_metrics=raise_on_empty_metrics)
 
             return response
 
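For orientation, here is a minimal usage sketch of the new method outside the `log_metrics` decorator, e.g. in a non-Lambda worker process. The namespace, service, function and metric names below are illustrative and not part of this commit:

```python
from aws_lambda_powertools import Metrics
from aws_lambda_powertools.metrics import MetricUnit

# Illustrative namespace/service values; any names work here
metrics = Metrics(namespace="ExampleApp", service="orders")


def process_order(order_id: str) -> None:
    # ... business logic ...
    metrics.add_metric(name="OrderProcessed", unit=MetricUnit.Count, value=1)


def main() -> None:
    try:
        process_order("12345")
    finally:
        # Serializes accumulated metrics, prints the EMF JSON blob to stdout,
        # and clears the in-memory metric set (per the diff above)
        metrics.flush_metrics()


if __name__ == "__main__":
    main()
```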
10 changes: 6 additions & 4 deletions docs/core/metrics.md
@@ -251,13 +251,15 @@ By default it will skip all previously defined dimensions including default dime
 
 ### Flushing metrics manually
 
-If you prefer not to use `log_metrics` because you might want to encapsulate additional logic when doing so, you can manually flush and clear metrics as follows:
+If you are using the AWS Lambda Web Adapter project, or a middleware with custom metric logic, you can use `flush_metrics()`. This method will serialize, print metrics available to standard output, and clear in-memory metrics data.
 
 ???+ warning
-    Metrics, dimensions and namespace validation still applies
+    This does not capture Cold Start metrics, and metric data validation still applies.
 
-```python hl_lines="11-14" title="Manually flushing and clearing metrics from memory"
---8<-- "examples/metrics/src/single_metric.py"
+Contrary to the `log_metrics` decorator, you are now also responsible to flush metrics in the event of an exception.
+
+```python hl_lines="18" title="Manually flushing and clearing metrics from memory"
+--8<-- "examples/metrics/src/flush_metrics.py"
 ```
 
 ### Metrics isolation
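To make "serialize, print to standard output, and clear" concrete, the sketch below inspects the same EMF payload that `flush_metrics()` would print; the namespace, service and metric values are illustrative:

```python
import json

from aws_lambda_powertools import Metrics
from aws_lambda_powertools.metrics import MetricUnit

metrics = Metrics(namespace="ExampleApp", service="booking")  # illustrative values
metrics.add_metric(name="SuccessfulBooking", unit=MetricUnit.Count, value=1)

# serialize_metric_set() builds the same CloudWatch EMF dictionary that
# flush_metrics() prints via json.dumps() before clearing in-memory metrics
emf_payload = metrics.serialize_metric_set()
print(json.dumps(emf_payload, indent=4))
```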
16 changes: 10 additions & 6 deletions examples/metrics/src/flush_metrics.py
@@ -1,14 +1,18 @@
-import json
-
 from aws_lambda_powertools import Metrics
 from aws_lambda_powertools.metrics import MetricUnit
 from aws_lambda_powertools.utilities.typing import LambdaContext
 
 metrics = Metrics()
 
 
-def lambda_handler(event: dict, context: LambdaContext):
+def book_flight(flight_id: str, **kwargs):
+    # logic to book flight
+    ...
     metrics.add_metric(name="SuccessfulBooking", unit=MetricUnit.Count, value=1)
-    your_metrics_object = metrics.serialize_metric_set()
-    metrics.clear_metrics()
-    print(json.dumps(your_metrics_object))
+
+
+def lambda_handler(event: dict, context: LambdaContext):
+    try:
+        book_flight(flight_id=event.get("flight_id", ""))
+    finally:
+        metrics.flush_metrics()
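The new `raise_on_empty_metrics` parameter mirrors the option already exposed by `log_metrics`: per the diff in base.py, flushing an empty metric set only emits a warning by default, while passing `True` falls through to `serialize_metric_set()`, which is expected to raise `SchemaValidationError`. A hedged sketch with illustrative names:

```python
from aws_lambda_powertools import Metrics
from aws_lambda_powertools.metrics import SchemaValidationError

metrics = Metrics(namespace="ExampleApp", service="booking")  # illustrative values


def flush_strictly() -> None:
    try:
        # With raise_on_empty_metrics=True, an empty metric set is treated as an
        # error rather than only producing a warning
        metrics.flush_metrics(raise_on_empty_metrics=True)
    except SchemaValidationError:
        # Nothing was added before flushing; handle or re-raise as appropriate
        raise
```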
20 changes: 20 additions & 0 deletions tests/functional/test_metrics.py
@@ -249,6 +249,26 @@ def lambda_handler(evt, ctx):
     assert expected == output
 
 
+def test_log_metrics_manual_flush(capsys, metrics, dimensions, namespace):
+    # GIVEN Metrics is initialized
+    my_metrics = Metrics(namespace=namespace)
+    for metric in metrics:
+        my_metrics.add_metric(**metric)
+    for dimension in dimensions:
+        my_metrics.add_dimension(**dimension)
+
+    # WHEN we manually flush the metrics
+    my_metrics.flush_metrics()
+
+    output = capture_metrics_output(capsys)
+    expected = serialize_metrics(metrics=metrics, dimensions=dimensions, namespace=namespace)
+
+    # THEN we should have no exceptions
+    # and a valid EMF object should be flushed correctly
+    remove_timestamp(metrics=[output, expected])
+    assert expected == output
+
+
 def test_namespace_env_var(monkeypatch, capsys, metric, dimension, namespace):
     # GIVEN POWERTOOLS_METRICS_NAMESPACE is set
     monkeypatch.setenv("POWERTOOLS_METRICS_NAMESPACE", namespace)
