feat(metrics): add flush_metrics() method to allow manual flushing of metrics #2171

Merged 6 commits on Apr 28, 2023
33 changes: 23 additions & 10 deletions aws_lambda_powertools/metrics/base.py
@@ -328,6 +328,28 @@ def clear_metrics(self) -> None:
        self.dimension_set.clear()
        self.metadata_set.clear()

+    def flush_metrics(self, raise_on_empty_metrics: bool = False) -> None:
+        """Manually flushes the metrics. This is normally not necessary,
+        unless you're running on other runtimes besides Lambda, where the @log_metrics
+        decorator already handles things for you.
+
+        Parameters
+        ----------
+        raise_on_empty_metrics : bool, optional
+            raise exception if no metrics are emitted, by default False
+        """
+        if not raise_on_empty_metrics and not self.metric_set:
+            warnings.warn(
+                "No application metrics to publish. The cold-start metric may be published if enabled. "
+                "If application metrics should never be empty, consider using 'raise_on_empty_metrics'",
+                stacklevel=2,
+            )
+        else:
+            logger.debug("Flushing existing metrics")
+            metrics = self.serialize_metric_set()
+            print(json.dumps(metrics, separators=(",", ":")))
+            self.clear_metrics()

    def log_metrics(
        self,
        lambda_handler: Union[Callable[[Dict, Any], Any], Optional[Callable[[Dict, Any, Optional[Dict]], Any]]] = None,
@@ -390,16 +412,7 @@ def decorate(event, context):
                if capture_cold_start_metric:
                    self._add_cold_start_metric(context=context)
            finally:
-                if not raise_on_empty_metrics and not self.metric_set:
-                    warnings.warn(
-                        "No application metrics to publish. The cold-start metric may be published if enabled. "
-                        "If application metrics should never be empty, consider using 'raise_on_empty_metrics'",
-                        stacklevel=2,
-                    )
-                else:
-                    metrics = self.serialize_metric_set()
-                    self.clear_metrics()
-                    print(json.dumps(metrics, separators=(",", ":")))
+                self.flush_metrics(raise_on_empty_metrics=raise_on_empty_metrics)

            return response

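With the method extracted, the decorator and non-Lambda callers share a single flush path. As a usage illustration only (the batch loop, namespace, and service names below are hypothetical; `Metrics`, `add_metric`, and `flush_metrics` are the APIs this PR touches), a long-running process outside Lambda could flush on its own schedule:

```python
from aws_lambda_powertools import Metrics
from aws_lambda_powertools.metrics import MetricUnit

metrics = Metrics(namespace="ExampleApp", service="booking")


def process_batch(records: list) -> None:
    # No Lambda invocation boundary here, so @log_metrics never fires;
    # the caller decides when the EMF blob goes to stdout.
    for record in records:
        ...  # business logic
        metrics.add_metric(name="RecordProcessed", unit=MetricUnit.Count, value=1)
    # Serialize, print to stdout, and clear the in-memory metric set
    metrics.flush_metrics()
```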
10 changes: 6 additions & 4 deletions docs/core/metrics.md
@@ -251,13 +251,15 @@ By default it will skip all previously defined dimensions including default dimensions

### Flushing metrics manually

-If you prefer not to use `log_metrics` because you might want to encapsulate additional logic when doing so, you can manually flush and clear metrics as follows:
+If you are using the AWS Lambda Web Adapter project, or a middleware with custom metric logic, you can use `flush_metrics()`. This method serializes available metrics, prints them to standard output, and clears the in-memory metrics data.

???+ warning
-    Metrics, dimensions and namespace validation still applies
+    This does not capture Cold Start metrics, and metric data validation still applies.

-```python hl_lines="11-14" title="Manually flushing and clearing metrics from memory"
---8<-- "examples/metrics/src/single_metric.py"
+Unlike the `log_metrics` decorator, you are now also responsible for flushing metrics in the event of an exception.
+
+```python hl_lines="18" title="Manually flushing and clearing metrics from memory"
+--8<-- "examples/metrics/src/flush_metrics.py"
```

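Where an empty flush should fail loudly rather than warn, the method accepts the same `raise_on_empty_metrics` flag as the decorator. A minimal sketch, assuming the empty-set validation raises `SchemaValidationError` just as it does under `log_metrics`:

```python
from aws_lambda_powertools import Metrics
from aws_lambda_powertools.metrics import SchemaValidationError

metrics = Metrics(namespace="ExampleApp")

try:
    # With no metrics added, raise_on_empty_metrics=True skips the
    # warning branch and lets serialization raise instead.
    metrics.flush_metrics(raise_on_empty_metrics=True)
except SchemaValidationError:
    ...  # decide whether an empty flush is an error for your app
```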
### Metrics isolation
Contributor Author

This example file was not being used in the documentation, so I just changed the content and started using it.

examples/metrics/src/flush_metrics.py
@@ -1,14 +1,18 @@
-import json
-
from aws_lambda_powertools import Metrics
from aws_lambda_powertools.metrics import MetricUnit
from aws_lambda_powertools.utilities.typing import LambdaContext

metrics = Metrics()


-def lambda_handler(event: dict, context: LambdaContext):
+def book_flight(flight_id: str, **kwargs):
+    # logic to book flight
+    ...
    metrics.add_metric(name="SuccessfulBooking", unit=MetricUnit.Count, value=1)
-    your_metrics_object = metrics.serialize_metric_set()
-    metrics.clear_metrics()
-    print(json.dumps(your_metrics_object))
+
+
+def lambda_handler(event: dict, context: LambdaContext):
+    try:
+        book_flight(flight_id=event.get("flight_id", ""))
+    finally:
+        metrics.flush_metrics()
20 changes: 20 additions & 0 deletions tests/functional/test_metrics.py
@@ -249,6 +249,26 @@ def lambda_handler(evt, ctx):
    assert expected == output


+def test_log_metrics_manual_flush(capsys, metrics, dimensions, namespace):
+    # GIVEN Metrics is initialized
+    my_metrics = Metrics(namespace=namespace)
+    for metric in metrics:
+        my_metrics.add_metric(**metric)
+    for dimension in dimensions:
+        my_metrics.add_dimension(**dimension)
+
+    # WHEN we manually flush the metrics
+    my_metrics.flush_metrics()
+
+    output = capture_metrics_output(capsys)
+    expected = serialize_metrics(metrics=metrics, dimensions=dimensions, namespace=namespace)
+
+    # THEN we should have no exceptions
+    # and a valid EMF object should be flushed correctly
+    remove_timestamp(metrics=[output, expected])
+    assert expected == output


def test_namespace_env_var(monkeypatch, capsys, metric, dimension, namespace):
    # GIVEN POWERTOOLS_METRICS_NAMESPACE is set
    monkeypatch.setenv("POWERTOOLS_METRICS_NAMESPACE", namespace)
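For readers following along without the repo: `capture_metrics_output`, `serialize_metrics`, and `remove_timestamp` are shared helpers defined elsewhere in this test module. A rough sketch of two of them (hypothetical reconstructions, not the repo's code):

```python
import json


def capture_metrics_output(capsys):
    # flush_metrics() prints one EMF JSON blob to stdout; parse it back
    return json.loads(capsys.readouterr().out)


def remove_timestamp(metrics: list):
    # Timestamps differ between the captured and expected blobs,
    # so drop them before asserting structural equality
    for metric in metrics:
        del metric["_aws"]["Timestamp"]
```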