diff --git a/lib/logger.dart b/lib/logger.dart index 6908eb76..7571068a 100644 --- a/lib/logger.dart +++ b/lib/logger.dart @@ -3,4 +3,6 @@ library logger; export 'src/outputs/file_output_stub.dart' if (dart.library.io) 'src/outputs/file_output.dart'; +export 'src/outputs/advanced_file_output_stub.dart' + if (dart.library.io) 'src/outputs/advanced_file_output.dart'; export 'web.dart'; diff --git a/lib/src/outputs/advanced_file_output.dart b/lib/src/outputs/advanced_file_output.dart new file mode 100644 index 00000000..8b562e82 --- /dev/null +++ b/lib/src/outputs/advanced_file_output.dart @@ -0,0 +1,196 @@ +import 'dart:async'; +import 'dart:convert'; +import 'dart:io'; + +import '../log_level.dart'; +import '../log_output.dart'; +import '../output_event.dart'; + +extension _NumExt on num { + String toDigits(int digits) => toString().padLeft(digits, '0'); +} + +/// Accumulates logs in a buffer to reduce frequent disk writes, while optionally +/// switching to a new log file if it reaches a certain size. +/// +/// [AdvancedFileOutput] offers various improvements over the original +/// [FileOutput]: +/// * Managing an internal buffer which collects the logs and only writes +/// them after a certain period of time to the disk. +/// * Dynamically switching log files instead of using a single one specified +/// by the user, when the current file reaches a specified size limit (optionally). +/// +/// The buffered output can significantly reduce the +/// frequency of file writes, which can be beneficial for (micro-)SD storage +/// and other types of low-cost storage (e.g. on IoT devices). Specific log +/// levels can trigger an immediate flush, without waiting for the next timer +/// tick. +/// +/// New log files are created when the current file reaches the specified size +/// limit. This is useful for writing "archives" of telemetry data and logs +/// while keeping them structured. +class AdvancedFileOutput extends LogOutput { + /// Creates a buffered file output. 
+ /// + /// By default, the log is buffered until either the [maxBufferSize] has been + /// reached, the timer controlled by [maxDelay] has been triggered or an + /// [OutputEvent] contains a [writeImmediately] log level. + /// + /// [maxFileSizeKB] controls the log file rotation. The output automatically + /// switches to a new log file as soon as the current file exceeds it. + /// Use -1 to disable log rotation. + /// + /// [maxDelay] describes the maximum amount of time before the buffer has to be + /// written to the file. + /// + /// Any log levels that are specified in [writeImmediately] trigger an immediate + /// flush to the disk ([Level.warning], [Level.error] and [Level.fatal] by default). + /// + /// [path] is either treated as directory for rotating or as target file name, + /// depending on [maxFileSizeKB]. + AdvancedFileOutput({ + required String path, + bool overrideExisting = false, + Encoding encoding = utf8, + List? writeImmediately, + Duration maxDelay = const Duration(seconds: 2), + int maxBufferSize = 2000, + int maxFileSizeKB = 1024, + String latestFileName = 'latest.log', + String Function(DateTime timestamp)? fileNameFormatter, + }) : _path = path, + _overrideExisting = overrideExisting, + _encoding = encoding, + _maxDelay = maxDelay, + _maxFileSizeKB = maxFileSizeKB, + _maxBufferSize = maxBufferSize, + _fileNameFormatter = fileNameFormatter ?? _defaultFileNameFormat, + _writeImmediately = writeImmediately ?? + [ + Level.error, + Level.fatal, + Level.warning, + // ignore: deprecated_member_use_from_same_package + Level.wtf, + ], + _file = maxFileSizeKB > 0 ? File('$path/$latestFileName') : File(path); + + /// Logs directory path by default, particular log file path if [_maxFileSizeKB] is 0. 
+ final String _path; + + final bool _overrideExisting; + final Encoding _encoding; + + final List _writeImmediately; + final Duration _maxDelay; + final int _maxFileSizeKB; + final int _maxBufferSize; + final String Function(DateTime timestamp) _fileNameFormatter; + + final File _file; + IOSink? _sink; + Timer? _bufferFlushTimer; + Timer? _targetFileUpdater; + + final List _buffer = []; + + bool get _rotatingFilesMode => _maxFileSizeKB > 0; + + /// Formats the file with a full date string. + /// + /// Example: + /// * `2024-01-01-10-05-02-123.log` + static String _defaultFileNameFormat(DateTime t) { + return '${t.year}-${t.month.toDigits(2)}-${t.day.toDigits(2)}' + '-${t.hour.toDigits(2)}-${t.minute.toDigits(2)}-${t.second.toDigits(2)}' + '-${t.millisecond.toDigits(3)}.log'; + } + + @override + Future init() async { + if (_rotatingFilesMode) { + final dir = Directory(_path); + // We use sync directory check to avoid losing potential initial boot logs + // in early crash scenarios. + if (!dir.existsSync()) { + dir.createSync(recursive: true); + } + + _targetFileUpdater = Timer.periodic( + const Duration(minutes: 1), + (_) => _updateTargetFile(), + ); + } + + _bufferFlushTimer = Timer.periodic(_maxDelay, (_) => _flushBuffer()); + await _openSink(); + if (_rotatingFilesMode) { + await _updateTargetFile(); // Run first check without waiting for timer tick + } + } + + @override + void output(OutputEvent event) { + _buffer.add(event); + // If event level is present in writeImmediately, flush the complete buffer + // along with any other possible elements that accumulated since + // the last timer tick. Additionally, if the buffer is full. + if (_buffer.length > _maxBufferSize || + _writeImmediately.contains(event.level)) { + _flushBuffer(); + } + } + + void _flushBuffer() { + if (_sink == null) return; // Wait until _sink becomes available + for (final event in _buffer) { + _sink?.writeAll(event.lines, Platform.isWindows ? 
'\r\n' : '\n'); + _sink?.writeln(); + } + _buffer.clear(); + } + + Future _updateTargetFile() async { + try { + if (await _file.exists() && + await _file.length() > _maxFileSizeKB * 1024) { + // Rotate the log file + await _closeSink(); + await _file.rename('$_path/${_fileNameFormatter(DateTime.now())}'); + await _openSink(); + } + } catch (e, s) { + print(e); + print(s); + // Try creating another file and working with it + await _closeSink(); + await _openSink(); + } + } + + Future _openSink() async { + _sink = _file.openWrite( + mode: _overrideExisting ? FileMode.writeOnly : FileMode.writeOnlyAppend, + encoding: _encoding, + ); + } + + Future _closeSink() async { + await _sink?.flush(); + await _sink?.close(); + _sink = null; // Explicitly set null until assigned again + } + + @override + Future destroy() async { + _bufferFlushTimer?.cancel(); + _targetFileUpdater?.cancel(); + try { + _flushBuffer(); + } catch (e, s) { + print('Failed to flush buffer before closing the logger: $e'); + print(s); + } + await _closeSink(); + } +} diff --git a/lib/src/outputs/advanced_file_output_stub.dart b/lib/src/outputs/advanced_file_output_stub.dart new file mode 100644 index 00000000..68483274 --- /dev/null +++ b/lib/src/outputs/advanced_file_output_stub.dart @@ -0,0 +1,63 @@ +import 'dart:convert'; + +import '../log_level.dart'; +import '../log_output.dart'; +import '../output_event.dart'; + +/// Accumulates logs in a buffer to reduce frequent disk writes, while optionally +/// switching to a new log file if it reaches a certain size. +/// +/// [AdvancedFileOutput] offers various improvements over the original +/// [FileOutput]: +/// * Managing an internal buffer which collects the logs and only writes +/// them after a certain period of time to the disk. +/// * Dynamically switching log files instead of using a single one specified +/// by the user, when the current file reaches a specified size limit (optionally). 
+/// +/// The buffered output can significantly reduce the +/// frequency of file writes, which can be beneficial for (micro-)SD storage +/// and other types of low-cost storage (e.g. on IoT devices). Specific log +/// levels can trigger an immediate flush, without waiting for the next timer +/// tick. +/// +/// New log files are created when the current file reaches the specified size +/// limit. This is useful for writing "archives" of telemetry data and logs +/// while keeping them structured. +class AdvancedFileOutput extends LogOutput { + /// Creates a buffered file output. + /// + /// By default, the log is buffered until either the [maxBufferSize] has been + /// reached, the timer controlled by [maxDelay] has been triggered or an + /// [OutputEvent] contains a [writeImmediately] log level. + /// + /// [maxFileSizeKB] controls the log file rotation. The output automatically + /// switches to a new log file as soon as the current file exceeds it. + /// Use -1 to disable log rotation. + /// + /// [maxDelay] describes the maximum amount of time before the buffer has to be + /// written to the file. + /// + /// Any log levels that are specified in [writeImmediately] trigger an immediate + /// flush to the disk ([Level.warning], [Level.error] and [Level.fatal] by default). + /// + /// [path] is either treated as directory for rotating or as target file name, + /// depending on [maxFileSizeKB]. + AdvancedFileOutput({ + required String path, + bool overrideExisting = false, + Encoding encoding = utf8, + List? writeImmediately, + Duration maxDelay = const Duration(seconds: 2), + int maxBufferSize = 2000, + int maxFileSizeKB = 1024, + String latestFileName = 'latest.log', + String Function(DateTime timestamp)? 
fileNameFormatter, + }) { + throw UnsupportedError("Not supported on this platform."); + } + + @override + void output(OutputEvent event) { + throw UnsupportedError("Not supported on this platform."); + } +} diff --git a/test/outputs/advanced_file_output_test.dart b/test/outputs/advanced_file_output_test.dart new file mode 100644 index 00000000..ced7f29e --- /dev/null +++ b/test/outputs/advanced_file_output_test.dart @@ -0,0 +1,133 @@ +import 'dart:io'; + +import 'package:logger/logger.dart'; +import 'package:test/test.dart'; + +void main() { + var file = File("${Directory.systemTemp.path}/dart_advanced_logger_test.log"); + var dir = Directory("${Directory.systemTemp.path}/dart_advanced_logger_dir"); + setUp(() async { + await file.create(recursive: true); + await dir.create(recursive: true); + }); + + tearDown(() async { + await file.delete(); + await dir.delete(recursive: true); + }); + + test('Real file read and write with buffer accumulation', () async { + var output = AdvancedFileOutput( + path: file.path, + maxDelay: const Duration(milliseconds: 500), + maxFileSizeKB: 0, + ); + await output.init(); + + final event0 = OutputEvent(LogEvent(Level.info, ""), ["First event"]); + final event1 = OutputEvent(LogEvent(Level.info, ""), ["Second event"]); + final event2 = OutputEvent(LogEvent(Level.info, ""), ["Third event"]); + + output.output(event0); + output.output(event1); + output.output(event2); + + // Wait until buffer is flushed to file + await Future.delayed(const Duration(seconds: 1)); + + await output.destroy(); + + var content = await file.readAsString(); + expect( + content, + allOf( + contains("First event"), + contains("Second event"), + contains("Third event"), + ), + ); + }); + + test('Real file read and write with rotating file names and immediate output', + () async { + var output = AdvancedFileOutput( + path: dir.path, + writeImmediately: [Level.info], + ); + await output.init(); + + final event0 = OutputEvent(LogEvent(Level.info, ""), ["First 
event"]); + final event1 = OutputEvent(LogEvent(Level.info, ""), ["Second event"]); + final event2 = OutputEvent(LogEvent(Level.info, ""), ["Third event"]); + + output.output(event0); + output.output(event1); + output.output(event2); + + await output.destroy(); + + final logFile = File('${dir.path}/latest.log'); + var content = await logFile.readAsString(); + expect( + content, + allOf( + contains("First event"), + contains("Second event"), + contains("Third event"), + ), + ); + }); + + test('Rolling files', () async { + var output = AdvancedFileOutput( + path: dir.path, + maxFileSizeKB: 1, + ); + await output.init(); + final event0 = OutputEvent(LogEvent(Level.fatal, ""), ["1" * 1500]); + output.output(event0); + await output.destroy(); + + // Start again to roll files on init without waiting for timer tick + await output.init(); + final event1 = OutputEvent(LogEvent(Level.fatal, ""), ["2" * 1500]); + output.output(event1); + await output.destroy(); + + // And again for another roll + await output.init(); + final event2 = OutputEvent(LogEvent(Level.fatal, ""), ["3" * 1500]); + output.output(event2); + await output.destroy(); + + final files = dir.listSync(); + + expect( + files, + (hasLength(3)), + ); + }); + + test('Flush temporary buffer on destroy', () async { + var output = AdvancedFileOutput(path: dir.path); + await output.init(); + + final event0 = OutputEvent(LogEvent(Level.info, ""), ["Last event"]); + final event1 = OutputEvent(LogEvent(Level.info, ""), ["Very last event"]); + + output.output(event0); + output.output(event1); + + await output.destroy(); + + final logFile = File('${dir.path}/latest.log'); + var content = await logFile.readAsString(); + expect( + content, + allOf( + contains("Last event"), + contains("Very last event"), + ), + ); + }); +}