Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

node: Improve live progress output (and other small tweaks) #315

Open
wants to merge 2 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
72 changes: 62 additions & 10 deletions node/flatpak_node_generator/main.py
Original file line number Diff line number Diff line change
@@ -1,11 +1,16 @@
from pathlib import Path
from typing import Iterator, List, Set
from typing import Any, ContextManager, Iterator, List, Optional, Set

import argparse
import asyncio
import contextlib
import json
import os
import sys
import time

from rich.console import Console, Group, RenderableType
from rich.live import Live

from .cache import Cache, FilesystemBasedCache
from .manifest import ManifestGenerator
Expand All @@ -18,6 +23,8 @@
from .providers.yarn import YarnProviderFactory
from .requests import Requests, StubRequests

_CONSOLE_REFRESH_PER_SECOND = 12.5


def _scan_for_lockfiles(base: Path, patterns: List[str]) -> Iterator[Path]:
for root, _, files in os.walk(base.parent):
Expand Down Expand Up @@ -52,6 +59,11 @@ async def _async_main() -> None:
action='append',
help='Given -r, restrict files to those matching the given pattern.',
)
parser.add_argument(
'--no-live-progress',
action='store_true',
help='Disable live progress output',
)
parser.add_argument(
'--registry',
help='The registry to use (npm only)',
Expand Down Expand Up @@ -135,13 +147,20 @@ async def _async_main() -> None:
dest='xdg_layout',
help="Don't use the XDG layout for caches",
)
# Internal option, useful for testing.
# Internal options, useful for testing.
parser.add_argument('--stub-requests', action='store_true', help=argparse.SUPPRESS)
parser.add_argument(
'--traceback-on-interrupt',
action='store_true',
help=argparse.SUPPRESS,
)

args = parser.parse_args()

Requests.retries = args.retries

# Only enable live (rich) progress output when the user did not pass
# --no-live-progress AND stdout is an interactive terminal.
# BUG FIX: the original tested `sys.stdout.isatty` without calling it; a
# bound method object is always truthy, so live progress was enabled even
# when stdout was redirected to a file or pipe. It must be invoked.
console = Console() if not args.no_live_progress and sys.stdout.isatty() else None

if args.type == 'yarn' and (args.no_devel or args.no_autopatch):
sys.exit('--no-devel and --no-autopatch do not apply to Yarn.')

Expand Down Expand Up @@ -187,6 +206,8 @@ async def _async_main() -> None:
else:
assert False, args.type

start_time = time.monotonic()

print('Reading packages from lockfiles...')
packages: Set[Package] = set()
rcfile_node_headers: Set[NodeHeaders] = set()
Expand Down Expand Up @@ -220,16 +241,45 @@ async def _async_main() -> None:
)
special = SpecialSourceProvider(gen, options)

with provider_factory.create_module_provider(gen, special) as module_provider:
with GeneratorProgress(
live: ContextManager[Any]
if console is not None:
requests_renderable = Requests.instance.get_renderable(console)
generator_renderable: Optional[RenderableType] = None

def get_renderable() -> RenderableType:
if generator_renderable is not None:
return Group(
requests_renderable,
generator_renderable,
)
else:
return requests_renderable

live = Live(
get_renderable=get_renderable,
refresh_per_second=_CONSOLE_REFRESH_PER_SECOND,
console=console,
)
else:
live = contextlib.nullcontext()

with live:
with provider_factory.create_module_provider(
gen, special
) as module_provider, GeneratorProgress(
packages,
module_provider,
args.max_parallel,
max_parallel=args.max_parallel,
traceback_on_interrupt=args.traceback_on_interrupt,
) as progress:
if console is not None:
generator_renderable = progress.get_renderable(console)

await progress.run()
for headers in rcfile_node_headers:
print(f'Generating headers {headers.runtime} @ {headers.target}')
await special.generate_node_headers(headers)

for headers in rcfile_node_headers:
print(f'Generating headers {headers.runtime} @ {headers.target}...')
await special.generate_node_headers(headers)

if args.xdg_layout:
script_name = 'setup_sdk_node_headers.sh'
Expand All @@ -246,6 +296,8 @@ async def _async_main() -> None:
)
gen.add_command(f'bash {gen.data_root / script_name}')

elapsed = round(time.monotonic() - start_time, 1)

if args.split:
i = 0
for i, part in enumerate(gen.split_sources()):
Expand All @@ -254,7 +306,7 @@ async def _async_main() -> None:
with open(output, 'w') as fp:
json.dump(part, fp, indent=ManifestGenerator.JSON_INDENT)

print(f'Wrote {gen.source_count} to {i + 1} file(s).')
print(f'Wrote {gen.source_count} to {i + 1} file(s) in {elapsed} second(s).')
else:
with open(args.output, 'w') as fp:
json.dump(
Expand All @@ -270,7 +322,7 @@ async def _async_main() -> None:
)
print(' (Pass -s to enable splitting.)')

print(f'Wrote {gen.source_count} source(s).')
print(f'Wrote {gen.source_count} source(s) in {elapsed} second(s).')


def main() -> None:
Expand Down
150 changes: 115 additions & 35 deletions node/flatpak_node_generator/progress.py
Original file line number Diff line number Diff line change
@@ -1,73 +1,153 @@
from typing import Collection, ContextManager, Optional, Type
from dataclasses import dataclass
from typing import Collection, ContextManager, Optional, Set, Type

import asyncio
import shutil
import sys
import traceback
import types

from rich.console import (
Console,
ConsoleOptions,
ConsoleRenderable,
RenderableType,
RenderResult,
)
from rich.measure import Measurement
from rich.segment import Segment
from rich.status import Status

from .package import Package
from .providers import ModuleProvider


def _generating_packages(finished: int, total: int) -> str:
return f'Generating packages [{finished}/{total}]'


class _GeneratingPackagesRenderable(ConsoleRenderable):
    """Rich renderable showing 'Generating packages [x/y] => pkg1, pkg2...',
    truncated so it always fits within the console width.

    The list of in-flight packages is rendered in sorted order and cut off
    (with trailing ellipses) as soon as the next entry would overflow.
    """

    def __init__(self, finished: int, total: int, processing: Set[Package]) -> None:
        # Pre-rendered fixed prefix, e.g. 'Generating packages [3/10]'.
        self.generating_string = _generating_packages(finished, total)
        # Packages currently being generated.
        self.processing = processing

    def __rich_measure__(
        self, console: Console, options: ConsoleOptions
    ) -> Measurement:
        # Flexible width: render into whatever space is available.
        return Measurement(0, options.max_width)

    def __rich_console__(
        self, console: Console, options: ConsoleOptions
    ) -> RenderResult:
        ARROW = ' => '
        ELLIPSES = '...'
        SEPARATOR = ', '

        yield Segment(self.generating_string)
        space_remaining = options.max_width - len(self.generating_string)

        # Not even room for the ellipses: emit the prefix only.
        if space_remaining < len(ELLIPSES):
            return
        # Room for the ellipses but not for the arrow plus any package name.
        # BUG FIX: the original did `return ELLIPSES`, which inside a
        # generator discards the value (it becomes StopIteration.value) and
        # renders nothing; it must be yielded as a Segment. It also compared
        # against `options.max_width` instead of the space actually left
        # after the prefix.
        elif space_remaining < len(ELLIPSES) + len(ARROW):
            yield Segment(ELLIPSES)
            return

        # Sort for a stable, deterministic display order.
        packages = sorted(
            f'{package.name} @ {package.version}' for package in self.processing
        )

        yield Segment(ARROW)
        # Reserve room for the trailing ellipses up front.
        space_remaining -= len(ARROW) + len(ELLIPSES)

        for i, package in enumerate(packages):
            if i:
                package = SEPARATOR + package
            if len(package) > space_remaining:
                break

            yield Segment(package)
            space_remaining -= len(package)

        yield Segment(ELLIPSES)


class GeneratorProgress(ContextManager['GeneratorProgress']):
    """Context manager tracking and displaying progress while package
    sources are generated, with bounded parallelism."""

    def __init__(
        self,
        packages: Collection[Package],
        module_provider: ModuleProvider,
        *,
        max_parallel: int,
        traceback_on_interrupt: bool,
    ) -> None:
        # Number of packages fully generated so far.
        self.finished = 0
        # Packages currently being generated (displayed on the status line).
        self.processing: Set[Package] = set()
        self.packages = packages
        self.module_provider = module_provider
        # Caps how many generate tasks run concurrently.
        self.parallel_limit = asyncio.Semaphore(max_parallel)
        # NOTE(review): previous_package/current_package are not referenced by
        # any other code visible here — possibly leftovers from the pre-rich
        # implementation; confirm before relying on them.
        self.previous_package: Optional[Package] = None
        self.current_package: Optional[Package] = None
        # When True, print a per-package traceback on interruption (Ctrl-C).
        self.traceback_on_interrupt = traceback_on_interrupt
        # Created lazily by get_renderable(); stays None when there is no
        # live console (e.g. stdout is not a TTY).
        self.status: Optional[Status] = None

@property
def _total(self) -> int:
    # Total number of packages this progress tracker was created with.
    return len(self.packages)

def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_value: Optional[BaseException],
tb: Optional[types.TracebackType],
) -> None:
print()

def _format_package(self, package: Package, max_width: int) -> str:
result = f'{package.name} @ {package.version}'

if len(result) > max_width:
result = result[: max_width - 3] + '...'

return result
line = f'Generated {self._total} package(s).'
if self.status is not None:
self.status.update(line)
self.status.stop()
else:
print(line)

def _update(self) -> None:
columns, _ = shutil.get_terminal_size()

sys.stdout.write('\r' + ' ' * columns)

prefix_string = f'\rGenerating packages [{self.finished}/{len(self.packages)}] '
sys.stdout.write(prefix_string)
max_package_width = columns - len(prefix_string)

if self.current_package is not None:
sys.stdout.write(
self._format_package(self.current_package, max_package_width)
)

sys.stdout.flush()

def _update_with_package(self, package: Package) -> None:
self.previous_package, self.current_package = (
self.current_package,
package,
if self.status is None:
# No TTY. Only print an update on multiples of 10 to avoid spamming
# the console.
if self.finished % 10 == 0 or self.finished == self._total:
print(
f'{_generating_packages(self.finished, self._total)}...',
flush=True,
)
return

self.status.update(
_GeneratingPackagesRenderable(self.finished, self._total, self.processing)
)
self._update()

async def _generate(self, package: Package) -> None:
async with self.parallel_limit:
self._update_with_package(package)
await self.module_provider.generate_package(package)
self.processing.add(package)
# Don't bother printing an update here without live progress, since
# then the currently processing packages won't appear anyway.
if self.status is not None:
self._update()

try:
await self.module_provider.generate_package(package)
except asyncio.CancelledError:
if self.traceback_on_interrupt:
print(f'========== {package.name} ==========', file=sys.stderr)
traceback.print_exc()
print(file=sys.stderr)
raise

self.finished += 1
self._update_with_package(package)
self.processing.remove(package)
self._update()

def get_renderable(self, console: Console) -> RenderableType:
    """Return this progress's Status renderable, creating it on first use
    bound to *console*.

    Created lazily so that no live-output objects exist when running
    without a TTY.
    """
    if self.status is not None:
        # NOTE(review): `assert` is stripped under `python -O`; the invariant
        # is that every caller passes the same console instance.
        assert self.status.console is console
    else:
        self.status = Status('', console=console)

    return self.status

async def run(self) -> None:
self._update()
Expand Down
Loading