diff --git a/3rdparty/cutlass_fpA_intB_gemm b/3rdparty/cutlass_fpA_intB_gemm
index e9dfd172ca4f..c633ae800283 160000
--- a/3rdparty/cutlass_fpA_intB_gemm
+++ b/3rdparty/cutlass_fpA_intB_gemm
@@ -1 +1 @@
-Subproject commit e9dfd172ca4f32ad3fd20e46259b35159390cf91
+Subproject commit c633ae800283627a62e69e064d05a28ff13d380a
diff --git a/3rdparty/dlpack b/3rdparty/dlpack
deleted file mode 160000
index 3ea601bb4130..000000000000
--- a/3rdparty/dlpack
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit 3ea601bb413074c49a77c4ce3218bc08f8c4703c
diff --git a/3rdparty/libbacktrace b/3rdparty/libbacktrace
deleted file mode 160000
index 08f7c7e69f8e..000000000000
--- a/3rdparty/libbacktrace
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit 08f7c7e69f8ea61a0c4151359bc8023be8e9217b
diff --git a/python/tvm/relax/backend/cuda/flashinfer.py b/python/tvm/relax/backend/cuda/flashinfer.py
index 47a4946ca97d..a1f8ea58a24a 100644
--- a/python/tvm/relax/backend/cuda/flashinfer.py
+++ b/python/tvm/relax/backend/cuda/flashinfer.py
@@ -20,6 +20,7 @@
 import json
 import os
 import subprocess
+from typing import Optional, Tuple
 from concurrent.futures import ThreadPoolExecutor
 from pathlib import Path
 from typing import List
@@ -114,7 +115,7 @@ def get_object_file_path(src: Path) -> Path:

     # Determine compute version
     compute_version = "".join(tvm.contrib.nvcc.get_target_compute_version(target).split("."))
-    if compute_version in ["90"]:
+    if compute_version in ["90", "100"]:
         compute_version += "a"
     cuda_cflags += [
         "-gencode",
@@ -447,3 +448,96 @@ def gen_sampling_module(target: Target, num_threads: int = 8):
     object_files = _compile_flashinfer_kernels(uri, source_paths, target, num_threads)
     modules = _load_flashinfer_modules(object_files)
     return modules
+
+def gen_grouped_gemm_module(
+    dtype_a: str,
+    dtype_b: str,
+    dtype_out: str,
+    scale_granularity_m: int,
+    scale_granularity_n: int,
+    scale_granularity_k: int,
+    scale_major_mode: str,
+    mma_sm: int,
+    target: Target,
+    num_threads: int = 8,
+) -> List[tvm.runtime.Module]:
+    """Generate a FlashInfer module for FP8 grouped GEMM.
+
+    Parameters
+    ----------
+    dtype_a : str
+        The data type of matrix A (e.g., "float8_e4m3fn").
+    dtype_b : str
+        The data type of matrix B (e.g., "float8_e4m3fn").
+    dtype_out : str
+        The data type of the output matrix (e.g., "bfloat16").
+    scale_granularity_m : int
+        The scaling granularity in the M dimension.
+    scale_granularity_n : int
+        The scaling granularity in the N dimension.
+    scale_granularity_k : int
+        The scaling granularity in the K dimension.
+    scale_major_mode : str
+        The scale storage mode ("K" or "MN").
+    mma_sm : int
+        The MMA scheduling mode (1 or 2).
+    target : Target
+        The target device to compile for.
+    num_threads : int
+        The number of threads to use for compilation.
+
+    Returns
+    -------
+    List[tvm.runtime.Module]
+        A list of compiled static library modules for FlashInfer FP8 grouped GEMM kernels.
+
+    Note
+    ----
+    When applying grouped GEMM with A: (total_m, k), B: (batch_size, n, k), and
+    m_indptr: (batch_size,), every segment length in m_indptr must be a multiple of 4.
+    """
+    try:
+        from flashinfer.jit import (
+            gen_grouped_gemm_fp8_tvm_binding,
+            get_grouped_gemm_fp8_uri,
+        )
+    except ImportError:
+        raise ImportError(
+            "FlashInfer is not installed. Please follow instructions "
+            "in https://docs.flashinfer.ai to install FlashInfer."
+        )
+    try:
+        import torch
+    except ImportError:
+        raise ImportError("PyTorch is not installed. Please install PyTorch to use FlashInfer.")
+
+    torch_dtype_a = getattr(torch, dtype_a)
+    torch_dtype_b = getattr(torch, dtype_b)
+    torch_dtype_out = getattr(torch, dtype_out)
+
+    uri = get_grouped_gemm_fp8_uri(
+        dtype_a=torch_dtype_a,
+        dtype_b=torch_dtype_b,
+        dtype_out=torch_dtype_out,
+        scale_granularity_m=scale_granularity_m,
+        scale_granularity_n=scale_granularity_n,
+        scale_granularity_k=scale_granularity_k,
+        scale_major_mode=scale_major_mode,
+        mma_sm=mma_sm,
+    )
+
+    uri, source_paths = gen_grouped_gemm_fp8_tvm_binding(
+        uri=uri,
+        dtype_a=torch_dtype_a,
+        dtype_b=torch_dtype_b,
+        dtype_out=torch_dtype_out,
+        scale_granularity_m=scale_granularity_m,
+        scale_granularity_n=scale_granularity_n,
+        scale_granularity_k=scale_granularity_k,
+        scale_major_mode=scale_major_mode,
+        mma_sm=mma_sm,
+    )
+
+    object_files = _compile_flashinfer_kernels(uri, source_paths, target, num_threads)
+    modules = _load_flashinfer_modules(object_files)
+    return modules
\ No newline at end of file
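The new gen_grouped_gemm_module entry point above is exercised by the test added below. A minimal usage sketch, assuming FlashInfer, PyTorch, and an SM90+ GPU are available; it follows the loading pattern and the grouped_gemm_fp8_run symbol name used in that test, and the dtypes and scale granularities shown are only illustrative:

import tvm
from tvm.contrib import utils
from tvm.relax.backend.cuda import flashinfer

device = tvm.cuda(0)
target = tvm.target.Target.from_device(device)

# Build the FP8 grouped GEMM kernels for row-wise A scales and 128x128 B blocks.
modules = flashinfer.gen_grouped_gemm_module(
    dtype_a="float8_e4m3fn",
    dtype_b="float8_e4m3fn",
    dtype_out="bfloat16",
    scale_granularity_m=1,
    scale_granularity_n=128,
    scale_granularity_k=128,
    scale_major_mode="K",
    mma_sm=1,
    target=target,
)

# Link the returned static libraries into one loadable module, as the test does.
lib = modules[0]
for extra in modules[1:]:
    lib.import_module(extra)
temp = utils.tempdir()
lib_path = temp.relpath("flashinfer_grouped_gemm.so")
lib.export_library(lib_path)
grouped_gemm = tvm.runtime.load_module(lib_path)["grouped_gemm_fp8_run"]

# grouped_gemm(int_workspace, float_workspace, A, B, scale_A, scale_B, D,
#              m_indptr, n, k, cuda_stream) -- per the docstring note, every
# segment length encoded in m_indptr must be a multiple of 4.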
diff --git a/tests/python/relax/test_group_gemm_flashinfer.py b/tests/python/relax/test_group_gemm_flashinfer.py
new file mode 100644
index 000000000000..c669fbfad3af
--- /dev/null
+++ b/tests/python/relax/test_group_gemm_flashinfer.py
@@ -0,0 +1,500 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+"""Test for FlashInfer GroupedGemm TVM integration"""
+
+import math
+import numpy as np
+import pytest
+import torch
+from einops import rearrange, reduce, repeat
+
+import tvm
+import tvm.testing
+from tvm import relax
+from tvm.contrib import utils
+from tvm.relax.backend.cuda import flashinfer
+
+DEFAULT_WORKSPACE_SIZE = 32 * 1024 * 1024
+fp8_dtype = "float8_e4m3fn"
+
+
+###########################################
+################# Helpers #################
+###########################################
+def has_flashinfer():
+    """Check if FlashInfer is available"""
+    try:
+        from tvm.relax.backend.cuda import (  # pylint: disable=import-outside-toplevel
+            flashinfer,
+        )
+
+        return True
+    except ImportError:
+        return False
+
+
+def has_cutlass():
+    """Check if CUTLASS is available for SM90+ operations"""
+    if not tvm.get_global_func("device_api.cuda", True):
+        return False
+    try:
+        import pynvml  # pylint: disable=import-outside-toplevel
+
+        pynvml.nvmlInit()
+        handle = pynvml.nvmlDeviceGetHandleByIndex(0)
+        major, minor = pynvml.nvmlDeviceGetCudaComputeCapability(handle)
+        return major >= 9  # SM90+
+    except Exception:
+        return False
+
+def calc_diff(x: np.ndarray, y: np.ndarray):
+    denominator = (x * x + y * y).sum()
+    sim = 2 * (x * y).sum() / denominator
+    return 1 - sim
+
+def quantize_fp8(x, scale_shape, tile_shape, scale_major_mode):
+    """
+    Quantizes a 2D or 3D tensor to FP8.
+
+    Args:
+        x (torch.Tensor): The 2D or 3D input tensor.
+        scale_shape (tuple): The shape of the scale tensor.
+        tile_shape (tuple): The shape of the tiles.
+        scale_major_mode (str): The tiling order, "K" for row-major like,
+                                or another value for column-major like.
+
+    Returns:
+        tuple: A tuple containing the quantized FP8 tensor and the
+               calculated float32 scales.
+    """
+    # 1. Assertions and Initial Setup
+    ndim = x.ndim
+    assert ndim == len(scale_shape) == len(tile_shape)
+
+    fp8_info = torch.finfo(torch.float8_e4m3fn)
+    fp8_amax = torch.tensor(fp8_info.max, device=x.device, dtype=torch.float32)
+
+    # 2. Tiling and Scale Calculation
+    if ndim == 2:
+        s0, s1 = scale_shape
+        t0, t1 = tile_shape
+        if scale_major_mode == "K":
+            # Tile x and find the max absolute value in each tile
+            x_tiled = rearrange(x, "(s0 t0) (s1 t1) -> s0 s1 t0 t1", s0=s0, s1=s1)
+            abs_max = reduce(x_tiled.abs(), "s0 s1 t0 t1 -> s0 s1", "max").clamp(1e-4)
+            x_scale = abs_max / fp8_amax
+            x_scale = torch.pow(2.0, torch.ceil(torch.log2(x_scale.abs())))
+
+            # Broadcast scales back to the original tensor shape
+            scales_repeated = repeat(x_scale, "s0 s1 -> (s0 t0) (s1 t1)", t0=t0, t1=t1)
+        else:
+            # Handle column-major tiling
+            x_tiled = rearrange(x, "(s1 t0) (s0 t1) -> s0 s1 t0 t1", s0=s0, s1=s1)
+            abs_max = reduce(x_tiled.abs(), "s0 s1 t0 t1 -> s0 s1", "max").clamp(1e-4)
+            x_scale = abs_max / fp8_amax
+            x_scale = torch.pow(2.0, torch.ceil(torch.log2(x_scale.abs())))
+
+            # Permute scale axes before repeating to match layout
+            scales_permuted = rearrange(x_scale, "s0 s1 -> s1 s0")
+            scales_repeated = repeat(scales_permuted, "s1 s0 -> (s1 t0) (s0 t1)", t0=t0, t1=t1)
+
+    elif ndim == 3:
+        s0, s1, s2 = scale_shape
+        t0, t1, t2 = tile_shape
+        if scale_major_mode == "K":
+            # Tile x and find the max absolute value in each tile
+            x_tiled = rearrange(
+                x, "(s0 t0) (s1 t1) (s2 t2) -> s0 s1 s2 t0 t1 t2", s0=s0, s1=s1, s2=s2
+            )
+            abs_max = reduce(
+                x_tiled.abs(), "s0 s1 s2 t0 t1 t2 -> s0 s1 s2", "max"
+            ).clamp(1e-4)
+            x_scale = abs_max / fp8_amax
+            x_scale = torch.pow(2.0, torch.ceil(torch.log2(x_scale.abs())))
+
+            # Broadcast scales back to the original tensor shape
+            scales_repeated = repeat(
+                x_scale, "s0 s1 s2 -> (s0 t0) (s1 t1) (s2 t2)", t0=t0, t1=t1, t2=t2
+            )
+        else:
+            # Handle layout where the last two axes are swapped
+            x_tiled = rearrange(
+                x, "(s0 t0) (s2 t1) (s1 t2) -> s0 s1 s2 t0 t1 t2", s0=s0, s1=s1, s2=s2
+            )
+            abs_max = reduce(
+                x_tiled.abs(), "s0 s1 s2 t0 t1 t2 -> s0 s1 s2", "max"
+            ).clamp(1e-4)
+            x_scale = abs_max / fp8_amax
+            x_scale = torch.pow(2.0, torch.ceil(torch.log2(x_scale.abs())))
+            # Permute scale axes before repeating to match layout
+            scales_permuted = rearrange(x_scale, "s0 s1 s2 -> s0 s2 s1")
+            scales_repeated = repeat(
+                scales_permuted,
+                "s0 s2 s1 -> (s0 t0) (s2 t1) (s1 t2)",
+                t0=t0,
+                t1=t1,
+                t2=t2,
+            )
+    # 3. Final Quantization
+    # Divide the original tensor by the broadcasted scales
+    x_fp32 = x / (scales_repeated + 1e-8)
+
+    # Convert the result to the target FP8 format
+    x_fp8 = x_fp32.to(torch.float8_e4m3fn)
+
+    return x_fp8, x_scale
+
+
+def dequantize_fp8(x, x_scale, scale_major_mode):
+    """
+    Dequantizes an FP8 tensor back to float32 using its per-tile scales.
+
+    Args:
+        x (torch.Tensor): The 2D or 3D FP8 input tensor.
+        x_scale (torch.Tensor): The per-tile scales produced by quantize_fp8.
+        scale_major_mode (str): The tiling order, "K" for row-major like,
+                                or another value for column-major like.
+
+    Returns:
+        torch.Tensor: The dequantized float32 tensor.
+    """
+    # 1. Assertions and Initial Setup
+    ndim = x.ndim
+    assert ndim == len(x_scale.shape)
+
+    # 2. Tiling and Scale Calculation
+    if ndim == 2:
+        if scale_major_mode == "K":
+            s0, s1 = x_scale.shape
+        else:
+            s1, s0 = x_scale.shape
+        x = rearrange(x.to(torch.float32), "(s0 t0) (s1 t1) -> s0 s1 t0 t1", s0=s0, s1=s1)
+        if scale_major_mode == "K":
+            x_scale = rearrange(x_scale, "s0 s1 -> s0 s1 1 1")
+        else:
+            x_scale = rearrange(x_scale, "s0 s1 -> s1 s0 1 1")
+        out = rearrange(x * x_scale, "s0 s1 t0 t1 -> (s0 t0) (s1 t1)")
+    elif ndim == 3:
+        if scale_major_mode == "K":
+            s0, s1, s2 = x_scale.shape
+        else:
+            s0, s2, s1 = x_scale.shape
+        x = rearrange(
+            x.to(torch.float32),
+            "(s0 t0) (s1 t1) (s2 t2)-> s0 s1 s2 t0 t1 t2",
+            s0=s0,
+            s1=s1,
+            s2=s2,
+        )
+        if scale_major_mode == "K":
+            x_scale = rearrange(x_scale, "s0 s1 s2 -> s0 s1 s2 1 1 1")
+        else:
+            x_scale = rearrange(x_scale, "s0 s1 s2 -> s0 s2 s1 1 1 1")
+        out = rearrange(x * x_scale, "s0 s1 s2 t0 t1 t2 -> (s0 t0) (s1 t1) (s2 t2)")
+
+    return out
+
+
+###########################################
+########### Reference generation ##########
+###########################################
+def compute_reference_grouped_gemm(
+    a_fp32: torch.Tensor,  # (total_m, k)
+    b_fp32: torch.Tensor,  # (batch_size, n, k)
+    m_indptr: torch.Tensor,
+    dtype_out: str,  # (total_m, n)
+):
+    """Compute the reference grouped GEMM result using the original FP32 tensors."""
+
+    total_m, k = a_fp32.shape
+    batch_size, n, k2 = b_fp32.shape
+    assert k == k2
+
+    # Perform grouped GEMM computation directly on original FP32 data
+    results = []
+
+    for i in range(batch_size):
+        start_m = m_indptr[i].item()
+        end_m = m_indptr[i + 1].item()
+
+        # Extract this group's rows of A
+        a_group = a_fp32[start_m:end_m, :]  # [m_sizes[i], k]
+        b_group = b_fp32[i]
+
+        # Multiply by this group's B matrix
+        result_group = torch.mm(a_group, b_group.T)  # [m_sizes[i], n]
+        results.append(result_group)
+
+    result_fp32 = torch.cat(results, dim=0)
+
+    # Convert to output dtype
+    if dtype_out == "bfloat16":
+        result = result_fp32.to(torch.bfloat16)
+    elif dtype_out == "float16":
+        result = result_fp32.to(torch.float16)
+    else:
+        result = result_fp32
+
+    return result
+
+
+###########################################
+########### Test data generation ##########
+###########################################
+def generate_test_data(
+    m_sizes: list,
+    batch_size: int,
+    n: int,
+    k: int,
+    dtype_a: str,
+    dtype_b: str,
+    dtype_out: str,
+    scale_granularity_m: int,
+    scale_granularity_n: int,
+    scale_granularity_k: int,
+    scale_major_mode: str,
+    device: tvm.runtime.Device,
+):
+    """Generate test data for grouped GEMM operations"""
+    assert batch_size == len(
+        m_sizes
+    ), f"batch_size ({batch_size}) must equal len(m_sizes) ({len(m_sizes)})"
+
+    torch_device = torch.device(f"cuda:{device.device_id}")
+
+    cum_m = [0] + list(np.cumsum(m_sizes))
+    total_m = cum_m[-1]
+
+    # Generate A and B as random fp32 data first (both are asserted to be fp8 inputs), then quantize
+    assert dtype_a == "float8_e4m3fn"
+    a_fp32 = torch.randn(total_m, k, device=torch_device, dtype=torch.float32)
+
+    assert dtype_b == "float8_e4m3fn"
+    b_fp32 = torch.randn(batch_size, n, k, device=torch_device, dtype=torch.float32) / math.sqrt(k)
+
+    if scale_major_mode == "K":  # K mode
+        scale_a_shape = (total_m // scale_granularity_m, k // scale_granularity_k)
+        scale_b_shape = (batch_size, n // scale_granularity_n, k // scale_granularity_k)
+
+    else:  # MN mode
+        scale_a_shape = (k // scale_granularity_k,
total_m // scale_granularity_m) + scale_b_shape = (batch_size, k // scale_granularity_k, n // scale_granularity_n) + + tile_a_shape = (scale_granularity_m, scale_granularity_k) + tile_b_shape = (1, scale_granularity_n, scale_granularity_k) + + # quantize A, B + a_quantized, scale_a = quantize_fp8(a_fp32, scale_a_shape, tile_a_shape, scale_major_mode) + b_quantized, scale_b = quantize_fp8(b_fp32, scale_b_shape, tile_b_shape, scale_major_mode) + + if dtype_a == "float8_e4m3fn": + a_tvm = tvm.nd.array( + a_quantized.view(torch.uint8).cpu().numpy().view(fp8_dtype), device=device + ) + else: + a_tvm = tvm.nd.from_dlpack(a_quantized) + + if dtype_b == "float8_e4m3fn": + b_tvm = tvm.nd.array( + b_quantized.view(torch.uint8).cpu().numpy().view(fp8_dtype), device=device + ) + else: + b_tvm = tvm.nd.from_dlpack(b_quantized) + + scale_a_tvm = tvm.nd.from_dlpack(scale_a) + scale_b_tvm = tvm.nd.from_dlpack(scale_b) + + # Create m_indptr for grouped operation + m_indptr = torch.tensor(cum_m, device=torch_device, dtype=torch.int32) + m_indptr_tvm = tvm.nd.array(m_indptr.cpu().numpy(), device) + + return { + "a": a_tvm, + "b": b_tvm, + "torch_a": a_fp32, + "torch_b": b_fp32, + "scale_a": scale_a_tvm, + "scale_b": scale_b_tvm, + "m_indptr": m_indptr_tvm, + "m_sizes": m_sizes, + "n": n, + "k": k, + "total_m": total_m, + "torch_scale_a": scale_a, + "torch_scale_b": scale_b, + "torch_m_indptr": m_indptr, + } + + +########################################### +############### Test driver ############### +########################################### +@pytest.mark.skipif(not has_flashinfer(), reason="FlashInfer not available") +@pytest.mark.skipif(not has_cutlass(), reason="CUTLASS SM90+ not available") +@pytest.mark.parametrize( + "dtype_a,dtype_b,dtype_out", + [ + ("float8_e4m3fn", "float8_e4m3fn", "bfloat16"), + ("float8_e4m3fn", "float8_e4m3fn", "float16"), + ], +) +@pytest.mark.parametrize( + "scale_granularity_m,scale_granularity_n,scale_granularity_k", + [ + (1, 128, 128), # Row-wise A, block-wise B + ], +) +@pytest.mark.parametrize("scale_major_mode", ["K", "MN"]) +@pytest.mark.parametrize("mma_sm", [1, 2]) +@pytest.mark.parametrize( + "test_case", + [ + {"batch_size": 4, "m_sizes": [128, 256, 192, 320], "n": 512, "k": 1024}, + {"batch_size": 2, "m_sizes": [64, 128], "n": 256, "k": 512}, + {"batch_size": 3, "m_sizes": [256, 256, 128], "n": 768, "k": 768}, + {"batch_size": 2, "m_sizes": [20, 36], "n": 768, "k": 768}, + ], +) +def test_grouped_gemm_correctness( + dtype_a, + dtype_b, + dtype_out, + scale_granularity_m, + scale_granularity_n, + scale_granularity_k, + scale_major_mode, + mma_sm, + test_case, +): + """Test correctness of GroupedGemm operations""" + device = tvm.cuda(0) + target = tvm.target.Target.from_device(device) + + def _load_module(name: str, static_modules): + """Helper function to load compiled modules.""" + assert len(static_modules) > 0 + if len(static_modules) == 1: + return static_modules[0] + static_mod = static_modules[0] + for mod in static_modules[1:]: + static_mod.import_module(mod) + temp = tvm.contrib.utils.tempdir() + mod_path = temp.relpath(f"{name}.so") + static_mod.export_library(mod_path) + return tvm.runtime.load_module(mod_path) + + # Generate the module + modules = relax.backend.cuda.flashinfer.gen_grouped_gemm_module( + dtype_a=dtype_a, + dtype_b=dtype_b, + dtype_out=dtype_out, + scale_granularity_m=scale_granularity_m, + scale_granularity_n=scale_granularity_n, + scale_granularity_k=scale_granularity_k, + scale_major_mode=scale_major_mode, + mma_sm=mma_sm, + 
target=target, + num_threads=4, + ) + + # Load the module + mod = _load_module("flashinfer_grouped_gemm", modules) + grouped_gemm_fn = mod["grouped_gemm_fp8_run"] + + # Generate test data + test_data = generate_test_data( + batch_size=test_case["batch_size"], + m_sizes=test_case["m_sizes"], + n=test_case["n"], + k=test_case["k"], + dtype_a=dtype_a, + dtype_b=dtype_b, + dtype_out=dtype_out, + scale_granularity_m=scale_granularity_m, + scale_granularity_n=scale_granularity_n, + scale_granularity_k=scale_granularity_k, + scale_major_mode=scale_major_mode, + device=device, + ) + + # Prepare output buffer + output_shape = (test_data["total_m"], test_data["n"]) + if dtype_out == "bfloat16": + output = tvm.nd.empty(output_shape, dtype="bfloat16", device=device) + elif dtype_out == "float16": + output = tvm.nd.empty(output_shape, dtype="float16", device=device) + else: + output = tvm.nd.empty(output_shape, dtype="float32", device=device) + + # Create workspace buffers (required by the interface) + int_workspace = tvm.nd.empty((DEFAULT_WORKSPACE_SIZE,), dtype="int32", device=device) + float_workspace = tvm.nd.empty((DEFAULT_WORKSPACE_SIZE,), dtype="float32", device=device) + + grouped_gemm_fn( + int_workspace, # int_workspace_buffer + float_workspace, # float_workspace_buffer + test_data["a"], # A + test_data["b"], # B + test_data["scale_a"], # SFA + test_data["scale_b"], # SFB + output, # D + test_data["m_indptr"], # m_indptr + test_data["n"], # n (scalar) + test_data["k"], # k (scalar) + None, # cuda_stream (use default stream) + ) + + # Compute reference result + reference = compute_reference_grouped_gemm( + test_data['torch_a'], + test_data['torch_b'], + test_data["torch_m_indptr"], + dtype_out, + ) + + # Convert TVM output to PyTorch for comparison + output_torch = torch.as_tensor(output, device=test_data["torch_a"].device) + output_torch + + # Compare results with appropriate tolerance + if dtype_out == "bfloat16": + rtol, atol = 1e-2, 1e-2 + elif dtype_out == "float16": + rtol, atol = 1e-3, 1e-3 + else: + rtol, atol = 1e-4, 1e-4 + + # Check shapes match + assert ( + output_torch.shape == reference.shape + ), f"Shape mismatch: got {output_torch.shape}, expected {reference.shape}" + + + diff = calc_diff( + output_torch.cpu().double().numpy(), + reference.cpu().double().numpy() + ) + assert diff < 1e-3, f"diff too large {diff}" + + +if __name__ == "__main__": + tvm.testing.main() + diff --git a/web/package-lock.json b/web/package-lock.json index 79ea7dfecd62..8eb04e2fada3 100644 --- a/web/package-lock.json +++ b/web/package-lock.json @@ -8,6 +8,10 @@ "name": "tvmjs", "version": "0.22.0-dev0", "license": "Apache-2.0", + "dependencies": { + "audit": "^0.0.6", + "fix": "^0.0.6" + }, "devDependencies": { "@rollup/plugin-commonjs": "^20.0.0", "@rollup/plugin-node-resolve": "^13.0.4", @@ -1993,6 +1997,14 @@ "node": ">= 4.5.0" } }, + "node_modules/audit": { + "version": "0.0.6", + "resolved": "https://registry.npmjs.org/audit/-/audit-0.0.6.tgz", + "integrity": "sha512-xgv3Y3RIYE00N2/xk10VLlwFd1kjc7FRaX1vC8+CsOfDRe53a06vOSkp91BOSNijZfddYum47a1Fvju/2+JPcw==", + "engines": { + "node": ">= 0.5.0" + } + }, "node_modules/babel-jest": { "version": "26.6.3", "resolved": "https://registry.npmjs.org/babel-jest/-/babel-jest-26.6.3.tgz", @@ -2357,6 +2369,20 @@ "node": ">=0.10.0" } }, + "node_modules/call-bind-apply-helpers": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", + "integrity": 
"sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, "node_modules/callsites": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", @@ -2807,6 +2833,21 @@ "node": ">=8" } }, + "node_modules/dunder-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", + "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.1", + "es-errors": "^1.3.0", + "gopd": "^1.2.0" + }, + "engines": { + "node": ">= 0.4" + } + }, "node_modules/electron-to-chromium": { "version": "1.4.284", "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.284.tgz", @@ -2849,6 +2890,55 @@ "is-arrayish": "^0.2.1" } }, + "node_modules/es-define-property": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", + "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-object-atoms": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz", + "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-set-tostringtag": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz", + "integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.6", + "has-tostringtag": "^1.0.2", + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, "node_modules/escalade": { "version": "3.1.1", "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", @@ -3747,6 +3837,19 @@ "node": ">=8" } }, + "node_modules/fix": { + "version": "0.0.6", + "resolved": "https://registry.npmjs.org/fix/-/fix-0.0.6.tgz", + "integrity": "sha512-UQ+8m0GnIakgpY+92a9y+pYoX3Y6eaW7WNTkPolQ7r58Fjzq7NhyRLMrZ6J6U1u4y7H7APugjRmZ+i6CAn4+Dg==", + "dependencies": { + "pipe": "0.0.2", + "underscore": "1.1.6", + "underscore.string": "1.1.4" + }, + "engines": { + "node": ">=0.4.8" + } + }, "node_modules/flat-cache": { "version": "3.0.4", "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.0.4.tgz", @@ -3776,14 +3879,17 @@ } }, "node_modules/form-data": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/form-data/-/form-data-3.0.1.tgz", - "integrity": "sha512-RHkBKtLWUVwd7SqRIvCZMEvAMoGUp0XU+seQiZejj0COz3RI3hWP4sCv3gZWWLjJTd7rGwcsF5eKZGii0r/hbg==", + "version": "3.0.4", + 
"resolved": "https://registry.npmjs.org/form-data/-/form-data-3.0.4.tgz", + "integrity": "sha512-f0cRzm6dkyVYV3nPoooP8XlccPQukegwhAnpoLcXy+X+A8KfpGOoXwDr9FLZd3wzgLaBGQBE3lY93Zm/i1JvIQ==", "dev": true, + "license": "MIT", "dependencies": { "asynckit": "^0.4.0", "combined-stream": "^1.0.8", - "mime-types": "^2.1.12" + "es-set-tostringtag": "^2.1.0", + "hasown": "^2.0.2", + "mime-types": "^2.1.35" }, "engines": { "node": ">= 6" @@ -3823,10 +3929,14 @@ } }, "node_modules/function-bind": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", - "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==", - "dev": true + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } }, "node_modules/gensync": { "version": "1.0.0-beta.2", @@ -3846,6 +3956,31 @@ "node": "6.* || 8.* || >= 10.*" } }, + "node_modules/get-intrinsic": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", + "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "es-define-property": "^1.0.1", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.1.1", + "function-bind": "^1.1.2", + "get-proto": "^1.0.1", + "gopd": "^1.2.0", + "has-symbols": "^1.1.0", + "hasown": "^2.0.2", + "math-intrinsics": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/get-package-type": { "version": "0.1.0", "resolved": "https://registry.npmjs.org/get-package-type/-/get-package-type-0.1.0.tgz", @@ -3855,6 +3990,20 @@ "node": ">=8.0.0" } }, + "node_modules/get-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz", + "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", + "dev": true, + "license": "MIT", + "dependencies": { + "dunder-proto": "^1.0.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + } + }, "node_modules/get-stream": { "version": "4.1.0", "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-4.1.0.tgz", @@ -3955,6 +4104,19 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/gopd": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", + "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/graceful-fs": { "version": "4.2.10", "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.10.tgz", @@ -4001,6 +4163,35 @@ "node": ">=4" } }, + "node_modules/has-symbols": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", + "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": 
"https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-tostringtag": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz", + "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-symbols": "^1.0.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/has-value": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/has-value/-/has-value-1.0.0.tgz", @@ -4064,6 +4255,19 @@ "node": ">=0.10.0" } }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, "node_modules/hosted-git-info": { "version": "2.8.9", "resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-2.8.9.tgz", @@ -6553,6 +6757,16 @@ "node": ">= 12" } }, + "node_modules/math-intrinsics": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", + "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, "node_modules/merge-stream": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", @@ -7059,6 +7273,14 @@ "url": "https://github.com/sponsors/jonschlinkert" } }, + "node_modules/pipe": { + "version": "0.0.2", + "resolved": "https://registry.npmjs.org/pipe/-/pipe-0.0.2.tgz", + "integrity": "sha512-67s0/X7rv2PX1sl64FQqC0qQuSpd1tv8Wh6c+U1lprj6Q7NxDYulCxZTbVbDvc/HSpZLYh7Oo821xReXSCZikQ==", + "engines": { + "node": ">=0.4.8" + } + }, "node_modules/pirates": { "version": "4.0.5", "resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.5.tgz", @@ -8539,6 +8761,25 @@ "node": ">=4.2.0" } }, + "node_modules/underscore": { + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/underscore/-/underscore-1.1.6.tgz", + "integrity": "sha512-aqSzrO92Cjmeo8G7F49+ZHWBo3IJpjpsUZZaqfOHJGN61flbpLxQw/sP91p4kf/2+nkFrG6AG2WHlJh6RCf+/g==", + "engines": { + "node": "*" + } + }, + "node_modules/underscore.string": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/underscore.string/-/underscore.string-1.1.4.tgz", + "integrity": "sha512-WsF8NWzIbTvxUaSOpSLq+AiO0tzweXdWQZ4w9Op8S/1BT9Fh7hCS7bfrF17vZu9kJg3pcqO+8WXfQSr1ah0f2g==", + "dependencies": { + "underscore": "1.1.6" + }, + "engines": { + "node": "*" + } + }, "node_modules/union-value": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/union-value/-/union-value-1.0.1.tgz",
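The quantize_fp8/dequantize_fp8 helpers in the test above pick one power-of-two scale per tile so that every tile fits into the FP8 E4M3 range before casting. A minimal NumPy-only sketch of that per-tile scale selection, assuming the same E4M3 maximum of 448.0 that the helpers obtain from torch.finfo; the array and tile sizes are illustrative:

import numpy as np

FP8_E4M3_MAX = 448.0  # torch.finfo(torch.float8_e4m3fn).max, as read by the test helpers

def per_tile_scales(x: np.ndarray, tile_m: int, tile_k: int) -> np.ndarray:
    """Return one power-of-two scale per (tile_m, tile_k) tile of a 2D array."""
    m, k = x.shape
    assert m % tile_m == 0 and k % tile_k == 0
    tiles = x.reshape(m // tile_m, tile_m, k // tile_k, tile_k)
    amax = np.abs(tiles).max(axis=(1, 3)).clip(min=1e-4)
    # Round each scale up to the next power of two, like quantize_fp8 does.
    return np.exp2(np.ceil(np.log2(amax / FP8_E4M3_MAX)))

x = np.random.randn(256, 512).astype(np.float32)
scales = per_tile_scales(x, tile_m=1, tile_k=128)  # shape (256, 4) for (1, 128) tiles
x_scaled = x / np.repeat(scales, 128, axis=1)      # divide each tile by its own scale
assert np.abs(x_scaled).max() <= FP8_E4M3_MAX      # now safe to cast to FP8 E4M3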