Batch reading content files to prevent too many open files error #12079

Merged · 5 commits · Sep 25, 2023
1 change: 1 addition & 0 deletions CHANGELOG.md
```diff
@@ -18,6 +18,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 - Fix incorrectly generated CSS when using square brackets inside arbitrary properties ([#11709](https://github.com/tailwindlabs/tailwindcss/pull/11709))
 - Make `content` optional for presets in TypeScript types ([#11730](https://github.com/tailwindlabs/tailwindcss/pull/11730))
 - Handle variable colors that have variable fallback values ([#12049](https://github.com/tailwindlabs/tailwindcss/pull/12049))
+- Batch reading content files to prevent `too many open files` error ([#12079](https://github.com/tailwindlabs/tailwindcss/pull/12079))
 
 ### Added
 
```
4 changes: 2 additions & 2 deletions oxide/crates/core/src/lib.rs
```diff
@@ -342,8 +342,8 @@ pub fn parse_candidate_strings(input: Vec<ChangedContent>, options: u8) -> Vec<S
 
     match (IO::from(options), Parsing::from(options)) {
         (IO::Sequential, Parsing::Sequential) => parse_all_blobs_sync(read_all_files_sync(input)),
-        (IO::Sequential, Parsing::Parallel) => parse_all_blobs_sync(read_all_files(input)),
-        (IO::Parallel, Parsing::Sequential) => parse_all_blobs(read_all_files_sync(input)),
+        (IO::Sequential, Parsing::Parallel) => parse_all_blobs(read_all_files_sync(input)),
+        (IO::Parallel, Parsing::Sequential) => parse_all_blobs_sync(read_all_files(input)),
         (IO::Parallel, Parsing::Parallel) => parse_all_blobs(read_all_files(input)),
     }
 }
```
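The two middle match arms had their strategies crossed: the `(IO::Sequential, Parsing::Parallel)` arm performed a parallel read with a sequential parse, and vice versa. The `options` byte packs both choices as bit flags combined on the JavaScript side (the diff below uses `IO.Parallel | Parsing.Parallel`). A minimal sketch of that composition — the numeric flag values here are assumptions for illustration, not the real constants from the oxide bindings:

```js
// Hypothetical flag values for illustration only; the actual IO and Parsing
// constants are exported alongside parseCandidateStrings by the oxide bindings.
const IO = { Sequential: 0b0001, Parallel: 0b0010 }
const Parsing = { Sequential: 0b0100, Parallel: 0b1000 }

// expandTailwindAtRules.js requests parallel IO and parallel parsing:
let options = IO.Parallel | Parsing.Parallel

// The Rust side decodes each half of the byte independently, so the
// (IO::Sequential, Parsing::Parallel) arm must pair read_all_files_sync
// (sequential IO) with parse_all_blobs (parallel parsing) — which is
// exactly the pairing this change restores.
```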
68 changes: 36 additions & 32 deletions src/lib/expandTailwindAtRules.js
```diff
@@ -135,43 +135,47 @@ export default function expandTailwindAtRules(context) {
 
   env.DEBUG && console.time('Reading changed files')
 
-  if (flagEnabled(context.tailwindConfig, 'oxideParser')) {
-    let rustParserContent = []
-    let regexParserContent = []
-
-    for (let item of context.changedContent) {
-      let transformer = getTransformer(context.tailwindConfig, item.extension)
-      let extractor = getExtractor(context, item.extension)
-
-      if (transformer === builtInTransformers.DEFAULT && extractor?.DEFAULT_EXTRACTOR === true) {
-        rustParserContent.push(item)
-      } else {
-        regexParserContent.push([item, { transformer, extractor }])
-      }
-    }
+  /** @type {[item: {file?: string, content?: string}, meta: {transformer: any, extractor: any}][]} */
+  let regexParserContent = []
+
+  /** @type {{file?: string, content?: string}[]} */
+  let rustParserContent = []
+
+  for (let item of context.changedContent) {
+    let transformer = getTransformer(context.tailwindConfig, item.extension)
+    let extractor = getExtractor(context, item.extension)
+
+    if (
+      flagEnabled(context.tailwindConfig, 'oxideParser') &&
+      transformer === builtInTransformers.DEFAULT &&
+      extractor?.DEFAULT_EXTRACTOR === true
+    ) {
+      rustParserContent.push(item)
+    } else {
+      regexParserContent.push([item, { transformer, extractor }])
+    }
+  }
 
-    if (rustParserContent.length > 0) {
-      for (let candidate of parseCandidateStrings(
-        rustParserContent,
-        IO.Parallel | Parsing.Parallel
-      )) {
-        candidates.add(candidate)
-      }
-    }
+  // Read files using our newer, faster parser when:
+  // - Oxide is enabled; AND
+  // - The file is using default transformers and extractors
+  if (rustParserContent.length > 0) {
+    for (let candidate of parseCandidateStrings(
+      rustParserContent,
+      IO.Parallel | Parsing.Parallel
+    )) {
+      candidates.add(candidate)
+    }
+  }
 
-    if (regexParserContent.length > 0) {
-      await Promise.all(
-        regexParserContent.map(async ([{ file, content }, { transformer, extractor }]) => {
-          content = file ? await fs.promises.readFile(file, 'utf8') : content
-          getClassCandidates(transformer(content), extractor, candidates, seen)
-        })
-      )
-    }
-  } else {
-    await Promise.all(
-      context.changedContent.map(async ({ file, content, extension }) => {
-        let transformer = getTransformer(context.tailwindConfig, extension)
-        let extractor = getExtractor(context, extension)
+  // Otherwise, read any files in node and parse with regexes
+  const BATCH_SIZE = 500
+
+  for (let i = 0; i < regexParserContent.length; i += BATCH_SIZE) {
+    let batch = regexParserContent.slice(i, i + BATCH_SIZE)
+
+    await Promise.all(
+      batch.map(async ([{ file, content }, { transformer, extractor }]) => {
         content = file ? await fs.promises.readFile(file, 'utf8') : content
         getClassCandidates(transformer(content), extractor, candidates, seen)
       })
```
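The heart of the fix is the `BATCH_SIZE` loop: rather than issuing one `fs.promises.readFile` per content file inside a single `Promise.all` — which can exhaust the process's file-descriptor limit on large projects — files are read in awaited batches of 500, capping how many are open at once. A self-contained sketch of the pattern (the function name and default batch size here are illustrative, not part of the PR):

```js
import fs from 'fs'

// Read a list of files in fixed-size batches so that at most `batchSize`
// file descriptors are open concurrently, avoiding the EMFILE
// ("too many open files") error on large file sets.
async function readFilesInBatches(paths, batchSize = 500) {
  let contents = []
  for (let i = 0; i < paths.length; i += batchSize) {
    let batch = paths.slice(i, i + batchSize)
    // Each batch is fully awaited before the next batch starts reading,
    // so the descriptor count never exceeds batchSize.
    contents.push(...(await Promise.all(batch.map((p) => fs.promises.readFile(p, 'utf8')))))
  }
  return contents
}
```

Within a batch the reads still run concurrently, so throughput stays close to the fully parallel version while the open-file count stays bounded.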