package bundler
import (
"bytes"
"crypto/sha1"
"encoding/base64"
"encoding/binary"
"fmt"
"hash"
"math/rand"
"sort"
"strings"
"sync"
"time"
"github.com/evanw/esbuild/internal/ast"
"github.com/evanw/esbuild/internal/compat"
"github.com/evanw/esbuild/internal/config"
"github.com/evanw/esbuild/internal/css_ast"
"github.com/evanw/esbuild/internal/css_printer"
"github.com/evanw/esbuild/internal/fs"
"github.com/evanw/esbuild/internal/helpers"
"github.com/evanw/esbuild/internal/js_ast"
"github.com/evanw/esbuild/internal/js_lexer"
"github.com/evanw/esbuild/internal/js_printer"
"github.com/evanw/esbuild/internal/logger"
"github.com/evanw/esbuild/internal/renamer"
"github.com/evanw/esbuild/internal/resolver"
"github.com/evanw/esbuild/internal/runtime"
"github.com/evanw/esbuild/internal/sourcemap"
)
type bitSet struct {
entries []byte
}
func newBitSet(bitCount uint) bitSet {
return bitSet{make([]byte, (bitCount+7)/8)}
}
func (bs bitSet) hasBit(bit uint) bool {
return (bs.entries[bit/8] & (1 << (bit & 7))) != 0
}
func (bs bitSet) setBit(bit uint) {
bs.entries[bit/8] |= 1 << (bit & 7)
}
func (bs bitSet) equals(other bitSet) bool {
return bytes.Equal(bs.entries, other.entries)
}
func (bs *bitSet) isAllZeros() bool {
for _, v := range bs.entries {
if v != 0 {
return false
}
}
return true
}
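// A minimal usage sketch (hypothetical helper, not part of the linker): each
// bit tracks whether a file is reachable from the corresponding entry point.
func exampleBitSetUsage() bool {
	bits := newBitSet(3)  // one bit per entry point
	bits.setBit(0)        // reachable from entry point 0
	bits.setBit(2)        // reachable from entry point 2
	return bits.hasBit(1) // false: not reachable from entry point 1
}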
type linkerContext struct {
options *config.Options
log logger.Log
fs fs.FS
res resolver.Resolver
symbols js_ast.SymbolMap
entryPoints []entryMeta
files []file
// This helps avoid an infinite loop when matching imports to exports
cycleDetector []importTracker
// We should avoid traversing all files in the bundle, because the linker
// should be able to run a linking operation on a large bundle where only
// a few files are needed (e.g. an incremental compilation scenario). This
// holds all files that could possibly be reached through the entry points.
// If you need to iterate over all files in the linking operation, iterate
// over this array. This array is also sorted in a deterministic ordering
// to help ensure deterministic builds (source indices are random).
reachableFiles []uint32
// This maps from unstable source index to stable reachable file index. This
// is useful as a deterministic key for sorting if you need to sort something
// containing a source index (such as "js_ast.Ref" symbol references).
stableSourceIndices []uint32
// We may need to refer to the CommonJS "module" symbol for exports
unboundModuleRef js_ast.Ref
// This represents the parallel computation of source map related data.
// Calling this will block until the computation is done. The resulting value
// is shared between threads and must be treated as immutable.
dataForSourceMaps func() []dataForSourceMap
// The unique key prefix is a random string that is unique to every linking
// operation. It is used as a prefix for the unique keys assigned to every
// chunk. These unique keys are used to identify each chunk before the final
// output paths have been computed.
uniqueKeyPrefix string
uniqueKeyPrefixBytes []byte // This is just "uniqueKeyPrefix" in byte form
}
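// A minimal sketch (hypothetical helper, not part of the linker): sorting
// symbol references by the stable index of their source file, so the result
// doesn't depend on the randomized source indices mentioned above.
func exampleStableSort(c *linkerContext, refs []js_ast.Ref) {
	sort.Slice(refs, func(i int, j int) bool {
		return c.stableSourceIndices[refs[i].OuterIndex] < c.stableSourceIndices[refs[j].OuterIndex]
	})
}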
type wrapKind uint8
const (
wrapNone wrapKind = iota
// The module will be bundled CommonJS-style like this:
//
// // foo.ts
// let require_foo = __commonJS((exports, module) => {
// exports.foo = 123;
// });
//
// // bar.ts
// let foo = flag ? require_foo() : null;
//
wrapCJS
// The module will be bundled ESM-style like this:
//
// // foo.ts
// var foo, foo_exports = {};
// __export(foo_exports, {
// foo: () => foo
// });
// let init_foo = __esm(() => {
// foo = 123;
// });
//
// // bar.ts
// let foo = flag ? (init_foo(), foo_exports) : null;
//
wrapESM
)
// This contains linker-specific metadata corresponding to a "file" struct
// from the initial scan phase of the bundler. It's separated out because it's
// conceptually only used for a single linking operation and because multiple
// linking operations may be happening in parallel with different metadata for
// the same file.
type jsMeta struct {
partMeta []partMeta
// This is the index to the automatically-generated part containing code that
// calls "__export(exports, { ... getters ... })". This is used to generate
// getters on an exports object for ES6 export statements, and is used both
// for ES6 star imports and CommonJS-style modules.
nsExportPartIndex uint32
// This is only for TypeScript files. If an import symbol is in this map, it
// means the import couldn't be found and doesn't actually exist. This is not
// an error in TypeScript because the import is probably just a type.
//
// Normally we remove all unused imports for TypeScript files during parsing,
// which automatically removes type-only imports. But there are certain re-
// export situations where it's impossible to tell if an import is a type or
// not:
//
// import {typeOrNotTypeWhoKnows} from 'path';
// export {typeOrNotTypeWhoKnows};
//
// Really people should be using the TypeScript "isolatedModules" flag with
// bundlers like this one that compile TypeScript files independently without
// type checking. That causes the TypeScript type checker to emit the error
// "Re-exporting a type when the '--isolatedModules' flag is provided requires
// using 'export type'." But we try to be robust to such code anyway.
isProbablyTypeScriptType map[js_ast.Ref]bool
// Imports are matched with exports in a separate pass from when the matched
// exports are actually bound to the imports. Here "binding" means adding non-
// local dependencies on the parts in the exporting file that declare the
// exported symbol to all parts in the importing file that use the imported
// symbol.
//
// This must be a separate pass because of the "probably TypeScript type"
// check above. We can't generate the part for the export namespace until
// we've matched imports with exports because the generated code must omit
// type-only imports in the export namespace code. And we can't bind exports
// to imports until the part for the export namespace is generated since that
// part needs to participate in the binding.
//
// This array holds the deferred imports to bind so the pass can be split
// into two separate passes.
importsToBind map[js_ast.Ref]importData
isAsyncOrHasAsyncDependency bool
wrap wrapKind
// If true, the "__export(exports, { ... })" call will be force-included even
// if there are no parts that reference "exports". Otherwise this call will
// be removed due to the tree shaking pass. This is used for entry point
// files when code related to the current output format needs to reference
// the "exports" variable.
forceIncludeExportsForEntryPoint bool
// This is set when we need to pull in the "__export" symbol into the part
// at "nsExportPartIndex". This can't be done in "createExportsForFile"
// because of concurrent map hazards. Instead, it must be done later.
needsExportSymbolFromRuntime bool
needsMarkAsModuleSymbolFromRuntime bool
// The index of the automatically-generated part used to represent the
// CommonJS or ESM wrapper. This part is empty and is only useful for tree
// shaking and code splitting. The wrapper can't be inserted into the part
// because the wrapper contains other parts, which can't be represented by
// the current part system.
wrapperPartIndex ast.Index32
// This includes both named exports and re-exports.
//
// Named exports come from explicit export statements in the original file,
// and are copied from the "NamedExports" field in the AST.
//
// Re-exports come from other files and are the result of resolving export
// star statements (i.e. "export * from 'foo'").
resolvedExports map[string]exportData
resolvedExportStar *exportData
// Never iterate over "resolvedExports" directly. Instead, iterate over this
// array. Some exports in that map aren't meant to end up in generated code.
// This array excludes these exports and is also sorted, which avoids non-
// determinism due to random map iteration order.
sortedAndFilteredExportAliases []string
// If this is an entry point, this array holds a reference to one free
// temporary symbol for each entry in "sortedAndFilteredExportAliases".
// These may be needed to store copies of CommonJS re-exports in ESM.
cjsExportCopies []js_ast.Ref
}
type importData struct {
// This is an array of intermediate statements that re-exported this symbol
// in a chain before getting to the final symbol. This can be done either with
// "export * from" or "export {} from". If this is done with "export * from"
// then this may not be the result of a single chain but may instead form
// a diamond shape if this same symbol was re-exported multiple times from
// different files.
reExports []nonLocalDependency
sourceIndex uint32
nameLoc logger.Loc // Optional, goes with sourceIndex, ignore if zero
ref js_ast.Ref
}
type exportData struct {
ref js_ast.Ref
// Export star resolution happens first before import resolution. That means
// it cannot yet determine if duplicate names from export star resolution are
// ambiguous (point to different symbols) or not (point to the same symbol).
// This issue can happen in the following scenario:
//
// // entry.js
// export * from './a'
// export * from './b'
//
// // a.js
// export * from './c'
//
// // b.js
// export {x} from './c'
//
// // c.js
// export let x = 1, y = 2
//
// In this case "entry.js" should have two exports "x" and "y", neither of
// which are ambiguous. To handle this case, ambiguity resolution must be
// deferred until import resolution time. That is done using this array.
potentiallyAmbiguousExportStarRefs []importData
// This is the file that the named export above came from. This will be
// different from the file that contains this object if this is a re-export.
sourceIndex uint32
nameLoc logger.Loc // Optional, goes with sourceIndex, ignore if zero
}
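// A minimal sketch (hypothetical helper, not part of the linker): per the
// comment above, an export is ambiguous only if some deferred export star
// candidate points to a different symbol than the export itself.
func exampleIsAmbiguous(export exportData) bool {
	for _, candidate := range export.potentiallyAmbiguousExportStarRefs {
		if candidate.ref != export.ref {
			return true // two different symbols share this export alias
		}
	}
	return false // all candidates resolve to the same symbol
}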
// This contains linker-specific metadata corresponding to a "js_ast.Part" struct
// from the initial scan phase of the bundler. It's separated out because it's
// conceptually only used for a single linking operation and because multiple
// linking operations may be happening in parallel with different metadata for
// the same part in the same file.
type partMeta struct {
// This part is considered live if any entry point can reach this part. In
// addition, we want to avoid visiting a given part twice during the depth-
// first live code detection traversal for a single entry point. This index
// solves both of these problems at once. This part is live if this index
// is valid, and this part should not be re-visited if this index equals
// the index of the current entry point being visited.
lastEntryBit ast.Index32
// These are dependencies that come from other files via import statements.
nonLocalDependencies []nonLocalDependency
}
func (pm *partMeta) isLive() bool {
return pm.lastEntryBit.IsValid()
}
type nonLocalDependency struct {
sourceIndex uint32
partIndex uint32
}
type partRange struct {
sourceIndex uint32
partIndexBegin uint32
partIndexEnd uint32
}
type chunkInfo struct {
// This is a random string and is used to represent the output path of this
// chunk before the final output path has been computed.
uniqueKey string
filesWithPartsInChunk map[uint32]bool
filesInChunkInOrder []uint32
partsInChunkInOrder []partRange
entryBits bitSet
// This information is only useful if "isEntryPoint" is true
isEntryPoint bool
sourceIndex uint32 // An index into "c.files"
entryPointBit uint // An index into "c.entryPoints"
// For code splitting
crossChunkImports []uint32
// This is the representation-specific information
chunkRepr chunkRepr
// This is the final path of this chunk relative to the output directory, but
// without the substitution of the final hash (since it hasn't been computed).
finalTemplate []config.PathTemplate
// This is the final path of this chunk relative to the output directory. It
// is the substitution of the final hash into "finalTemplate".
finalRelPath string
// When this chunk is initially generated in isolation, the output pieces
// will contain slices of the output with the unique keys of other chunks
// omitted. The output hash will contain the hash of those pieces. At this
// point, this variable is the current value of the output hash.
isolatedChunkHash []byte
// Later on in the linking process, the hashes of the referenced other chunks
// will be mixed into the hash. This is separated into two phases like this
// to handle cycles in the chunk import graph.
outputPieces []outputPiece
outputHash hash.Hash
// Other fields relating to the output file for this chunk
jsonMetadataChunkCallback func(finalOutputSize int) []byte
outputSourceMap sourcemap.SourceMapPieces
isExecutable bool
}
// This is a chunk of source code followed by a reference to another chunk. For
// example, the file "@import 'CHUNK0001'; body { color: black; }" would be
// represented by two pieces, one with the data "@import '" and another with the
// data "'; body { color: black; }". The first would have the chunk index 1 and
// the second would have an invalid chunk index.
type outputPiece struct {
data []byte
// Note: This may be invalid. For example, the chunk may not contain any
// imports, in which case there is one piece with data and no chunk index.
chunkIndex ast.Index32
}
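// A sketch of the example in the comment above (hypothetical values, not part
// of the linker): the file "@import 'CHUNK0001'; body { color: black; }"
// splits into two pieces, assuming the referenced chunk has index 1.
func exampleOutputPieces() []outputPiece {
	return []outputPiece{
		{data: []byte("@import '"), chunkIndex: ast.MakeIndex32(1)},
		{data: []byte("'; body { color: black; }")}, // chunkIndex left invalid
	}
}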
type chunkRepr interface{ isChunk() }
func (*chunkReprJS) isChunk() {}
func (*chunkReprCSS) isChunk() {}
type chunkReprJS struct {
// For code splitting
crossChunkPrefixStmts []js_ast.Stmt
crossChunkSuffixStmts []js_ast.Stmt
exportsToOtherChunks map[js_ast.Ref]string
importsFromOtherChunks map[uint32]crossChunkImportItemArray
}
type chunkReprCSS struct {
}
// Returns a log where "log.HasErrors()" only returns true if any errors have
// been logged since this call. This is useful when there have already been
// errors logged by other linkers that share the same log.
func wrappedLog(log logger.Log) logger.Log {
var mutex sync.Mutex
var hasErrors bool
addMsg := log.AddMsg
log.AddMsg = func(msg logger.Msg) {
if msg.Kind == logger.Error {
mutex.Lock()
defer mutex.Unlock()
hasErrors = true
}
addMsg(msg)
}
log.HasErrors = func() bool {
mutex.Lock()
defer mutex.Unlock()
return hasErrors
}
return log
}
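// A minimal usage sketch (hypothetical, not part of the linker): the wrapped
// log only reports errors added after wrapping, even if the underlying log
// already saw errors from a previous linking operation.
func exampleWrappedLog(log logger.Log) {
	wrapped := wrappedLog(log)
	_ = wrapped.HasErrors() // false so far, regardless of the original log's state
	wrapped.AddMsg(logger.Msg{Kind: logger.Error})
	_ = wrapped.HasErrors() // now true
}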
func newLinkerContext(
options *config.Options,
log logger.Log,
fs fs.FS,
res resolver.Resolver,
files []file,
entryPoints []entryMeta,
reachableFiles []uint32,
dataForSourceMaps func() []dataForSourceMap,
) linkerContext {
log = wrappedLog(log)
// Clone information about symbols and files so we don't mutate the input data
c := linkerContext{
options: options,
log: log,
fs: fs,
res: res,
entryPoints: append([]entryMeta{}, entryPoints...),
files: make([]file, len(files)),
symbols: js_ast.NewSymbolMap(len(files)),
reachableFiles: reachableFiles,
dataForSourceMaps: dataForSourceMaps,
}
// Clone various things since we may mutate them later
for _, sourceIndex := range c.reachableFiles {
file := files[sourceIndex]
switch repr := file.repr.(type) {
case *reprJS:
// Clone the representation
{
clone := *repr
repr = &clone
file.repr = repr
}
// Clone the symbol map
fileSymbols := append([]js_ast.Symbol{}, repr.ast.Symbols...)
c.symbols.Outer[sourceIndex] = fileSymbols
repr.ast.Symbols = nil
// Clone the parts
repr.ast.Parts = append([]js_ast.Part{}, repr.ast.Parts...)
for i, part := range repr.ast.Parts {
clone := make(map[js_ast.Ref]js_ast.SymbolUse, len(part.SymbolUses))
for ref, uses := range part.SymbolUses {
clone[ref] = uses
}
repr.ast.Parts[i].SymbolUses = clone
}
// Clone the import records
repr.ast.ImportRecords = append([]ast.ImportRecord{}, repr.ast.ImportRecords...)
// Clone the import map
namedImports := make(map[js_ast.Ref]js_ast.NamedImport, len(repr.ast.NamedImports))
for k, v := range repr.ast.NamedImports {
namedImports[k] = v
}
repr.ast.NamedImports = namedImports
// Clone the export map
resolvedExports := make(map[string]exportData)
for alias, name := range repr.ast.NamedExports {
resolvedExports[alias] = exportData{
ref: name.Ref,
sourceIndex: sourceIndex,
nameLoc: name.AliasLoc,
}
}
// Clone the top-level symbol-to-parts map
topLevelSymbolToParts := make(map[js_ast.Ref][]uint32)
for ref, parts := range repr.ast.TopLevelSymbolToParts {
topLevelSymbolToParts[ref] = parts
}
repr.ast.TopLevelSymbolToParts = topLevelSymbolToParts
// Clone the top-level scope so we can generate more variables
{
clone := &js_ast.Scope{}
*clone = *repr.ast.ModuleScope
clone.Generated = append([]js_ast.Ref{}, clone.Generated...)
repr.ast.ModuleScope = clone
}
// Also associate some default metadata with the file
repr.meta.partMeta = make([]partMeta, len(repr.ast.Parts))
repr.meta.resolvedExports = resolvedExports
repr.meta.isProbablyTypeScriptType = make(map[js_ast.Ref]bool)
repr.meta.importsToBind = make(map[js_ast.Ref]importData)
case *reprCSS:
// Clone the representation
{
clone := *repr
repr = &clone
file.repr = repr
}
// Clone the import records
repr.ast.ImportRecords = append([]ast.ImportRecord{}, repr.ast.ImportRecords...)
}
// All files start off as far as possible from an entry point
file.distanceFromEntryPoint = ^uint32(0)
// Update the file in our copy of the file array
c.files[sourceIndex] = file
}
// Create a way to convert source indices to a stable ordering
c.stableSourceIndices = make([]uint32, len(c.files))
for stableIndex, sourceIndex := range c.reachableFiles {
c.stableSourceIndices[sourceIndex] = uint32(stableIndex)
}
// Mark all entry points so we don't add them again for import() expressions
for _, entryPoint := range entryPoints {
file := &c.files[entryPoint.sourceIndex]
file.entryPointKind = entryPointUserSpecified
if repr, ok := file.repr.(*reprJS); ok {
// Loaders default to CommonJS when they are the entry point and the output
// format is not ESM-compatible since that avoids generating the ESM-to-CJS
// machinery.
if repr.ast.HasLazyExport && (c.options.Mode == config.ModePassThrough ||
(c.options.Mode == config.ModeConvertFormat && !c.options.OutputFormat.KeepES6ImportExportSyntax())) {
repr.ast.ExportsKind = js_ast.ExportsCommonJS
}
// Entry points with ES6 exports must generate an exports object when
// targeting non-ES6 formats. Note that the IIFE format only needs this
// when the global name is present, since that's the only way the exports
// can actually be observed externally.
if repr.ast.ExportKeyword.Len > 0 && (options.OutputFormat == config.FormatCommonJS ||
(options.OutputFormat == config.FormatIIFE && len(options.GlobalName) > 0)) {
repr.ast.UsesExportsRef = true
repr.meta.forceIncludeExportsForEntryPoint = true
}
}
}
// Allocate a new unbound symbol called "module" in case we need it later
if c.options.OutputFormat == config.FormatCommonJS {
runtimeSymbols := &c.symbols.Outer[runtime.SourceIndex]
runtimeScope := c.files[runtime.SourceIndex].repr.(*reprJS).ast.ModuleScope
c.unboundModuleRef = js_ast.Ref{OuterIndex: runtime.SourceIndex, InnerIndex: uint32(len(*runtimeSymbols))}
runtimeScope.Generated = append(runtimeScope.Generated, c.unboundModuleRef)
*runtimeSymbols = append(*runtimeSymbols, js_ast.Symbol{
Kind: js_ast.SymbolUnbound,
OriginalName: "module",
Link: js_ast.InvalidRef,
})
} else {
c.unboundModuleRef = js_ast.InvalidRef
}
return c
}
func (c *linkerContext) addPartToFile(sourceIndex uint32, part js_ast.Part, partMeta partMeta) uint32 {
if part.LocalDependencies == nil {
part.LocalDependencies = make(map[uint32]bool)
}
if part.SymbolUses == nil {
part.SymbolUses = make(map[js_ast.Ref]js_ast.SymbolUse)
}
repr := c.files[sourceIndex].repr.(*reprJS)
partIndex := uint32(len(repr.ast.Parts))
repr.ast.Parts = append(repr.ast.Parts, part)
repr.meta.partMeta = append(repr.meta.partMeta, partMeta)
return partIndex
}
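// A minimal usage sketch (hypothetical, not part of the linker): appending an
// empty part to the runtime file. "repr.ast.Parts" and "repr.meta.partMeta"
// grow together, so part indices stay valid for both slices.
func exampleAddPart(c *linkerContext) uint32 {
	return c.addPartToFile(runtime.SourceIndex, js_ast.Part{}, partMeta{})
}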
func (c *linkerContext) generateUniqueKeyPrefix() bool {
var data [12]byte
rand.Seed(time.Now().UnixNano())
if _, err := rand.Read(data[:]); err != nil {
c.log.AddError(nil, logger.Loc{}, fmt.Sprintf("Failed to read from randomness source: %s", err.Error()))
return false
}
// The encoded form is 16 bytes and shouldn't generate escape characters when put into strings
c.uniqueKeyPrefix = base64.URLEncoding.EncodeToString(data[:])
c.uniqueKeyPrefixBytes = []byte(c.uniqueKeyPrefix)
return true
}
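// A sketch of the size math (not part of the linker): 12 raw bytes encode to
// exactly 16 base64url characters, with no padding characters because 12 is
// divisible by 3.
func exampleUniqueKeyLength() int {
	return base64.URLEncoding.EncodedLen(12) // 16
}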
func (c *linkerContext) link() []OutputFile {
if !c.generateUniqueKeyPrefix() {
return nil
}
c.scanImportsAndExports()
// Stop now if there were errors
if c.log.HasErrors() {
return []OutputFile{}
}
c.markPartsReachableFromEntryPoints()
if c.options.Mode == config.ModePassThrough {
for _, entryPoint := range c.entryPoints {
c.preventExportsFromBeingRenamed(entryPoint.sourceIndex)
}
}
chunks := c.computeChunks()
c.computeCrossChunkDependencies(chunks)
// Make sure calls to "js_ast.FollowSymbols()" in parallel goroutines after this
// won't hit concurrent map mutation hazards
js_ast.FollowAllSymbols(c.symbols)
return c.generateChunksInParallel(chunks)
}
func (c *linkerContext) generateChunksInParallel(chunks []chunkInfo) []OutputFile {
// Generate each chunk on a separate goroutine
generateWaitGroup := sync.WaitGroup{}
generateWaitGroup.Add(len(chunks))
for chunkIndex := range chunks {
switch chunks[chunkIndex].chunkRepr.(type) {
case *chunkReprJS:
go c.generateChunkJS(chunks, chunkIndex, &generateWaitGroup)
case *chunkReprCSS:
go c.generateChunkCSS(chunks, chunkIndex, &generateWaitGroup)
}
}
generateWaitGroup.Wait()
// Compute the final hashes of each chunk. This can technically be done in
// parallel but it probably doesn't matter so much because we're not hashing
// that much data.
visited := make([]uint32, len(chunks))
var finalHash [sha1.Size]byte
for chunkIndex := range chunks {
chunk := &chunks[chunkIndex]
// Compute the final hash using the isolated hashes of the dependencies
appendIsolatedHashesForImportedChunks(chunk.outputHash, chunks, uint32(chunkIndex), visited, ^uint32(chunkIndex))
chunk.outputHash.Sum(finalHash[:0])
// Render the last remaining placeholder in the template
hash := hashForFileName(finalHash)
chunk.finalRelPath = config.TemplateToString(config.SubstituteTemplate(chunk.finalTemplate, config.PathPlaceholders{
Hash: &hash,
}))
}
// Generate the final output files by joining file pieces together
var resultsWaitGroup sync.WaitGroup
results := make([][]OutputFile, len(chunks))
resultsWaitGroup.Add(len(chunks))
for chunkIndex, chunk := range chunks {
go func(chunkIndex int, chunk chunkInfo) {
var outputFiles []OutputFile
// Each file may optionally contain additional files to be copied to the
// output directory. This is used by the "file" loader.
for _, sourceIndex := range chunk.filesInChunkInOrder {
outputFiles = append(outputFiles, c.files[sourceIndex].additionalFiles...)
}
// Path substitution for the chunk itself
finalRelDir := c.fs.Dir(chunk.finalRelPath)
outputContentsJoiner, outputSourceMapShifts := c.substituteFinalPaths(chunks, chunk.outputPieces, func(finalRelPathForImport string) string {
return c.pathBetweenChunks(finalRelDir, finalRelPathForImport)
})
// Generate the optional source map for this chunk
if c.options.SourceMap != config.SourceMapNone && chunk.outputSourceMap.Suffix != nil {
outputSourceMap := chunk.outputSourceMap.Finalize(outputSourceMapShifts)
finalRelPathForSourceMap := chunk.finalRelPath + ".map"
// Potentially write a trailing source map comment
switch c.options.SourceMap {
case config.SourceMapLinkedWithComment:
importPath := c.pathBetweenChunks(finalRelDir, finalRelPathForSourceMap)
importPath = strings.TrimPrefix(importPath, "./")
outputContentsJoiner.EnsureNewlineAtEnd()
outputContentsJoiner.AddString("//# sourceMappingURL=")
outputContentsJoiner.AddString(importPath)
outputContentsJoiner.AddString("\n")
case config.SourceMapInline, config.SourceMapInlineAndExternal:
outputContentsJoiner.EnsureNewlineAtEnd()
outputContentsJoiner.AddString("//# sourceMappingURL=data:application/json;base64,")
outputContentsJoiner.AddString(base64.StdEncoding.EncodeToString(outputSourceMap))
outputContentsJoiner.AddString("\n")
}
// Potentially write the external source map file
switch c.options.SourceMap {
case config.SourceMapLinkedWithComment, config.SourceMapInlineAndExternal, config.SourceMapExternalWithoutComment:
outputFiles = append(outputFiles, OutputFile{
AbsPath: c.fs.Join(c.options.AbsOutputDir, finalRelPathForSourceMap),
Contents: outputSourceMap,
jsonMetadataChunk: fmt.Sprintf(
"{\n \"imports\": [],\n \"exports\": [],\n \"inputs\": {},\n \"bytes\": %d\n }", len(outputSourceMap)),
})
}
}
// Finalize the output contents
outputContents := outputContentsJoiner.Done()
// Path substitution for the JSON metadata
var jsonMetadataChunk string
if c.options.NeedsMetafile {
jsonMetadataChunkPieces := c.breakOutputIntoPieces(chunk.jsonMetadataChunkCallback(len(outputContents)), uint32(len(chunks)))
jsonMetadataChunkBytes, _ := c.substituteFinalPaths(chunks, jsonMetadataChunkPieces, func(finalRelPathForImport string) string {
return c.res.PrettyPath(logger.Path{Text: c.fs.Join(c.options.AbsOutputDir, finalRelPathForImport), Namespace: "file"})
})
jsonMetadataChunk = string(jsonMetadataChunkBytes.Done())
}
// Generate the output file for this chunk
outputFiles = append(outputFiles, OutputFile{
AbsPath: c.fs.Join(c.options.AbsOutputDir, chunk.finalRelPath),
Contents: outputContents,
jsonMetadataChunk: jsonMetadataChunk,
IsExecutable: chunk.isExecutable,
})
results[chunkIndex] = outputFiles
resultsWaitGroup.Done()
}(chunkIndex, chunk)
}
resultsWaitGroup.Wait()
// Merge the output files from the different goroutines together in order
outputFilesLen := 0
for _, result := range results {
outputFilesLen += len(result)
}
outputFiles := make([]OutputFile, 0, outputFilesLen)
for _, result := range results {
outputFiles = append(outputFiles, result...)
}
return outputFiles
}
// Given a set of output pieces (i.e. a buffer already divided into the spans
// between import paths), substitute the final import paths in and then join
// everything into a single byte buffer.
func (c *linkerContext) substituteFinalPaths(
chunks []chunkInfo,
pieces []outputPiece,
modifyPath func(string) string,
) (j helpers.Joiner, shifts []sourcemap.SourceMapShift) {
var shift sourcemap.SourceMapShift
shifts = make([]sourcemap.SourceMapShift, 0, len(pieces))
shifts = append(shifts, shift)
for _, piece := range pieces {
var dataOffset sourcemap.LineColumnOffset
j.AddBytes(piece.data)
dataOffset.AdvanceBytes(piece.data)
shift.Before.Add(dataOffset)
shift.After.Add(dataOffset)
if piece.chunkIndex.IsValid() {
chunk := chunks[piece.chunkIndex.GetIndex()]
importPath := modifyPath(chunk.finalRelPath)
j.AddString(importPath)
shift.Before.AdvanceString(chunk.uniqueKey)
shift.After.AdvanceString(importPath)
shifts = append(shifts, shift)
}
}
return
}
func (c *linkerContext) pathBetweenChunks(fromRelDir string, toRelPath string) string {
// Join with the public path if it has been configured
if c.options.PublicPath != "" {
return joinWithPublicPath(c.options.PublicPath, toRelPath)
}
// Otherwise, return a relative path
relPath, ok := c.fs.Rel(fromRelDir, toRelPath)
if !ok {
c.log.AddError(nil, logger.Loc{},
fmt.Sprintf("Cannot traverse from directory %q to chunk %q", fromRelDir, toRelPath))
return ""
}
// Make sure to always use forward slashes, even on Windows
relPath = strings.ReplaceAll(relPath, "\\", "/")
// Make sure the relative path doesn't start with a name, since that could
// be interpreted as a package path instead of a relative path
if !strings.HasPrefix(relPath, "./") && !strings.HasPrefix(relPath, "../") {
relPath = "./" + relPath
}
return relPath
}
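// A minimal usage sketch (hypothetical paths, not part of the linker):
// without a configured "PublicPath", imports between chunks become relative
// paths that always start with "./" or "../".
func examplePathBetweenChunks(c *linkerContext) {
	_ = c.pathBetweenChunks("pages", "pages/about.js")   // "./about.js"
	_ = c.pathBetweenChunks("pages", "chunks/shared.js") // "../chunks/shared.js"
}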
// Returns the path of this file relative to "outbase", which is then ready to
// be joined with the absolute output directory path. The directory and name
// components are returned separately for convenience.
//
// This makes sure to have the directory end in a slash so that it can be
// substituted into a path template without necessarily having a "/" after it.
// Extra slashes should get cleaned up automatically when we join it with the
// output directory.
func (c *linkerContext) pathRelativeToOutbase(
sourceIndex uint32,
entryPointBit uint,
stdExt string,
avoidIndex bool,
) (relDir string, baseName string, baseExt string) {
file := &c.files[sourceIndex]
relDir = "/"
baseExt = stdExt
// If the output path was configured explicitly, use it verbatim
if c.options.AbsOutputFile != "" {
baseName = c.fs.Base(c.options.AbsOutputFile)
// Strip off the extension
ext := c.fs.Ext(baseName)
baseName = baseName[:len(baseName)-len(ext)]
// Use the extension from the explicit output file path. However, don't do
// that if this is a CSS chunk but the entry point file is not CSS. In that
// case use the standard extension. This happens when importing CSS into JS.
if _, ok := file.repr.(*reprCSS); ok || stdExt != c.options.OutputExtensionCSS {
baseExt = ext
}
return
}
absPath := file.source.KeyPath.Text
isCustomOutputPath := false
if outPath := c.entryPoints[entryPointBit].outputPath; outPath != "" {
// Use the configured output path if present
absPath = outPath
if !c.fs.IsAbs(absPath) {
absPath = c.fs.Join(c.options.AbsOutputBase, absPath)
}
isCustomOutputPath = true
} else if file.source.KeyPath.Namespace != "file" {
// Come up with a path for virtual paths (i.e. non-file-system paths)
dir, base, _ := logger.PlatformIndependentPathDirBaseExt(absPath)
if avoidIndex && base == "index" {
_, base, _ = logger.PlatformIndependentPathDirBaseExt(dir)
}
baseName = sanitizeFilePathForVirtualModulePath(base)
return
} else {
// Heuristic: If the file is named something like "index.js", then use
// the name of the parent directory instead. This helps avoid the
// situation where many chunks are named "index" because of people
// dynamically-importing npm packages that make use of node's implicit
// "index" file name feature.
if avoidIndex {
base := c.fs.Base(absPath)
base = base[:len(base)-len(c.fs.Ext(base))]
if base == "index" {
absPath = c.fs.Dir(absPath)
}
}
}
// Try to get a relative path to the base directory
relPath, ok := c.fs.Rel(c.options.AbsOutputBase, absPath)
if !ok {
// This can fail in some situations such as on different drives on
// Windows. In that case we just use the file name.
baseName = c.fs.Base(absPath)
} else {
// Now we finally have a relative path
relDir = c.fs.Dir(relPath) + "/"
baseName = c.fs.Base(relPath)
// Use platform-independent slashes
relDir = strings.ReplaceAll(relDir, "\\", "/")
// Replace leading "../" so we don't try to write outside of the output
// directory. This normally can't happen because "AbsOutputBase" is
// automatically computed to contain all entry point files, but it can
// happen if someone sets it manually via the "outbase" API option.
//
// Note that we can't just strip any leading "../" because that could
// cause two separate entry point paths to collide. For example, there
// could be both "src/index.js" and "../src/index.js" as entry points.
dotDotCount := 0
for strings.HasPrefix(relDir[dotDotCount*3:], "../") {
dotDotCount++
}
if dotDotCount > 0 {
// The use of "_.._" here is somewhat arbitrary but it is unlikely to
// collide with a folder named by a human and it works on Windows
// (Windows doesn't like names that end with a "."). And not starting
// with a "." means that it will not be hidden on Unix.
relDir = strings.Repeat("_.._/", dotDotCount) + relDir[dotDotCount*3:]
}
relDir = "/" + relDir
}
// Strip the file extension if the output path is an input file
if !isCustomOutputPath {
ext := c.fs.Ext(baseName)
baseName = baseName[:len(baseName)-len(ext)]
}
return
}
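// A sketch of the "../" rewriting above in isolation (hypothetical helper,
// not part of the linker): a directory that escapes the output root, such as
// "../../src/", becomes "_.._/_.._/src/" so it stays inside the root without
// colliding with a sibling entry point path.
func exampleRewriteDotDot(relDir string) string {
	dotDotCount := 0
	for strings.HasPrefix(relDir[dotDotCount*3:], "../") {
		dotDotCount++
	}
	return strings.Repeat("_.._/", dotDotCount) + relDir[dotDotCount*3:]
}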
func (c *linkerContext) computeCrossChunkDependencies(chunks []chunkInfo) {
jsChunks := 0
for _, chunk := range chunks {
if _, ok := chunk.chunkRepr.(*chunkReprJS); ok {
jsChunks++
}
}
if jsChunks < 2 {
// No need to compute cross-chunk dependencies if there can't be any
return
}
type chunkMeta struct {
imports map[js_ast.Ref]bool
exports map[js_ast.Ref]bool
}
chunkMetas := make([]chunkMeta, len(chunks))
// For each chunk, see what symbols it uses from other chunks. Do this in
// parallel because it's the most expensive part of this function.
waitGroup := sync.WaitGroup{}
waitGroup.Add(len(chunks))
for chunkIndex, chunk := range chunks {
go func(chunkIndex int, chunk chunkInfo) {
imports := make(map[js_ast.Ref]bool)
chunkMetas[chunkIndex] = chunkMeta{imports: imports, exports: make(map[js_ast.Ref]bool)}
// Go over each file in this chunk
for sourceIndex := range chunk.filesWithPartsInChunk {
// Go over each part in this file that's marked for inclusion in this chunk
switch repr := c.files[sourceIndex].repr.(type) {
case *reprJS:
for partIndex, partMeta := range repr.meta.partMeta {
if !partMeta.isLive() {
continue
}
part := &repr.ast.Parts[partIndex]
// Rewrite external dynamic imports to point to the chunk for that entry point
for _, importRecordIndex := range part.ImportRecordIndices {
record := &repr.ast.ImportRecords[importRecordIndex]
if record.SourceIndex.IsValid() && c.isExternalDynamicImport(record, sourceIndex) {
otherChunkIndex := c.files[record.SourceIndex.GetIndex()].entryPointChunkIndex
record.Path.Text = chunks[otherChunkIndex].uniqueKey
record.SourceIndex = ast.Index32{}
}
}
// Remember what chunk each top-level symbol is declared in. Symbols
// with multiple declarations such as repeated "var" statements with
// the same name should already be marked as all being in a single
// chunk. In that case this will overwrite the same value below which
// is fine.
for _, declared := range part.DeclaredSymbols {
if declared.IsTopLevel {
c.symbols.Get(declared.Ref).ChunkIndex = ast.MakeIndex32(uint32(chunkIndex))
}
}
// Record each symbol used in this part. This will later be matched up
// with our map of which chunk a given symbol is declared in to
// determine if the symbol needs to be imported from another chunk.
for ref := range part.SymbolUses {
symbol := c.symbols.Get(ref)
// Ignore unbound symbols, which don't have declarations
if symbol.Kind == js_ast.SymbolUnbound {
continue
}