use super::link::{self, ensure_removed};
use super::lto::{self, SerializedModule};
use super::symbol_export::symbol_name_for_instance_in_crate;
use crate::errors;
use crate::traits::*;
use crate::{
CachedModuleCodegen, CodegenResults, CompiledModule, CrateInfo, ModuleCodegen, ModuleKind,
};
use jobserver::{Acquired, Client};
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::memmap::Mmap;
use rustc_data_structures::profiling::SelfProfilerRef;
use rustc_data_structures::profiling::TimingGuard;
use rustc_data_structures::profiling::VerboseTimingGuard;
use rustc_data_structures::sync::Lrc;
use rustc_errors::emitter::Emitter;
use rustc_errors::{
translation::{to_fluent_args, Translate},
DiagnosticId, FatalError, Handler, Level,
};
use rustc_fs_util::link_or_copy;
use rustc_hir::def_id::{CrateNum, LOCAL_CRATE};
use rustc_incremental::{
copy_cgu_workproduct_to_incr_comp_cache_dir, in_incr_comp_dir, in_incr_comp_dir_sess,
};
use rustc_metadata::EncodedMetadata;
use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
use rustc_middle::middle::exported_symbols::SymbolExportInfo;
use rustc_middle::ty::TyCtxt;
use rustc_session::cgu_reuse_tracker::CguReuseTracker;
use rustc_session::config::{self, CrateType, Lto, OutputFilenames, OutputType};
use rustc_session::config::{Passes, SwitchWithOptPath};
use rustc_session::Session;
use rustc_span::source_map::SourceMap;
use rustc_span::symbol::sym;
use rustc_span::{BytePos, FileName, InnerSpan, Pos, Span};
use rustc_target::spec::{MergeFunctions, SanitizerSet};
use std::any::Any;
use std::fs;
use std::io;
use std::marker::PhantomData;
use std::mem;
use std::path::{Path, PathBuf};
use std::str;
use std::sync::mpsc::{channel, Receiver, Sender};
use std::sync::Arc;
use std::thread;
const PRE_LTO_BC_EXT: &str = "pre-lto.bc";
/// What kind of object file to emit.
#[derive(Clone, Copy, PartialEq)]
pub enum EmitObj {
    /// No object file.
    None,
    /// Just uncompressed LLVM bitcode. Provides easy compatibility with
    /// Emscripten's `emcc` compiler, when used as the linker.
    Bitcode,
    /// Object code, possibly augmented with a bitcode section.
    ObjectCode(BitcodeSection),
}
/// What kind of LLVM bitcode section to embed in an object file.
#[derive(Clone, Copy, PartialEq)]
pub enum BitcodeSection {
    /// No bitcode section.
    None,
    /// A full, uncompressed bitcode section.
    Full,
}
/// Module-specific configuration for `optimize_and_codegen`.
pub struct ModuleConfig {
/// Names of additional optimization passes to run.
pub passes: Vec<String>,
/// Some(level) to optimize at a certain level, or None to run
/// absolutely no optimizations (used for the metadata module).
pub opt_level: Option<config::OptLevel>,
/// Some(level) to optimize binary size, or None to not affect program size.
pub opt_size: Option<config::OptLevel>,
pub pgo_gen: SwitchWithOptPath,
pub pgo_use: Option<PathBuf>,
pub pgo_sample_use: Option<PathBuf>,
pub debug_info_for_profiling: bool,
pub instrument_coverage: bool,
pub instrument_gcov: bool,
pub sanitizer: SanitizerSet,
pub sanitizer_recover: SanitizerSet,
pub sanitizer_memory_track_origins: usize,
// Flags indicating which outputs to produce.
pub emit_pre_lto_bc: bool,
pub emit_no_opt_bc: bool,
pub emit_bc: bool,
pub emit_ir: bool,
pub emit_asm: bool,
pub emit_obj: EmitObj,
pub emit_thin_lto: bool,
pub bc_cmdline: String,
// Miscellaneous flags. These are mostly copied from command-line
// options.
pub verify_llvm_ir: bool,
pub no_prepopulate_passes: bool,
pub no_builtins: bool,
pub time_module: bool,
pub vectorize_loop: bool,
pub vectorize_slp: bool,
pub merge_functions: bool,
pub inline_threshold: Option<u32>,
pub emit_lifetime_markers: bool,
pub llvm_plugins: Vec<String>,
}
impl ModuleConfig {
fn new(
kind: ModuleKind,
sess: &Session,
no_builtins: bool,
is_compiler_builtins: bool,
) -> ModuleConfig {
// If it's a regular module, use `$regular`, otherwise use `$other`.
// `$regular` and `$other` are evaluated lazily.
macro_rules! if_regular {
($regular: expr, $other: expr) => {
if let ModuleKind::Regular = kind { $regular } else { $other }
};
}
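        // For example, `if_regular!(sess.opts.cg.passes.clone(), vec![])` below
        // evaluates the first expression for `ModuleKind::Regular` modules and
        // falls back to the second for metadata/allocator modules.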
let opt_level_and_size = if_regular!(Some(sess.opts.optimize), None);
let save_temps = sess.opts.cg.save_temps;
let should_emit_obj = sess.opts.output_types.contains_key(&OutputType::Exe)
|| match kind {
ModuleKind::Regular => sess.opts.output_types.contains_key(&OutputType::Object),
ModuleKind::Allocator => false,
ModuleKind::Metadata => sess.opts.output_types.contains_key(&OutputType::Metadata),
};
let emit_obj = if !should_emit_obj {
EmitObj::None
} else if sess.target.obj_is_bitcode
|| (sess.opts.cg.linker_plugin_lto.enabled() && !no_builtins)
{
// This case is selected if the target uses objects as bitcode, or
// if linker plugin LTO is enabled. In the linker plugin LTO case
// the assumption is that the final link-step will read the bitcode
// and convert it to object code. This may be done by either the
// native linker or rustc itself.
//
// Note, however, that the linker-plugin-lto requested here is
// explicitly ignored for `#![no_builtins]` crates. These crates are
// specifically ignored by rustc's LTO passes and wouldn't work if
// loaded into the linker. These crates define symbols that LLVM
// lowers intrinsics to, and these symbol dependencies aren't known
// until after codegen. As a result any crate marked
// `#![no_builtins]` is assumed to not participate in LTO and
// instead goes on to generate object code.
EmitObj::Bitcode
} else if need_bitcode_in_object(sess) {
EmitObj::ObjectCode(BitcodeSection::Full)
} else {
EmitObj::ObjectCode(BitcodeSection::None)
};
ModuleConfig {
passes: if_regular!(sess.opts.cg.passes.clone(), vec![]),
opt_level: opt_level_and_size,
opt_size: opt_level_and_size,
pgo_gen: if_regular!(
sess.opts.cg.profile_generate.clone(),
SwitchWithOptPath::Disabled
),
pgo_use: if_regular!(sess.opts.cg.profile_use.clone(), None),
pgo_sample_use: if_regular!(sess.opts.unstable_opts.profile_sample_use.clone(), None),
debug_info_for_profiling: sess.opts.unstable_opts.debug_info_for_profiling,
instrument_coverage: if_regular!(sess.instrument_coverage(), false),
instrument_gcov: if_regular!(
                // compiler_builtins overrides the codegen-units setting,
                // which is incompatible with -Zprofile, since -Zprofile
                // requires that only a single codegen unit is used per crate.
sess.opts.unstable_opts.profile && !is_compiler_builtins,
false
),
sanitizer: if_regular!(sess.opts.unstable_opts.sanitizer, SanitizerSet::empty()),
sanitizer_recover: if_regular!(
sess.opts.unstable_opts.sanitizer_recover,
SanitizerSet::empty()
),
sanitizer_memory_track_origins: if_regular!(
sess.opts.unstable_opts.sanitizer_memory_track_origins,
0
),
emit_pre_lto_bc: if_regular!(
save_temps || need_pre_lto_bitcode_for_incr_comp(sess),
false
),
emit_no_opt_bc: if_regular!(save_temps, false),
emit_bc: if_regular!(
save_temps || sess.opts.output_types.contains_key(&OutputType::Bitcode),
save_temps
),
emit_ir: if_regular!(
sess.opts.output_types.contains_key(&OutputType::LlvmAssembly),
false
),
emit_asm: if_regular!(
sess.opts.output_types.contains_key(&OutputType::Assembly),
false
),
emit_obj,
emit_thin_lto: sess.opts.unstable_opts.emit_thin_lto,
bc_cmdline: sess.target.bitcode_llvm_cmdline.to_string(),
verify_llvm_ir: sess.verify_llvm_ir(),
no_prepopulate_passes: sess.opts.cg.no_prepopulate_passes,
no_builtins: no_builtins || sess.target.no_builtins,
// Exclude metadata and allocator modules from time_passes output,
// since they throw off the "LLVM passes" measurement.
time_module: if_regular!(true, false),
            // Copy what Clang does by turning on loop vectorization at O2 and
            // SLP vectorization at O3.
vectorize_loop: !sess.opts.cg.no_vectorize_loops
&& (sess.opts.optimize == config::OptLevel::Default
|| sess.opts.optimize == config::OptLevel::Aggressive),
vectorize_slp: !sess.opts.cg.no_vectorize_slp
&& sess.opts.optimize == config::OptLevel::Aggressive,
// Some targets (namely, NVPTX) interact badly with the
// MergeFunctions pass. This is because MergeFunctions can generate
// new function calls which may interfere with the target calling
// convention; e.g. for the NVPTX target, PTX kernels should not
// call other PTX kernels. MergeFunctions can also be configured to
// generate aliases instead, but aliases are not supported by some
// backends (again, NVPTX). Therefore, allow targets to opt out of
// the MergeFunctions pass, but otherwise keep the pass enabled (at
// O2 and O3) since it can be useful for reducing code size.
merge_functions: match sess
.opts
.unstable_opts
.merge_functions
.unwrap_or(sess.target.merge_functions)
{
MergeFunctions::Disabled => false,
MergeFunctions::Trampolines | MergeFunctions::Aliases => {
use config::OptLevel::*;
match sess.opts.optimize {
Aggressive | Default | SizeMin | Size => true,
Less | No => false,
}
}
},
inline_threshold: sess.opts.cg.inline_threshold,
emit_lifetime_markers: sess.emit_lifetime_markers(),
llvm_plugins: if_regular!(sess.opts.unstable_opts.llvm_plugins.clone(), vec![]),
}
}
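    /// Returns `true` if this configuration requires LLVM bitcode to be produced,
    /// either because bitcode output itself was requested or because bitcode has
    /// to be emitted as (or embedded into) the object file.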
pub fn bitcode_needed(&self) -> bool {
self.emit_bc
|| self.emit_obj == EmitObj::Bitcode
|| self.emit_obj == EmitObj::ObjectCode(BitcodeSection::Full)
}
}
/// Configuration passed to the function returned by the `target_machine_factory`.
pub struct TargetMachineFactoryConfig {
/// Split DWARF is enabled in LLVM by checking that `TM.MCOptions.SplitDwarfFile` isn't empty,
/// so the path to the dwarf object has to be provided when we create the target machine.
/// This can be ignored by backends which do not need it for their Split DWARF support.
pub split_dwarf_file: Option<PathBuf>,
}
impl TargetMachineFactoryConfig {
pub fn new(
cgcx: &CodegenContext<impl WriteBackendMethods>,
module_name: &str,
) -> TargetMachineFactoryConfig {
let split_dwarf_file = if cgcx.target_can_use_split_dwarf {
cgcx.output_filenames.split_dwarf_path(
cgcx.split_debuginfo,
cgcx.split_dwarf_kind,
Some(module_name),
)
} else {
None
};
TargetMachineFactoryConfig { split_dwarf_file }
}
}
pub type TargetMachineFactoryFn<B> = Arc<
dyn Fn(TargetMachineFactoryConfig) -> Result<<B as WriteBackendMethods>::TargetMachine, String>
+ Send
+ Sync,
>;
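// Illustrative sketch (not part of the original source): a backend's
// `target_machine_factory` typically captures the session state it needs and
// returns a closure of this shape. `create_target_machine` and
// `target_options` are hypothetical backend helpers used only for the example:
//
//     Arc::new(move |config: TargetMachineFactoryConfig| {
//         create_target_machine(&target_options, config.split_dwarf_file)
//             .map_err(|err| format!("could not create target machine: {err}"))
//     })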
pub type ExportedSymbols = FxHashMap<CrateNum, Arc<Vec<(String, SymbolExportInfo)>>>;
/// Additional resources used by `optimize_and_codegen` (not module specific).
#[derive(Clone)]
pub struct CodegenContext<B: WriteBackendMethods> {
// Resources needed when running LTO
pub backend: B,
pub prof: SelfProfilerRef,
pub lto: Lto,
pub save_temps: bool,
pub fewer_names: bool,
pub time_trace: bool,
pub exported_symbols: Option<Arc<ExportedSymbols>>,
pub opts: Arc<config::Options>,
pub crate_types: Vec<CrateType>,
pub each_linked_rlib_for_lto: Vec<(CrateNum, PathBuf)>,
pub output_filenames: Arc<OutputFilenames>,
pub regular_module_config: Arc<ModuleConfig>,
pub metadata_module_config: Arc<ModuleConfig>,
pub allocator_module_config: Arc<ModuleConfig>,
pub tm_factory: TargetMachineFactoryFn<B>,
pub msvc_imps_needed: bool,
pub is_pe_coff: bool,
pub target_can_use_split_dwarf: bool,
pub target_pointer_width: u32,
pub target_arch: String,
pub debuginfo: config::DebugInfo,
pub split_debuginfo: rustc_target::spec::SplitDebuginfo,
pub split_dwarf_kind: rustc_session::config::SplitDwarfKind,
// Number of cgus excluding the allocator/metadata modules
pub total_cgus: usize,
// Handler to use for diagnostics produced during codegen.
pub diag_emitter: SharedEmitter,
// LLVM optimizations for which we want to print remarks.
pub remark: Passes,
// Worker thread number
pub worker: usize,
// The incremental compilation session directory, or None if we are not
// compiling incrementally
pub incr_comp_session_dir: Option<PathBuf>,
// Used to update CGU re-use information during the thinlto phase.
pub cgu_reuse_tracker: CguReuseTracker,
// Channel back to the main control thread to send messages to
pub coordinator_send: Sender<Box<dyn Any + Send>>,
}
impl<B: WriteBackendMethods> CodegenContext<B> {
pub fn create_diag_handler(&self) -> Handler {
Handler::with_emitter(true, None, Box::new(self.diag_emitter.clone()))
}
pub fn config(&self, kind: ModuleKind) -> &ModuleConfig {
match kind {
ModuleKind::Regular => &self.regular_module_config,
ModuleKind::Metadata => &self.metadata_module_config,
ModuleKind::Allocator => &self.allocator_module_config,
}
}
}
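/// Turns the modules queued for fat or thin LTO into a fresh batch of work
/// items: one `WorkItem::LTO` per module produced by the LTO pass, plus
/// zero-cost `WorkItem::CopyPostLtoArtifacts` jobs for modules that can be
/// reused from the incremental cache.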
fn generate_lto_work<B: ExtraBackendMethods>(
cgcx: &CodegenContext<B>,
needs_fat_lto: Vec<FatLTOInput<B>>,
needs_thin_lto: Vec<(String, B::ThinBuffer)>,
import_only_modules: Vec<(SerializedModule<B::ModuleBuffer>, WorkProduct)>,
) -> Vec<(WorkItem<B>, u64)> {
let _prof_timer = cgcx.prof.generic_activity("codegen_generate_lto_work");
let (lto_modules, copy_jobs) = if !needs_fat_lto.is_empty() {
assert!(needs_thin_lto.is_empty());
let lto_module =
B::run_fat_lto(cgcx, needs_fat_lto, import_only_modules).unwrap_or_else(|e| e.raise());
(vec![lto_module], vec![])
} else {
assert!(needs_fat_lto.is_empty());
B::run_thin_lto(cgcx, needs_thin_lto, import_only_modules).unwrap_or_else(|e| e.raise())
};
lto_modules
.into_iter()
.map(|module| {
let cost = module.cost();
(WorkItem::LTO(module), cost)
})
.chain(copy_jobs.into_iter().map(|wp| {
(
WorkItem::CopyPostLtoArtifacts(CachedModuleCodegen {
name: wp.cgu_name.clone(),
source: wp,
}),
0,
)
}))
.collect()
}
pub struct CompiledModules {
pub modules: Vec<CompiledModule>,
pub allocator_module: Option<CompiledModule>,
}
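/// Returns `true` if LLVM bitcode needs to be embedded in object files, i.e.
/// when `-C embed-bitcode` was requested for a build that links an rlib, or
/// when the target unconditionally forces embedded bitcode.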
fn need_bitcode_in_object(sess: &Session) -> bool {
let requested_for_rlib = sess.opts.cg.embed_bitcode
&& sess.crate_types().contains(&CrateType::Rlib)
&& sess.opts.output_types.contains_key(&OutputType::Exe);
let forced_by_target = sess.target.forces_embed_bitcode;
requested_for_rlib || forced_by_target
}
fn need_pre_lto_bitcode_for_incr_comp(sess: &Session) -> bool {
if sess.opts.incremental.is_none() {
return false;
}
match sess.lto() {
Lto::No => false,
Lto::Fat | Lto::Thin | Lto::ThinLocal => true,
}
}
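/// Spawns the coordinator thread that drives optimization and codegen of all
/// codegen units, and returns an `OngoingCodegen` handle through which the
/// caller submits codegened modules and later collects the compiled results.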
pub fn start_async_codegen<B: ExtraBackendMethods>(
backend: B,
tcx: TyCtxt<'_>,
target_cpu: String,
metadata: EncodedMetadata,
metadata_module: Option<CompiledModule>,
total_cgus: usize,
) -> OngoingCodegen<B> {
let (coordinator_send, coordinator_receive) = channel();
let sess = tcx.sess;
let crate_attrs = tcx.hir().attrs(rustc_hir::CRATE_HIR_ID);
let no_builtins = tcx.sess.contains_name(crate_attrs, sym::no_builtins);
let is_compiler_builtins = tcx.sess.contains_name(crate_attrs, sym::compiler_builtins);
let crate_info = CrateInfo::new(tcx, target_cpu);
let regular_config =
ModuleConfig::new(ModuleKind::Regular, sess, no_builtins, is_compiler_builtins);
let metadata_config =
ModuleConfig::new(ModuleKind::Metadata, sess, no_builtins, is_compiler_builtins);
let allocator_config =
ModuleConfig::new(ModuleKind::Allocator, sess, no_builtins, is_compiler_builtins);
let (shared_emitter, shared_emitter_main) = SharedEmitter::new();
let (codegen_worker_send, codegen_worker_receive) = channel();
let coordinator_thread = start_executing_work(
backend.clone(),
tcx,
&crate_info,
shared_emitter,
codegen_worker_send,
coordinator_receive,
total_cgus,
sess.jobserver.clone(),
Arc::new(regular_config),
Arc::new(metadata_config),
Arc::new(allocator_config),
coordinator_send.clone(),
);
OngoingCodegen {
backend,
metadata,
metadata_module,
crate_info,
codegen_worker_receive,
shared_emitter_main,
coordinator: Coordinator {
sender: coordinator_send,
future: Some(coordinator_thread),
phantom: PhantomData,
},
output_filenames: tcx.output_filenames(()).clone(),
}
}
fn copy_all_cgu_workproducts_to_incr_comp_cache_dir(
sess: &Session,
compiled_modules: &CompiledModules,
) -> FxHashMap<WorkProductId, WorkProduct> {
let mut work_products = FxHashMap::default();
if sess.opts.incremental.is_none() {
return work_products;
}
let _timer = sess.timer("copy_all_cgu_workproducts_to_incr_comp_cache_dir");
for module in compiled_modules.modules.iter().filter(|m| m.kind == ModuleKind::Regular) {
let mut files = Vec::new();
if let Some(object_file_path) = &module.object {
files.push(("o", object_file_path.as_path()));
}
if let Some(dwarf_object_file_path) = &module.dwarf_object {
files.push(("dwo", dwarf_object_file_path.as_path()));
}
if let Some((id, product)) =
copy_cgu_workproduct_to_incr_comp_cache_dir(sess, &module.name, files.as_slice())
{
work_products.insert(id, product);
}
}
work_products
}
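/// Copies the per-codegen-unit temporary outputs (`foo.<module>.x`) to the
/// user-visible output paths where that is possible, warns when it is not,
/// and removes temporary files that are no longer needed unless
/// `-C save-temps` was passed.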
fn produce_final_output_artifacts(
sess: &Session,
compiled_modules: &CompiledModules,
crate_output: &OutputFilenames,
) {
let mut user_wants_bitcode = false;
let mut user_wants_objects = false;
// Produce final compile outputs.
let copy_gracefully = |from: &Path, to: &Path| {
if let Err(e) = fs::copy(from, to) {
sess.emit_err(errors::CopyPath::new(from, to, e));
}
};
let copy_if_one_unit = |output_type: OutputType, keep_numbered: bool| {
if compiled_modules.modules.len() == 1 {
            // 1) Only one codegen unit. In this case it's trivial to copy
            //    `foo.0.x` to `foo.x`.
let module_name = Some(&compiled_modules.modules[0].name[..]);
let path = crate_output.temp_path(output_type, module_name);
copy_gracefully(&path, &crate_output.path(output_type));
if !sess.opts.cg.save_temps && !keep_numbered {
// The user just wants `foo.x`, not `foo.#module-name#.x`.
ensure_removed(sess.diagnostic(), &path);
}
} else {
let extension = crate_output
.temp_path(output_type, None)
.extension()
.unwrap()
.to_str()
.unwrap()
.to_owned();
if crate_output.outputs.contains_key(&output_type) {
// 2) Multiple codegen units, with `--emit foo=some_name`. We have
// no good solution for this case, so warn the user.
sess.emit_warning(errors::IgnoringEmitPath { extension });
} else if crate_output.single_output_file.is_some() {
// 3) Multiple codegen units, with `-o some_name`. We have
// no good solution for this case, so warn the user.
sess.emit_warning(errors::IgnoringOutput { extension });
} else {
// 4) Multiple codegen units, but no explicit name. We
// just leave the `foo.0.x` files in place.
// (We don't have to do any work in this case.)
}
}
};
// Flag to indicate whether the user explicitly requested bitcode.
// Otherwise, we produced it only as a temporary output, and will need
// to get rid of it.
for output_type in crate_output.outputs.keys() {
match *output_type {
OutputType::Bitcode => {
user_wants_bitcode = true;
// Copy to .bc, but always keep the .0.bc. There is a later
// check to figure out if we should delete .0.bc files, or keep
// them for making an rlib.
copy_if_one_unit(OutputType::Bitcode, true);
}
OutputType::LlvmAssembly => {
copy_if_one_unit(OutputType::LlvmAssembly, false);
}
OutputType::Assembly => {
copy_if_one_unit(OutputType::Assembly, false);
}
OutputType::Object => {
user_wants_objects = true;
copy_if_one_unit(OutputType::Object, true);
}
OutputType::Mir | OutputType::Metadata | OutputType::Exe | OutputType::DepInfo => {}
}
}
// Clean up unwanted temporary files.
// We create the following files by default:
// - #crate#.#module-name#.bc
// - #crate#.#module-name#.o
// - #crate#.crate.metadata.bc
// - #crate#.crate.metadata.o
// - #crate#.o (linked from crate.##.o)
// - #crate#.bc (copied from crate.##.bc)
// We may create additional files if requested by the user (through
// `-C save-temps` or `--emit=` flags).
if !sess.opts.cg.save_temps {
// Remove the temporary .#module-name#.o objects. If the user didn't
// explicitly request bitcode (with --emit=bc), and the bitcode is not
// needed for building an rlib, then we must remove .#module-name#.bc as
// well.
// Specific rules for keeping .#module-name#.bc:
// - If the user requested bitcode (`user_wants_bitcode`), and
// codegen_units > 1, then keep it.
// - If the user requested bitcode but codegen_units == 1, then we
// can toss .#module-name#.bc because we copied it to .bc earlier.
// - If we're not building an rlib and the user didn't request
// bitcode, then delete .#module-name#.bc.
// If you change how this works, also update back::link::link_rlib,
// where .#module-name#.bc files are (maybe) deleted after making an
// rlib.
let needs_crate_object = crate_output.outputs.contains_key(&OutputType::Exe);
let keep_numbered_bitcode = user_wants_bitcode && sess.codegen_units() > 1;
let keep_numbered_objects =
needs_crate_object || (user_wants_objects && sess.codegen_units() > 1);
for module in compiled_modules.modules.iter() {
if let Some(ref path) = module.object {
if !keep_numbered_objects {
ensure_removed(sess.diagnostic(), path);
}
}
if let Some(ref path) = module.dwarf_object {
if !keep_numbered_objects {
ensure_removed(sess.diagnostic(), path);
}
}
if let Some(ref path) = module.bytecode {
if !keep_numbered_bitcode {
ensure_removed(sess.diagnostic(), path);
}
}
}
if !user_wants_bitcode {
if let Some(ref allocator_module) = compiled_modules.allocator_module {
if let Some(ref path) = allocator_module.bytecode {
ensure_removed(sess.diagnostic(), path);
}
}
}
}
// We leave the following files around by default:
// - #crate#.o
// - #crate#.crate.metadata.o
// - #crate#.bc
// These are used in linking steps and will be cleaned up afterward.
}
pub enum WorkItem<B: WriteBackendMethods> {
/// Optimize a newly codegened, totally unoptimized module.
Optimize(ModuleCodegen<B::Module>),
/// Copy the post-LTO artifacts from the incremental cache to the output
/// directory.
CopyPostLtoArtifacts(CachedModuleCodegen),
    /// Perform (Thin)LTO on the given module.
LTO(lto::LtoModuleCodegen<B>),
}
impl<B: WriteBackendMethods> WorkItem<B> {
pub fn module_kind(&self) -> ModuleKind {
match *self {
WorkItem::Optimize(ref m) => m.kind,
WorkItem::CopyPostLtoArtifacts(_) | WorkItem::LTO(_) => ModuleKind::Regular,
}
}
fn start_profiling<'a>(&self, cgcx: &'a CodegenContext<B>) -> TimingGuard<'a> {
match *self {
WorkItem::Optimize(ref m) => {
cgcx.prof.generic_activity_with_arg("codegen_module_optimize", &*m.name)
}
WorkItem::CopyPostLtoArtifacts(ref m) => cgcx
.prof
.generic_activity_with_arg("codegen_copy_artifacts_from_incr_cache", &*m.name),
WorkItem::LTO(ref m) => {
cgcx.prof.generic_activity_with_arg("codegen_module_perform_lto", m.name())
}
}
}
/// Generate a short description of this work item suitable for use as a thread name.
fn short_description(&self) -> String {
// `pthread_setname()` on *nix is limited to 15 characters and longer names are ignored.
// Use very short descriptions in this case to maximize the space available for the module name.
// Windows does not have that limitation so use slightly more descriptive names there.
match self {
WorkItem::Optimize(m) => {
#[cfg(windows)]
return format!("optimize module {}", m.name);
#[cfg(not(windows))]
return format!("opt {}", m.name);
}
WorkItem::CopyPostLtoArtifacts(m) => {
#[cfg(windows)]
return format!("copy LTO artifacts for {}", m.name);
#[cfg(not(windows))]
return format!("copy {}", m.name);
}
WorkItem::LTO(m) => {
#[cfg(windows)]
return format!("LTO module {}", m.name());
#[cfg(not(windows))]
return format!("LTO {}", m.name());
}
}
}
}
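/// The outcome of executing a single `WorkItem`: a fully compiled module, a
/// module that still needs the `-Zcombine-cgu` link step, or a module that has
/// to be fed back into fat or thin LTO.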
enum WorkItemResult<B: WriteBackendMethods> {
Compiled(CompiledModule),
NeedsLink(ModuleCodegen<B::Module>),
NeedsFatLTO(FatLTOInput<B>),
NeedsThinLTO(String, B::ThinBuffer),
}
pub enum FatLTOInput<B: WriteBackendMethods> {
Serialized { name: String, buffer: B::ModuleBuffer },
InMemory(ModuleCodegen<B::Module>),
}
fn execute_work_item<B: ExtraBackendMethods>(
cgcx: &CodegenContext<B>,
work_item: WorkItem<B>,
) -> Result<WorkItemResult<B>, FatalError> {
let module_config = cgcx.config(work_item.module_kind());
match work_item {
WorkItem::Optimize(module) => execute_optimize_work_item(cgcx, module, module_config),
WorkItem::CopyPostLtoArtifacts(module) => {
Ok(execute_copy_from_cache_work_item(cgcx, module, module_config))
}
WorkItem::LTO(module) => execute_lto_work_item(cgcx, module, module_config),
}
}
/// The actual LTO type we end up choosing, based on multiple factors.
pub enum ComputedLtoType {
No,
Thin,
Fat,
}
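/// Decides which kind of LTO, if any, a single codegen unit should undergo,
/// based on the session-wide LTO setting, the compiler options, the crate
/// types being produced, and the module kind.
///
/// For example, with `-C lto=thin` on an rlib-only build this returns
/// `ComputedLtoType::No`, because crate-graph LTO is deferred to the final
/// link of a downstream crate.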
pub fn compute_per_cgu_lto_type(
sess_lto: &Lto,
opts: &config::Options,
sess_crate_types: &[CrateType],
module_kind: ModuleKind,
) -> ComputedLtoType {
// Metadata modules never participate in LTO regardless of the lto
// settings.
if module_kind == ModuleKind::Metadata {
return ComputedLtoType::No;
}
    // If the linker does LTO, we don't have to do it. Note that we keep doing
    // full LTO, if it is requested, so as not to break the assumption that the
    // output will be a single module.
let linker_does_lto = opts.cg.linker_plugin_lto.enabled();
    // When we're automatically doing ThinLTO for multi-codegen-unit builds we
    // don't actually want to LTO the allocator module if it shows up. This is
    // due to various linker shenanigans that we'll encounter later.
let is_allocator = module_kind == ModuleKind::Allocator;
    // We ignore a request for full crate graph LTO if the crate type is only an
    // rlib, as there is no full crate graph to process; that'll happen later.
    //
    // This use case currently comes up primarily for targets that require LTO,
    // so the request for LTO is always unconditionally passed down to the
    // backend, but we don't actually want to do anything about it until we've
    // got a final product.
let is_rlib = sess_crate_types.len() == 1 && sess_crate_types[0] == CrateType::Rlib;
match sess_lto {
Lto::ThinLocal if !linker_does_lto && !is_allocator => ComputedLtoType::Thin,
Lto::Thin if !linker_does_lto && !is_rlib => ComputedLtoType::Thin,
Lto::Fat if !is_rlib => ComputedLtoType::Fat,
_ => ComputedLtoType::No,
}
}
fn execute_optimize_work_item<B: ExtraBackendMethods>(
cgcx: &CodegenContext<B>,
module: ModuleCodegen<B::Module>,
module_config: &ModuleConfig,
) -> Result<WorkItemResult<B>, FatalError> {
let diag_handler = cgcx.create_diag_handler();
unsafe {
B::optimize(cgcx, &diag_handler, &module, module_config)?;
}
// After we've done the initial round of optimizations we need to
// decide whether to synchronously codegen this module or ship it
// back to the coordinator thread for further LTO processing (which
// has to wait for all the initial modules to be optimized).
let lto_type = compute_per_cgu_lto_type(&cgcx.lto, &cgcx.opts, &cgcx.crate_types, module.kind);
// If we're doing some form of incremental LTO then we need to be sure to
// save our module to disk first.
let bitcode = if cgcx.config(module.kind).emit_pre_lto_bc {
let filename = pre_lto_bitcode_filename(&module.name);
cgcx.incr_comp_session_dir.as_ref().map(|path| path.join(&filename))
} else {
None
};
match lto_type {
ComputedLtoType::No => finish_intra_module_work(cgcx, module, module_config),
ComputedLtoType::Thin => {
let (name, thin_buffer) = B::prepare_thin(module);
if let Some(path) = bitcode {
fs::write(&path, thin_buffer.data()).unwrap_or_else(|e| {
panic!("Error writing pre-lto-bitcode file `{}`: {}", path.display(), e);
});
}
Ok(WorkItemResult::NeedsThinLTO(name, thin_buffer))
}
ComputedLtoType::Fat => match bitcode {
Some(path) => {
let (name, buffer) = B::serialize_module(module);
fs::write(&path, buffer.data()).unwrap_or_else(|e| {
panic!("Error writing pre-lto-bitcode file `{}`: {}", path.display(), e);
});
Ok(WorkItemResult::NeedsFatLTO(FatLTOInput::Serialized { name, buffer }))
}
None => Ok(WorkItemResult::NeedsFatLTO(FatLTOInput::InMemory(module))),
},
}
}
fn execute_copy_from_cache_work_item<B: ExtraBackendMethods>(
cgcx: &CodegenContext<B>,
module: CachedModuleCodegen,
module_config: &ModuleConfig,
) -> WorkItemResult<B> {
assert!(module_config.emit_obj != EmitObj::None);
let incr_comp_session_dir = cgcx.incr_comp_session_dir.as_ref().unwrap();
let load_from_incr_comp_dir = |output_path: PathBuf, saved_path: &str| {
let source_file = in_incr_comp_dir(&incr_comp_session_dir, saved_path);
debug!(
"copying pre-existing module `{}` from {:?} to {}",
module.name,
source_file,
output_path.display()
);
match link_or_copy(&source_file, &output_path) {
Ok(_) => Some(output_path),
Err(error) => {
cgcx.create_diag_handler().emit_err(errors::CopyPathBuf {
source_file,
output_path,
error,
});
None
}
}
};
let object = load_from_incr_comp_dir(
cgcx.output_filenames.temp_path(OutputType::Object, Some(&module.name)),
&module.source.saved_files.get("o").expect("no saved object file in work product"),
);
let dwarf_object =
module.source.saved_files.get("dwo").as_ref().and_then(|saved_dwarf_object_file| {
let dwarf_obj_out = cgcx
.output_filenames
.split_dwarf_path(cgcx.split_debuginfo, cgcx.split_dwarf_kind, Some(&module.name))
.expect(
"saved dwarf object in work product but `split_dwarf_path` returned `None`",
);
load_from_incr_comp_dir(dwarf_obj_out, &saved_dwarf_object_file)
});
WorkItemResult::Compiled(CompiledModule {
name: module.name,
kind: ModuleKind::Regular,
object,
dwarf_object,
bytecode: None,
})
}
fn execute_lto_work_item<B: ExtraBackendMethods>(
cgcx: &CodegenContext<B>,
module: lto::LtoModuleCodegen<B>,
module_config: &ModuleConfig,
) -> Result<WorkItemResult<B>, FatalError> {
let module = unsafe { module.optimize(cgcx)? };
finish_intra_module_work(cgcx, module, module_config)
}
fn finish_intra_module_work<B: ExtraBackendMethods>(
cgcx: &CodegenContext<B>,
module: ModuleCodegen<B::Module>,
module_config: &ModuleConfig,
) -> Result<WorkItemResult<B>, FatalError> {
let diag_handler = cgcx.create_diag_handler();
if !cgcx.opts.unstable_opts.combine_cgu
|| module.kind == ModuleKind::Metadata
|| module.kind == ModuleKind::Allocator
{
let module = unsafe { B::codegen(cgcx, &diag_handler, module, module_config)? };
Ok(WorkItemResult::Compiled(module))
} else {
Ok(WorkItemResult::NeedsLink(module))
}
}
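/// The message protocol spoken between the main thread, the coordinator
/// thread, and the LLVM worker threads while codegen and optimization are in
/// flight; messages bound for the coordinator are sent boxed as `dyn Any` over
/// `coordinator_send`.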
pub enum Message<B: WriteBackendMethods> {
Token(io::Result<Acquired>),
NeedsFatLTO {
result: FatLTOInput<B>,
worker_id: usize,
},
NeedsThinLTO {
name: String,
thin_buffer: B::ThinBuffer,
worker_id: usize,
},
NeedsLink {
module: ModuleCodegen<B::Module>,
worker_id: usize,
},
Done {
result: Result<CompiledModule, Option<WorkerFatalError>>,
worker_id: usize,
},
CodegenDone {
llvm_work_item: WorkItem<B>,
cost: u64,
},
AddImportOnlyModule {
module_data: SerializedModule<B::ModuleBuffer>,
work_product: WorkProduct,
},
CodegenComplete,
CodegenItem,
CodegenAborted,
}
struct Diagnostic {
msg: String,
code: Option<DiagnosticId>,
lvl: Level,
}
#[derive(PartialEq, Clone, Copy, Debug)]
enum MainThreadWorkerState {
Idle,
Codegenning,
LLVMing,
}
fn start_executing_work<B: ExtraBackendMethods>(
backend: B,
tcx: TyCtxt<'_>,
crate_info: &CrateInfo,
shared_emitter: SharedEmitter,
codegen_worker_send: Sender<Message<B>>,
coordinator_receive: Receiver<Box<dyn Any + Send>>,
total_cgus: usize,
jobserver: Client,
regular_config: Arc<ModuleConfig>,
metadata_config: Arc<ModuleConfig>,
allocator_config: Arc<ModuleConfig>,
tx_to_llvm_workers: Sender<Box<dyn Any + Send>>,
) -> thread::JoinHandle<Result<CompiledModules, ()>> {
let coordinator_send = tx_to_llvm_workers;
let sess = tcx.sess;