@@ -710,12 +710,11 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 
 	WARN_ON_ONCE(!irqs_disabled());
 
-	/* If we disabled the tracer, stop now */
-	if (current_trace == &nop_trace)
-		return;
-
-	if (WARN_ON_ONCE(!current_trace->use_max_tr))
+	if (!current_trace->allocated_snapshot) {
+		/* Only the nop tracer should hit this when disabling */
+		WARN_ON_ONCE(current_trace != &nop_trace);
 		return;
+	}
 
 	arch_spin_lock(&ftrace_max_lock);
 
@@ -743,10 +742,8 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 		return;
 
 	WARN_ON_ONCE(!irqs_disabled());
-	if (!current_trace->use_max_tr) {
-		WARN_ON_ONCE(1);
+	if (WARN_ON_ONCE(!current_trace->allocated_snapshot))
 		return;
-	}
 
 	arch_spin_lock(&ftrace_max_lock);
 
@@ -866,10 +863,13 @@ int register_tracer(struct tracer *type)
 
 	current_trace = type;
 
-	/* If we expanded the buffers, make sure the max is expanded too */
-	if (ring_buffer_expanded && type->use_max_tr)
-		ring_buffer_resize(max_tr.buffer, trace_buf_size,
-					RING_BUFFER_ALL_CPUS);
+	if (type->use_max_tr) {
+		/* If we expanded the buffers, make sure the max is expanded too */
+		if (ring_buffer_expanded)
+			ring_buffer_resize(max_tr.buffer, trace_buf_size,
+					   RING_BUFFER_ALL_CPUS);
+		type->allocated_snapshot = true;
+	}
 
 	/* the test is responsible for initializing and enabling */
 	pr_info("Testing tracer %s: ", type->name);
@@ -885,10 +885,14 @@ int register_tracer(struct tracer *type)
 		/* Only reset on passing, to avoid touching corrupted buffers */
 		tracing_reset_online_cpus(tr);
 
-		/* Shrink the max buffer again */
-		if (ring_buffer_expanded && type->use_max_tr)
-			ring_buffer_resize(max_tr.buffer, 1,
-						RING_BUFFER_ALL_CPUS);
+		if (type->use_max_tr) {
+			type->allocated_snapshot = false;
+
+			/* Shrink the max buffer again */
+			if (ring_buffer_expanded)
+				ring_buffer_resize(max_tr.buffer, 1,
+						   RING_BUFFER_ALL_CPUS);
+		}
 
 		printk(KERN_CONT "PASSED\n");
 	}
@@ -1964,7 +1968,11 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 		*iter->trace = *current_trace;
 	mutex_unlock(&trace_types_lock);
 
-	atomic_inc(&trace_record_cmdline_disabled);
+	if (iter->snapshot && iter->trace->use_max_tr)
+		return ERR_PTR(-EBUSY);
+
+	if (!iter->snapshot)
+		atomic_inc(&trace_record_cmdline_disabled);
 
 	if (*pos != iter->pos) {
 		iter->ent = NULL;
@@ -2003,7 +2011,11 @@ static void s_stop(struct seq_file *m, void *p)
 {
 	struct trace_iterator *iter = m->private;
 
-	atomic_dec(&trace_record_cmdline_disabled);
+	if (iter->snapshot && iter->trace->use_max_tr)
+		return;
+
+	if (!iter->snapshot)
+		atomic_dec(&trace_record_cmdline_disabled);
 	trace_access_unlock(iter->cpu_file);
 	trace_event_read_unlock();
 }
@@ -2438,7 +2450,7 @@ static const struct seq_operations tracer_seq_ops = {
 };
 
 static struct trace_iterator *
-__tracing_open(struct inode *inode, struct file *file)
+__tracing_open(struct inode *inode, struct file *file, bool snapshot)
 {
 	long cpu_file = (long) inode->i_private;
 	struct trace_iterator *iter;
@@ -2471,10 +2483,11 @@ __tracing_open(struct inode *inode, struct file *file)
 	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
 		goto fail;
 
-	if (current_trace && current_trace->print_max)
+	if ((current_trace && current_trace->print_max) || snapshot)
 		iter->tr = &max_tr;
 	else
 		iter->tr = &global_trace;
+	iter->snapshot = snapshot;
 	iter->pos = -1;
 	mutex_init(&iter->mutex);
 	iter->cpu_file = cpu_file;
@@ -2491,8 +2504,9 @@ __tracing_open(struct inode *inode, struct file *file)
 	if (trace_clocks[trace_clock_id].in_ns)
 		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
 
-	/* stop the trace while dumping */
-	tracing_stop();
+	/* stop the trace while dumping if we are not opening "snapshot" */
+	if (!iter->snapshot)
+		tracing_stop();
 
 	if (iter->cpu_file == TRACE_PIPE_ALL_CPU) {
 		for_each_tracing_cpu(cpu) {
@@ -2555,8 +2569,9 @@ static int tracing_release(struct inode *inode, struct file *file)
 	if (iter->trace && iter->trace->close)
 		iter->trace->close(iter);
 
-	/* reenable tracing if it was previously enabled */
-	tracing_start();
+	if (!iter->snapshot)
+		/* reenable tracing if it was previously enabled */
+		tracing_start();
 	mutex_unlock(&trace_types_lock);
 
 	mutex_destroy(&iter->mutex);
@@ -2584,7 +2599,7 @@ static int tracing_open(struct inode *inode, struct file *file)
 	}
 
 	if (file->f_mode & FMODE_READ) {
-		iter = __tracing_open(inode, file);
+		iter = __tracing_open(inode, file, false);
 		if (IS_ERR(iter))
 			ret = PTR_ERR(iter);
 		else if (trace_flags & TRACE_ITER_LATENCY_FMT)
@@ -3219,7 +3234,7 @@ static int tracing_set_tracer(const char *buf)
 	if (current_trace && current_trace->reset)
 		current_trace->reset(tr);
 
-	had_max_tr = current_trace && current_trace->use_max_tr;
+	had_max_tr = current_trace && current_trace->allocated_snapshot;
 	current_trace = &nop_trace;
 
 	if (had_max_tr && !t->use_max_tr) {
@@ -3238,6 +3253,8 @@ static int tracing_set_tracer(const char *buf)
 		 */
 		ring_buffer_resize(max_tr.buffer, 1, RING_BUFFER_ALL_CPUS);
 		set_buffer_entries(&max_tr, 1);
+		tracing_reset_online_cpus(&max_tr);
+		current_trace->allocated_snapshot = false;
 	}
 	destroy_trace_option_files(topts);
 
@@ -3248,6 +3265,7 @@ static int tracing_set_tracer(const char *buf)
 						   RING_BUFFER_ALL_CPUS);
 		if (ret < 0)
 			goto out;
+		t->allocated_snapshot = true;
 	}
 
 	if (t->init) {
@@ -4066,6 +4084,87 @@ static int tracing_clock_open(struct inode *inode, struct file *file)
 	return single_open(file, tracing_clock_show, NULL);
 }
 
+#ifdef CONFIG_TRACER_SNAPSHOT
+static int tracing_snapshot_open(struct inode *inode, struct file *file)
+{
+	struct trace_iterator *iter;
+	int ret = 0;
+
+	if (file->f_mode & FMODE_READ) {
+		iter = __tracing_open(inode, file, true);
+		if (IS_ERR(iter))
+			ret = PTR_ERR(iter);
+	}
+	return ret;
+}
+
+static ssize_t
+tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
+		       loff_t *ppos)
+{
+	unsigned long val;
+	int ret;
+
+	ret = tracing_update_buffers();
+	if (ret < 0)
+		return ret;
+
+	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
+	if (ret)
+		return ret;
+
+	mutex_lock(&trace_types_lock);
+
+	if (current_trace && current_trace->use_max_tr) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	switch (val) {
+	case 0:
+		if (current_trace->allocated_snapshot) {
+			/* free spare buffer */
+			ring_buffer_resize(max_tr.buffer, 1,
+					   RING_BUFFER_ALL_CPUS);
+			set_buffer_entries(&max_tr, 1);
+			tracing_reset_online_cpus(&max_tr);
+			current_trace->allocated_snapshot = false;
+		}
+		break;
+	case 1:
+		if (!current_trace->allocated_snapshot) {
+			/* allocate spare buffer */
+			ret = resize_buffer_duplicate_size(&max_tr,
+					&global_trace, RING_BUFFER_ALL_CPUS);
+			if (ret < 0)
+				break;
+			current_trace->allocated_snapshot = true;
+		}
+
+		local_irq_disable();
+		/* Now, we're going to swap */
+		update_max_tr(&global_trace, current, smp_processor_id());
+		local_irq_enable();
+		break;
+	default:
+		if (current_trace->allocated_snapshot)
+			tracing_reset_online_cpus(&max_tr);
+		else
+			ret = -EINVAL;
+		break;
+	}
+
+	if (ret >= 0) {
+		*ppos += cnt;
+		ret = cnt;
+	}
+out:
+	mutex_unlock(&trace_types_lock);
+	return ret;
+}
+#endif /* CONFIG_TRACER_SNAPSHOT */
+
+
 static const struct file_operations tracing_max_lat_fops = {
 	.open		= tracing_open_generic,
 	.read		= tracing_max_lat_read,
@@ -4122,6 +4221,16 @@ static const struct file_operations trace_clock_fops = {
 	.write		= tracing_clock_write,
 };
 
+#ifdef CONFIG_TRACER_SNAPSHOT
+static const struct file_operations snapshot_fops = {
+	.open		= tracing_snapshot_open,
+	.read		= seq_read,
+	.write		= tracing_snapshot_write,
+	.llseek		= tracing_seek,
+	.release	= tracing_release,
+};
+#endif /* CONFIG_TRACER_SNAPSHOT */
+
 struct ftrace_buffer_info {
 	struct trace_array	*tr;
 	void			*spare;
@@ -4921,6 +5030,11 @@ static __init int tracer_init_debugfs(void)
 			&ftrace_update_tot_cnt, &tracing_dyn_info_fops);
 #endif
 
+#ifdef CONFIG_TRACER_SNAPSHOT
+	trace_create_file("snapshot", 0644, d_tracer,
+			  (void *) TRACE_PIPE_ALL_CPU, &snapshot_fops);
+#endif
+
 	create_trace_options_dir();
 
 	for_each_tracing_cpu(cpu)
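Usage note (not part of the patch): the write semantics of the new "snapshot" file follow directly from tracing_snapshot_write() above. Writing 1 allocates the spare max_tr buffer on first use and swaps it with the live buffer via update_max_tr(), writing 0 frees the spare buffer again, and any other value clears the snapshot contents without freeing the buffer; writes fail with -EBUSY while a tracer that uses max_tr itself (use_max_tr) is active. Below is a minimal user-space sketch of that flow, assuming a kernel built with CONFIG_TRACER_SNAPSHOT and debugfs mounted at /sys/kernel/debug; the demo program is illustrative only.

/*
 * Illustrative demo, not part of this patch: take a snapshot,
 * dump it, then free the spare buffer again.
 */
#include <stdio.h>
#include <stdlib.h>

#define SNAPSHOT_PATH "/sys/kernel/debug/tracing/snapshot"

static void snapshot_ctl(const char *val)
{
	FILE *f = fopen(SNAPSHOT_PATH, "w");

	if (!f) {
		perror("fopen " SNAPSHOT_PATH);
		exit(EXIT_FAILURE);
	}
	/* "1": allocate (if needed) and swap; "0": free; else: clear */
	fprintf(f, "%s\n", val);
	fclose(f);
}

int main(void)
{
	char line[4096];
	FILE *f;

	snapshot_ctl("1");	/* snapshot the live ring buffer */

	/* reads go through __tracing_open(..., true), i.e. max_tr */
	f = fopen(SNAPSHOT_PATH, "r");
	if (!f) {
		perror("fopen " SNAPSHOT_PATH);
		return EXIT_FAILURE;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);

	snapshot_ctl("0");	/* free the spare buffer */
	return 0;
}

The same sequence can be driven from a shell with echo and cat. Unlike opening the main "trace" file, opening "snapshot" does not stop tracing; see the !iter->snapshot checks in __tracing_open() and tracing_release().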