Skip to content

Commit 3928a8a

Browse files
rostedt authored and Ingo Molnar committed
ftrace: make work with new ring buffer
This patch ports ftrace over to the new ring buffer. Signed-off-by: Steven Rostedt <srostedt@redhat.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
1 parent ed56829 commit 3928a8a

File tree

11 files changed

+288
-798
lines changed

11 files changed

+288
-798
lines changed

kernel/trace/trace.c

Lines changed: 234 additions & 698 deletions
Large diffs are not rendered by default.

kernel/trace/trace.h

Lines changed: 5 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@
55
#include <asm/atomic.h>
66
#include <linux/sched.h>
77
#include <linux/clocksource.h>
8+
#include <linux/ring_buffer.h>
89
#include <linux/mmiotrace.h>
910
#include <linux/ftrace.h>
1011

@@ -102,7 +103,6 @@ struct trace_field {
102103
char flags;
103104
char preempt_count;
104105
int pid;
105-
cycle_t t;
106106
union {
107107
struct ftrace_entry fn;
108108
struct ctx_switch_entry ctx;
@@ -139,16 +139,9 @@ struct trace_entry {
139139
* the trace, etc.)
140140
*/
141141
struct trace_array_cpu {
142-
struct list_head trace_pages;
143142
atomic_t disabled;
144-
raw_spinlock_t lock;
145-
struct lock_class_key lock_key;
146143

147144
/* these fields get copied into max-trace: */
148-
unsigned trace_head_idx;
149-
unsigned trace_tail_idx;
150-
void *trace_head; /* producer */
151-
void *trace_tail; /* consumer */
152145
unsigned long trace_idx;
153146
unsigned long overrun;
154147
unsigned long saved_latency;
@@ -172,6 +165,7 @@ struct trace_iterator;
172165
* They have on/off state as well:
173166
*/
174167
struct trace_array {
168+
struct ring_buffer *buffer;
175169
unsigned long entries;
176170
long ctrl;
177171
int cpu;
@@ -219,27 +213,21 @@ struct trace_iterator {
219213
struct trace_array *tr;
220214
struct tracer *trace;
221215
void *private;
222-
long last_overrun[NR_CPUS];
223-
long overrun[NR_CPUS];
216+
struct ring_buffer_iter *buffer_iter[NR_CPUS];
224217

225218
/* The below is zeroed out in pipe_read */
226219
struct trace_seq seq;
227220
struct trace_entry *ent;
228221
int cpu;
229-
230-
struct trace_entry *prev_ent;
231-
int prev_cpu;
222+
u64 ts;
232223

233224
unsigned long iter_flags;
234225
loff_t pos;
235-
unsigned long next_idx[NR_CPUS];
236-
struct list_head *next_page[NR_CPUS];
237-
unsigned next_page_idx[NR_CPUS];
238226
long idx;
239227
};
240228

241229
void trace_wake_up(void);
242-
void tracing_reset(struct trace_array_cpu *data);
230+
void tracing_reset(struct trace_array *tr, int cpu);
243231
int tracing_open_generic(struct inode *inode, struct file *filp);
244232
struct dentry *tracing_init_dentry(void);
245233
void init_tracer_sysprof_debugfs(struct dentry *d_tracer);

kernel/trace/trace_boot.c

Lines changed: 9 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,7 @@ static void boot_trace_init(struct trace_array *tr)
3434
trace_boot_enabled = 0;
3535

3636
for_each_cpu_mask(cpu, cpu_possible_map)
37-
tracing_reset(tr->data[cpu]);
37+
tracing_reset(tr, cpu);
3838
}
3939

4040
static void boot_trace_ctrl_update(struct trace_array *tr)
@@ -74,6 +74,7 @@ struct tracer boot_tracer __read_mostly =
7474

7575
void trace_boot(struct boot_trace *it)
7676
{
77+
struct ring_buffer_event *event;
7778
struct trace_entry *entry;
7879
struct trace_array_cpu *data;
7980
unsigned long irq_flags;
@@ -85,17 +86,18 @@ void trace_boot(struct boot_trace *it)
8586
preempt_disable();
8687
data = tr->data[smp_processor_id()];
8788

88-
raw_local_irq_save(irq_flags);
89-
__raw_spin_lock(&data->lock);
90-
91-
entry = tracing_get_trace_entry(tr, data);
89+
event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
90+
&irq_flags);
91+
if (!event)
92+
goto out;
93+
entry = ring_buffer_event_data(event);
9294
tracing_generic_entry_update(entry, 0);
9395
entry->type = TRACE_BOOT;
9496
entry->field.initcall = *it;
97+
ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
9598

96-
__raw_spin_unlock(&data->lock);
97-
raw_local_irq_restore(irq_flags);
9899
trace_wake_up();
99100

101+
out:
100102
preempt_enable();
101103
}

kernel/trace/trace_functions.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ static void function_reset(struct trace_array *tr)
2323
tr->time_start = ftrace_now(tr->cpu);
2424

2525
for_each_online_cpu(cpu)
26-
tracing_reset(tr->data[cpu]);
26+
tracing_reset(tr, cpu);
2727
}
2828

2929
static void start_function_trace(struct trace_array *tr)

kernel/trace/trace_irqsoff.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -173,7 +173,7 @@ check_critical_timing(struct trace_array *tr,
173173
out:
174174
data->critical_sequence = max_sequence;
175175
data->preempt_timestamp = ftrace_now(cpu);
176-
tracing_reset(data);
176+
tracing_reset(tr, cpu);
177177
trace_function(tr, data, CALLER_ADDR0, parent_ip, flags);
178178
}
179179

@@ -203,7 +203,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip)
203203
data->critical_sequence = max_sequence;
204204
data->preempt_timestamp = ftrace_now(cpu);
205205
data->critical_start = parent_ip ? : ip;
206-
tracing_reset(data);
206+
tracing_reset(tr, cpu);
207207

208208
local_save_flags(flags);
209209

@@ -234,7 +234,7 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip)
234234

235235
data = tr->data[cpu];
236236

237-
if (unlikely(!data) || unlikely(!head_page(data)) ||
237+
if (unlikely(!data) ||
238238
!data->critical_start || atomic_read(&data->disabled))
239239
return;
240240

kernel/trace/trace_mmiotrace.c

Lines changed: 22 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,7 @@ static void mmio_reset_data(struct trace_array *tr)
2727
tr->time_start = ftrace_now(tr->cpu);
2828

2929
for_each_online_cpu(cpu)
30-
tracing_reset(tr->data[cpu]);
30+
tracing_reset(tr, cpu);
3131
}
3232

3333
static void mmio_trace_init(struct trace_array *tr)
@@ -130,10 +130,14 @@ static unsigned long count_overruns(struct trace_iterator *iter)
130130
{
131131
int cpu;
132132
unsigned long cnt = 0;
133+
/* FIXME: */
134+
#if 0
133135
for_each_online_cpu(cpu) {
134136
cnt += iter->overrun[cpu];
135137
iter->overrun[cpu] = 0;
136138
}
139+
#endif
140+
(void)cpu;
137141
return cnt;
138142
}
139143

@@ -176,7 +180,7 @@ static int mmio_print_rw(struct trace_iterator *iter)
176180
struct trace_entry *entry = iter->ent;
177181
struct mmiotrace_rw *rw = &entry->field.mmiorw;
178182
struct trace_seq *s = &iter->seq;
179-
unsigned long long t = ns2usecs(entry->field.t);
183+
unsigned long long t = ns2usecs(iter->ts);
180184
unsigned long usec_rem = do_div(t, 1000000ULL);
181185
unsigned secs = (unsigned long)t;
182186
int ret = 1;
@@ -218,7 +222,7 @@ static int mmio_print_map(struct trace_iterator *iter)
218222
struct trace_entry *entry = iter->ent;
219223
struct mmiotrace_map *m = &entry->field.mmiomap;
220224
struct trace_seq *s = &iter->seq;
221-
unsigned long long t = ns2usecs(entry->field.t);
225+
unsigned long long t = ns2usecs(iter->ts);
222226
unsigned long usec_rem = do_div(t, 1000000ULL);
223227
unsigned secs = (unsigned long)t;
224228
int ret = 1;
@@ -250,7 +254,7 @@ static int mmio_print_mark(struct trace_iterator *iter)
250254
struct trace_entry *entry = iter->ent;
251255
const char *msg = entry->field.print.buf;
252256
struct trace_seq *s = &iter->seq;
253-
unsigned long long t = ns2usecs(entry->field.t);
257+
unsigned long long t = ns2usecs(iter->ts);
254258
unsigned long usec_rem = do_div(t, 1000000ULL);
255259
unsigned secs = (unsigned long)t;
256260
int ret;
@@ -303,19 +307,19 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
303307
struct trace_array_cpu *data,
304308
struct mmiotrace_rw *rw)
305309
{
310+
struct ring_buffer_event *event;
306311
struct trace_entry *entry;
307312
unsigned long irq_flags;
308313

309-
raw_local_irq_save(irq_flags);
310-
__raw_spin_lock(&data->lock);
311-
312-
entry = tracing_get_trace_entry(tr, data);
314+
event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
315+
&irq_flags);
316+
if (!event)
317+
return;
318+
entry = ring_buffer_event_data(event);
313319
tracing_generic_entry_update(entry, 0);
314320
entry->type = TRACE_MMIO_RW;
315321
entry->field.mmiorw = *rw;
316-
317-
__raw_spin_unlock(&data->lock);
318-
raw_local_irq_restore(irq_flags);
322+
ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
319323

320324
trace_wake_up();
321325
}
@@ -331,19 +335,19 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
331335
struct trace_array_cpu *data,
332336
struct mmiotrace_map *map)
333337
{
338+
struct ring_buffer_event *event;
334339
struct trace_entry *entry;
335340
unsigned long irq_flags;
336341

337-
raw_local_irq_save(irq_flags);
338-
__raw_spin_lock(&data->lock);
339-
340-
entry = tracing_get_trace_entry(tr, data);
342+
event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
343+
&irq_flags);
344+
if (!event)
345+
return;
346+
entry = ring_buffer_event_data(event);
341347
tracing_generic_entry_update(entry, 0);
342348
entry->type = TRACE_MMIO_MAP;
343349
entry->field.mmiomap = *map;
344-
345-
__raw_spin_unlock(&data->lock);
346-
raw_local_irq_restore(irq_flags);
350+
ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
347351

348352
trace_wake_up();
349353
}

kernel/trace/trace_nop.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,7 @@ static void nop_trace_init(struct trace_array *tr)
3030
ctx_trace = tr;
3131

3232
for_each_online_cpu(cpu)
33-
tracing_reset(tr->data[cpu]);
33+
tracing_reset(tr, cpu);
3434

3535
if (tr->ctrl)
3636
start_nop_trace(tr);

kernel/trace/trace_sched_switch.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -81,7 +81,7 @@ static void sched_switch_reset(struct trace_array *tr)
8181
tr->time_start = ftrace_now(tr->cpu);
8282

8383
for_each_online_cpu(cpu)
84-
tracing_reset(tr->data[cpu]);
84+
tracing_reset(tr, cpu);
8585
}
8686

8787
static int tracing_sched_register(void)

kernel/trace/trace_sched_wakeup.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -191,7 +191,7 @@ static void __wakeup_reset(struct trace_array *tr)
191191

192192
for_each_possible_cpu(cpu) {
193193
data = tr->data[cpu];
194-
tracing_reset(data);
194+
tracing_reset(tr, cpu);
195195
}
196196

197197
wakeup_cpu = -1;

kernel/trace/trace_selftest.c

Lines changed: 10 additions & 50 deletions
Original file line numberDiff line numberDiff line change
@@ -18,58 +18,20 @@ static inline int trace_valid_entry(struct trace_entry *entry)
1818
return 0;
1919
}
2020

21-
static int
22-
trace_test_buffer_cpu(struct trace_array *tr, struct trace_array_cpu *data)
21+
static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
2322
{
24-
struct trace_entry *entries;
25-
struct page *page;
26-
int idx = 0;
27-
int i;
23+
struct ring_buffer_event *event;
24+
struct trace_entry *entry;
2825

29-
BUG_ON(list_empty(&data->trace_pages));
30-
page = list_entry(data->trace_pages.next, struct page, lru);
31-
entries = page_address(page);
26+
while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
27+
entry = ring_buffer_event_data(event);
3228

33-
check_pages(data);
34-
if (head_page(data) != entries)
35-
goto failed;
36-
37-
/*
38-
* The starting trace buffer always has valid elements,
39-
* if any element exists.
40-
*/
41-
entries = head_page(data);
42-
43-
for (i = 0; i < tr->entries; i++) {
44-
45-
if (i < data->trace_idx && !trace_valid_entry(&entries[idx])) {
29+
if (!trace_valid_entry(entry)) {
4630
printk(KERN_CONT ".. invalid entry %d ",
47-
entries[idx].type);
31+
entry->type);
4832
goto failed;
4933
}
50-
51-
idx++;
52-
if (idx >= ENTRIES_PER_PAGE) {
53-
page = virt_to_page(entries);
54-
if (page->lru.next == &data->trace_pages) {
55-
if (i != tr->entries - 1) {
56-
printk(KERN_CONT ".. entries buffer mismatch");
57-
goto failed;
58-
}
59-
} else {
60-
page = list_entry(page->lru.next, struct page, lru);
61-
entries = page_address(page);
62-
}
63-
idx = 0;
64-
}
65-
}
66-
67-
page = virt_to_page(entries);
68-
if (page->lru.next != &data->trace_pages) {
69-
printk(KERN_CONT ".. too many entries");
70-
goto failed;
7134
}
72-
7335
return 0;
7436

7537
failed:
@@ -91,13 +53,11 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
9153
/* Don't allow flipping of max traces now */
9254
raw_local_irq_save(flags);
9355
__raw_spin_lock(&ftrace_max_lock);
94-
for_each_possible_cpu(cpu) {
95-
if (!head_page(tr->data[cpu]))
96-
continue;
9756

98-
cnt += tr->data[cpu]->trace_idx;
57+
cnt = ring_buffer_entries(tr->buffer);
9958

100-
ret = trace_test_buffer_cpu(tr, tr->data[cpu]);
59+
for_each_possible_cpu(cpu) {
60+
ret = trace_test_buffer_cpu(tr, cpu);
10161
if (ret)
10262
break;
10363
}

0 commit comments

Comments (0)