1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
|
/*
* Infrastructure for statistic tracing (histogram output).
*
* Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
*
* Based on the code from trace_branch.c which is
* Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
*
*/
#include <linux/list.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include "trace.h"
/*
 * List of stat entries from a tracer.
 * One node per stat item; nodes are kfree()d between file sessions.
 */
struct trace_stat_list {
	struct list_head list;	/* links this node into the global stat_list */
	void *stat;		/* opaque per-item cookie returned by the tracer's
				 * stat_start()/stat_next() callbacks */
};
/*
 * Head of the global stat list. The head node itself carries no stat;
 * only the entries linked after it do.
 */
static struct trace_stat_list stat_list;

/*
 * This is a copy of the current tracer to avoid racy
 * and dangerous output while the current tracer is
 * switched.
 */
static struct tracer current_tracer;

/*
 * Protect both the current tracer and the global
 * stat list.
 */
static DEFINE_MUTEX(stat_list_mutex);
static void reset_stat_list(void)
{
struct trace_stat_list *node;
struct list_head *next;
if (list_empty(&stat_list.list))
return;
node = list_entry(stat_list.list.next, struct trace_stat_list, list);
next = node->list.next;
while (&node->list != next) {
kfree(node);
node = list_entry(next, struct trace_stat_list, list);
}
kfree(node);
INIT_LIST_HEAD(&stat_list.list);
}
/*
 * Snapshot the given tracer's callbacks into our private copy so that
 * a later tracer switch cannot race with in-progress stat output.
 */
void init_tracer_stat(struct tracer *trace)
{
	mutex_lock(&stat_list_mutex);
	current_tracer = *trace;
	mutex_unlock(&stat_list_mutex);
}
/*
 * Fallback comparator used when a tracer provides no stat_cmp callback.
 * It always reports "greater", which makes every new entry insert
 * immediately at the position reached first during the list scan,
 * i.e. effectively appending without real sorting.
 */
static int dummy_cmp(void *p1, void *p2)
{
	return 1;
}
/*
 * Initialize the stat list at each trace_stat file opening.
 * All of these copies and sorting are required on all opening
 * since the stats could have changed between two file sessions.
 *
 * Returns 0 on success (possibly with an empty list if the tracer
 * exposes no stats), -ENOMEM on allocation failure.
 *
 * Two defects in the previous version are fixed here:
 *  - if stat_start() returned NULL, a node with a NULL ->stat was
 *    left on the list and later handed to stat_show();
 *  - in the loop, a node was allocated *before* stat_next() was
 *    consulted, so the final allocation (the end-of-iteration one)
 *    was never freed nor linked — a per-open memory leak.
 * We now fetch the stat pointer first and allocate only when there
 * is actually something to insert.
 */
static int stat_seq_init(void)
{
	struct trace_stat_list *iter_entry, *new_entry;
	void *stat;
	void *prev_stat;
	int ret = 0;
	int i;

	mutex_lock(&stat_list_mutex);
	reset_stat_list();

	if (!current_tracer.stat_start || !current_tracer.stat_next ||
					!current_tracer.stat_show)
		goto exit;

	if (!current_tracer.stat_cmp)
		current_tracer.stat_cmp = dummy_cmp;

	/* No stats at all? Leave the list empty and report success. */
	stat = current_tracer.stat_start();
	if (!stat)
		goto exit;

	/*
	 * The first entry. Actually this is the second, but the first
	 * one (the stat_list head) is pointless.
	 */
	new_entry = kmalloc(sizeof(struct trace_stat_list), GFP_KERNEL);
	if (!new_entry) {
		ret = -ENOMEM;
		goto exit;
	}

	INIT_LIST_HEAD(&new_entry->list);
	list_add(&new_entry->list, &stat_list.list);
	new_entry->stat = stat;

	prev_stat = new_entry->stat;

	/*
	 * Iterate over the tracer stat entries and store them in a sorted
	 * list.
	 */
	for (i = 1; ; i++) {
		stat = current_tracer.stat_next(prev_stat, i);

		/* End of iteration */
		if (!stat)
			break;

		new_entry = kmalloc(sizeof(struct trace_stat_list), GFP_KERNEL);
		if (!new_entry) {
			ret = -ENOMEM;
			goto exit_free_list;
		}

		INIT_LIST_HEAD(&new_entry->list);
		new_entry->stat = stat;

		list_for_each_entry(iter_entry, &stat_list.list, list) {
			/* Insertion with a descendent sorting */
			if (current_tracer.stat_cmp(new_entry->stat,
						iter_entry->stat) > 0) {
				list_add_tail(&new_entry->list,
						&iter_entry->list);
				break;

			/* The current smaller value */
			} else if (list_is_last(&iter_entry->list,
						&stat_list.list)) {
				list_add(&new_entry->list, &iter_entry->list);
				break;
			}
		}

		prev_stat = new_entry->stat;
	}
exit:
	mutex_unlock(&stat_list_mutex);
	return ret;

exit_free_list:
	reset_stat_list();
	mutex_unlock(&stat_list_mutex);
	return ret;
}
/*
 * seq_file start callback: take the lock that freezes both the tracer
 * copy and the stat list for the whole read, emit headers at the top
 * of the file, and hand back the element at *pos.
 */
static void *stat_seq_start(struct seq_file *s, loff_t *pos)
{
	struct trace_stat_list *head = s->private;

	/* Prevent from tracer switch or stat_list modification */
	mutex_lock(&stat_list_mutex);

	/* Headers go out only at the very beginning of the file */
	if (!*pos && current_tracer.stat_headers)
		current_tracer.stat_headers(s);

	return seq_list_start(&head->list, *pos);
}
/* seq_file next callback: advance to the following stat entry. */
static void *stat_seq_next(struct seq_file *s, void *p, loff_t *pos)
{
	struct trace_stat_list *head = s->private;

	return seq_list_next(p, &head->list, pos);
}
/*
 * seq_file stop callback: reading is done, allow tracer switches and
 * stat list updates again (pairs with the lock in stat_seq_start()).
 */
static void stat_seq_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&stat_list_mutex);
}
/*
 * seq_file show callback: delegate formatting of one entry to the
 * tracer's own stat_show() hook.
 */
static int stat_seq_show(struct seq_file *s, void *v)
{
	struct trace_stat_list *entry;

	entry = list_entry(v, struct trace_stat_list, list);
	return current_tracer.stat_show(s, entry->stat);
}
/* seq_file iteration callbacks backing the trace_stat debugfs file */
static const struct seq_operations trace_stat_seq_ops = {
	.start = stat_seq_start,
	.next = stat_seq_next,
	.stop = stat_seq_stop,
	.show = stat_seq_show
};
/*
 * Open callback: set up the seq_file iterator over the global stat
 * list, then (re)build that list from the current tracer's stats.
 *
 * Fix: if stat_seq_init() fails after seq_open() succeeded, the VFS
 * never invokes ->release on a failed open, so the seq_file allocated
 * by seq_open() would leak — release it here on that error path.
 */
static int tracing_stat_open(struct inode *inode, struct file *file)
{
	struct seq_file *m;
	int ret;

	ret = seq_open(file, &trace_stat_seq_ops);
	if (ret)
		return ret;

	m = file->private_data;
	m->private = &stat_list;

	ret = stat_seq_init();
	if (ret)
		seq_release(inode, file);

	return ret;
}
/*
 * Release callback: drop the now useless stat list to avoid holding
 * its memory between sessions, and tear down the seq_file state.
 *
 * Fix: the previous version never called seq_release(), so the
 * seq_file (and its buffer) allocated by seq_open() leaked on every
 * close of the trace_stat file.
 */
static int tracing_stat_release(struct inode *i, struct file *f)
{
	mutex_lock(&stat_list_mutex);
	reset_stat_list();
	mutex_unlock(&stat_list_mutex);

	return seq_release(i, f);
}
/* File operations for the trace_stat debugfs entry */
static const struct file_operations tracing_stat_fops = {
	.open = tracing_stat_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = tracing_stat_release
};
/*
 * Boot-time setup: initialize the global stat list head and create the
 * read-only "trace_stat" file in the tracing debugfs directory.
 * Always returns 0; a failed debugfs creation is only warned about.
 */
static int __init tracing_stat_init(void)
{
	struct dentry *entry;

	INIT_LIST_HEAD(&stat_list.list);

	entry = debugfs_create_file("trace_stat", 0444,
				    tracing_init_dentry(), NULL,
				    &tracing_stat_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'trace_stat' entry\n");

	return 0;
}

fs_initcall(tracing_stat_init);
|