kern/src/oprofile/event_buffer.c
/**
 * @file event_buffer.c
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 *
 * This is the global event buffer that the user-space
 * daemon reads from. The event buffer is an untyped array
 * of unsigned longs. Entries are prefixed by the
 * escape value ESCAPE_CODE followed by an identifying code.
 */

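/*
 * A sketch of how a producer typically emits such an escaped record.
 * The identifying codes (e.g. CPU_SWITCH_CODE) are defined by the
 * sync/buffer code rather than in this file, so treat this as
 * illustrative only:
 *
 *	add_event_entry(ESCAPE_CODE);
 *	add_event_entry(CPU_SWITCH_CODE);
 *	add_event_entry(cpu);
 */
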
#include "oprof.h"
#include "event_buffer.h"
#include "oprofile_stats.h"

DEFINE_MUTEX(buffer_mutex);

static unsigned long buffer_opened;
static DECLARE_WAIT_QUEUE_HEAD(buffer_wait);
static unsigned long *event_buffer;
static unsigned long buffer_size;
static unsigned long buffer_watershed;
static size_t buffer_pos;
/* atomic_t because wait_event checks it outside of buffer_mutex */
static atomic_t buffer_ready = ATOMIC_INIT(0);

/*
 * Add an entry to the event buffer. When we get near to the end we
 * wake up the process sleeping on the read() of the file. To protect
 * the event_buffer this function may only be called when buffer_mutex
 * is held.
 */
void add_event_entry(unsigned long value)
{
	/*
	 * This shouldn't happen since all workqueues or handlers are
	 * canceled or flushed before the event buffer is freed.
	 */
	if (!event_buffer) {
		WARN_ON_ONCE(1);
		return;
	}

	if (buffer_pos == buffer_size) {
		atomic_inc(&oprofile_stats.event_lost_overflow);
		return;
	}

	event_buffer[buffer_pos] = value;
	if (++buffer_pos == buffer_size - buffer_watershed) {
		atomic_set(&buffer_ready, 1);
		wake_up(&buffer_wait);
	}
}


/* Wake up the waiting process if any. This happens
 * on "echo 0 >/dev/oprofile/enable" so the daemon
 * processes the data remaining in the event buffer.
 */
void wake_up_buffer_waiter(void)
{
	mutex_lock(&buffer_mutex);
	atomic_set(&buffer_ready, 1);
	wake_up(&buffer_wait);
	mutex_unlock(&buffer_mutex);
}


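/*
 * Allocate the event buffer. The size and watershed are snapshotted
 * from the oprofilefs-controlled values under oprofilefs_lock; the
 * watershed must be strictly smaller than the buffer itself.
 */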
int alloc_event_buffer(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&oprofilefs_lock, flags);
	buffer_size = oprofile_buffer_size;
	buffer_watershed = oprofile_buffer_watershed;
	raw_spin_unlock_irqrestore(&oprofilefs_lock, flags);

	if (buffer_watershed >= buffer_size)
		return -EINVAL;

	buffer_pos = 0;
	event_buffer = vmalloc(sizeof(unsigned long) * buffer_size);
	if (!event_buffer)
		return -ENOMEM;

	return 0;
}


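/*
 * Free the event buffer. Taking buffer_mutex ensures that concurrent
 * add_event_entry() and read() callers observe event_buffer == NULL
 * instead of touching freed memory.
 */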
void free_event_buffer(void)
{
	mutex_lock(&buffer_mutex);
	vfree(event_buffer);
	buffer_pos = 0;
	event_buffer = NULL;
	mutex_unlock(&buffer_mutex);
}


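/*
 * Open the event file. Only one privileged (CAP_SYS_ADMIN) opener is
 * allowed at a time, enforced by the buffer_opened bit lock. Opening
 * registers with dcookies and runs oprofile_setup(); sampling itself
 * is started later from userspace via /dev/oprofile/enable.
 */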
static int event_buffer_open(struct inode *inode, struct file *file)
{
	int err = -EPERM;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (test_and_set_bit_lock(0, &buffer_opened))
		return -EBUSY;

	/* Register as a user of dcookies
	 * to ensure they persist for the lifetime of
	 * the open event file
	 */
	err = -EINVAL;
	file->private_data = dcookie_register();
	if (!file->private_data)
		goto out;

	if ((err = oprofile_setup()))
		goto fail;

	/* NB: the actual start happens from userspace
	 * echo 1 >/dev/oprofile/enable
	 */

	return nonseekable_open(inode, file);

fail:
	dcookie_unregister(file->private_data);
out:
	__clear_bit_unlock(0, &buffer_opened);
	return err;
}


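/*
 * Release the event file: stop and shut down profiling, drop the
 * dcookie registration and reset the buffer state so the file can be
 * opened again.
 */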
static int event_buffer_release(struct inode *inode, struct file *file)
{
	oprofile_stop();
	oprofile_shutdown();
	dcookie_unregister(file->private_data);
	buffer_pos = 0;
	atomic_set(&buffer_ready, 0);
	__clear_bit_unlock(0, &buffer_opened);
	return 0;
}


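/*
 * Read the event buffer. Only whole-buffer reads at offset 0 are
 * supported; the caller blocks until the buffer is ready (the
 * watershed was reached or a wakeup was forced), then the used
 * portion is copied out and the buffer position is reset.
 */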
static ssize_t event_buffer_read(struct file *file, char __user *buf,
				 size_t count, loff_t *offset)
{
	int retval = -EINVAL;
	size_t const max = buffer_size * sizeof(unsigned long);

	/* handling partial reads is more trouble than it's worth */
	if (count != max || *offset)
		return -EINVAL;

	wait_event_interruptible(buffer_wait, atomic_read(&buffer_ready));

	if (signal_pending(current))
		return -EINTR;

	/* can't currently happen */
	if (!atomic_read(&buffer_ready))
		return -EAGAIN;

	mutex_lock(&buffer_mutex);

	/* May happen if the buffer is freed during pending reads. */
	if (!event_buffer) {
		retval = -EINTR;
		goto out;
	}

	atomic_set(&buffer_ready, 0);

	retval = -EFAULT;

	count = buffer_pos * sizeof(unsigned long);

	if (copy_to_user(buf, event_buffer, count))
		goto out;

	retval = count;
	buffer_pos = 0;

out:
	mutex_unlock(&buffer_mutex);
	return retval;
}