/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include <xfs.h>

static kmem_zone_t	*ktrace_hdr_zone;	/* zone for trace buffer headers */
static kmem_zone_t	*ktrace_ent_zone;	/* zone for default-sized entry arrays */
static int		ktrace_zentries;	/* default number of entries per buffer */

void __init
ktrace_init(int zentries)
{
	ktrace_zentries = roundup_pow_of_two(zentries);

	ktrace_hdr_zone = kmem_zone_init(sizeof(ktrace_t),
					"ktrace_hdr");
	ASSERT(ktrace_hdr_zone);

	ktrace_ent_zone = kmem_zone_init(ktrace_zentries
					* sizeof(ktrace_entry_t),
					"ktrace_ent");
	ASSERT(ktrace_ent_zone);
}

void __exit
ktrace_uninit(void)
{
	kmem_zone_destroy(ktrace_hdr_zone);
	kmem_zone_destroy(ktrace_ent_zone);
}

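/*
 * Illustrative sketch (hypothetical names and entry count, compiled
 * out unless KTRACE_EXAMPLES is defined): the module init/exit paths
 * would call the pair above exactly once.
 */
#ifdef KTRACE_EXAMPLES
static int __init
example_module_init(void)
{
	ktrace_init(64);	/* default buffers hold 64 entries */
	return 0;
}

static void __exit
example_module_exit(void)
{
	ktrace_uninit();
}
#endif	/* KTRACE_EXAMPLES */
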
/*
 * ktrace_alloc()
 *
 * Allocate a ktrace header and enough buffering for the given
 * number of entries.  Round the number of entries up to a
 * power of 2 so we can do fast masking to get the index from
 * the atomic index counter.
 */
ktrace_t *
ktrace_alloc(int nentries, unsigned int __nocast sleep)
{
	ktrace_t	*ktp;
	ktrace_entry_t	*ktep;
	int		entries;

	ktp = kmem_zone_alloc(ktrace_hdr_zone, sleep);

	if (ktp == NULL) {
		/*
		 * KM_SLEEP callers don't expect failure.
		 */
		if (sleep & KM_SLEEP)
			panic("ktrace_alloc: NULL memory on KM_SLEEP request!");

		return NULL;
	}

	/*
	 * Default-sized entry buffers come from their own zone;
	 * any other size is allocated directly.
	 */
	entries = roundup_pow_of_two(nentries);
	if (entries == ktrace_zentries) {
		ktep = kmem_zone_zalloc(ktrace_ent_zone, sleep);
	} else {
		ktep = kmem_zalloc((entries * sizeof(*ktep)),
				   sleep | KM_LARGE);
	}

	if (ktep == NULL) {
		/*
		 * KM_SLEEP callers don't expect failure.
		 */
		if (sleep & KM_SLEEP)
			panic("ktrace_alloc: NULL memory on KM_SLEEP request!");

		kmem_free(ktp);

		return NULL;
	}

	ktp->kt_entries = ktep;
	ktp->kt_nentries = entries;
	ASSERT(is_power_of_2(entries));
	ktp->kt_index_mask = entries - 1;
	atomic_set(&ktp->kt_index, 0);
	ktp->kt_rollover = 0;
	return ktp;
}

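/*
 * Illustrative sketch (hypothetical names, compiled out unless
 * KTRACE_EXAMPLES is defined): a subsystem typically allocates one
 * trace buffer up front and frees it on teardown.
 */
#ifdef KTRACE_EXAMPLES
#define EXAMPLE_TRACE_SIZE	64

static ktrace_t	*example_trace_buf;

static void
example_trace_setup(void)
{
	/* with KM_SLEEP, ktrace_alloc() panics rather than return NULL */
	example_trace_buf = ktrace_alloc(EXAMPLE_TRACE_SIZE, KM_SLEEP);
}

static void
example_trace_teardown(void)
{
	ktrace_free(example_trace_buf);
	example_trace_buf = NULL;
}
#endif	/* KTRACE_EXAMPLES */
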
/*
 * ktrace_free()
 *
 * Free up the ktrace header and buffer.  It is up to the caller
 * to ensure that no-one is referencing it.
 */
void
ktrace_free(ktrace_t *ktp)
{
	if (ktp == NULL)
		return;

	/*
	 * Default-sized entry buffers go back to their zone;
	 * anything else was allocated directly and is freed directly.
	 */
	if (ktp->kt_nentries == ktrace_zentries)
		kmem_zone_free(ktrace_ent_zone, ktp->kt_entries);
	else
		kmem_free(ktp->kt_entries);

	kmem_zone_free(ktrace_hdr_zone, ktp);
}


/*
 * Enter the given values into the "next" entry in the trace buffer.
 * kt_index is always the index of the next entry to be filled.
 */
void
ktrace_enter(
	ktrace_t	*ktp,
	void		*val0,
	void		*val1,
	void		*val2,
	void		*val3,
	void		*val4,
	void		*val5,
	void		*val6,
	void		*val7,
	void		*val8,
	void		*val9,
	void		*val10,
	void		*val11,
	void		*val12,
	void		*val13,
	void		*val14,
	void		*val15)
{
	int		index;
	ktrace_entry_t	*ktep;

	ASSERT(ktp != NULL);

	/*
	 * Grab an entry by pushing the index up to the next one.
	 */
	index = atomic_add_return(1, &ktp->kt_index);
	index = (index - 1) & ktp->kt_index_mask;
	if (!ktp->kt_rollover && index == ktp->kt_nentries - 1)
		ktp->kt_rollover = 1;

	ASSERT((index >= 0) && (index < ktp->kt_nentries));

	ktep = &(ktp->kt_entries[index]);

	ktep->val[0]  = val0;
	ktep->val[1]  = val1;
	ktep->val[2]  = val2;
	ktep->val[3]  = val3;
	ktep->val[4]  = val4;
	ktep->val[5]  = val5;
	ktep->val[6]  = val6;
	ktep->val[7]  = val7;
	ktep->val[8]  = val8;
	ktep->val[9]  = val9;
	ktep->val[10] = val10;
	ktep->val[11] = val11;
	ktep->val[12] = val12;
	ktep->val[13] = val13;
	ktep->val[14] = val14;
	ktep->val[15] = val15;
}

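/*
 * Illustrative sketch (hypothetical wrapper, compiled out unless
 * KTRACE_EXAMPLES is defined): callers usually hide the fixed
 * 16-slot format behind a helper that casts its arguments to
 * void * and pads the unused slots with NULL.
 */
#ifdef KTRACE_EXAMPLES
static void
example_trace(int tag, void *obj, int line)
{
	ktrace_enter(example_trace_buf,
		     (void *)(__psint_t)tag,	/* event identifier */
		     obj,			/* object being traced */
		     (void *)(__psint_t)line,	/* call site */
		     NULL, NULL, NULL, NULL, NULL,
		     NULL, NULL, NULL, NULL, NULL,
		     NULL, NULL, NULL);
}
#endif	/* KTRACE_EXAMPLES */
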
/*
 * Return the number of entries in the trace buffer.
 */
int
ktrace_nentries(
	ktrace_t	*ktp)
{
	int		index;

	if (ktp == NULL)
		return 0;

	index = atomic_read(&ktp->kt_index) & ktp->kt_index_mask;
	return (ktp->kt_rollover ? ktp->kt_nentries : index);
}

/*
 * ktrace_first()
 *
 * This is used to find the start of the trace buffer.
 * In conjunction with ktrace_next() it can be used to
 * iterate through the entire trace buffer.  This code does
 * not do any locking because it is assumed that it is called
 * from the debugger.
 *
 * The caller must pass in a pointer to a ktrace_snap
 * structure in which we will keep some state used to
 * iterate through the buffer.  This state must not be touched
 * by any code outside of this module.
 */
ktrace_entry_t *
ktrace_first(ktrace_t *ktp, ktrace_snap_t *ktsp)
{
	ktrace_entry_t	*ktep;
	int		index;
	int		nentries;

	if (ktp->kt_rollover)
		index = atomic_read(&ktp->kt_index) & ktp->kt_index_mask;
	else
		index = 0;

	ktsp->ks_start = index;
	ktep = &(ktp->kt_entries[index]);

	nentries = ktrace_nentries(ktp);
	index++;
	if (index < nentries) {
		ktsp->ks_index = index;
	} else {
		ktsp->ks_index = 0;
		if (index > nentries)
			ktep = NULL;
	}
	return ktep;
}

/*
 * ktrace_next()
 *
 * This is used to iterate through the entries of the given
 * trace buffer.  The caller must pass in the ktrace_snap_t
 * structure initialized by ktrace_first().  The return value
 * will be either a pointer to the next ktrace_entry or NULL
 * if all of the entries have been traversed.
 */
ktrace_entry_t *
ktrace_next(
	ktrace_t	*ktp,
	ktrace_snap_t	*ktsp)
{
	int		index;
	ktrace_entry_t	*ktep;

	index = ktsp->ks_index;
	if (index == ktsp->ks_start) {
		ktep = NULL;
	} else {
		ktep = &ktp->kt_entries[index];
	}

	index++;
	if (index == ktrace_nentries(ktp)) {
		ktsp->ks_index = 0;
	} else {
		ktsp->ks_index = index;
	}

	return ktep;
}

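/*
 * Illustrative sketch (hypothetical name, compiled out unless
 * KTRACE_EXAMPLES is defined): dumping a whole buffer oldest entry
 * first.  The ktrace_snap_t cursor lives on the caller's stack.
 */
#ifdef KTRACE_EXAMPLES
static void
example_dump(ktrace_t *ktp)
{
	ktrace_snap_t	snap;
	ktrace_entry_t	*ktep;

	for (ktep = ktrace_first(ktp, &snap);
	     ktep != NULL;
	     ktep = ktrace_next(ktp, &snap))
		printk(KERN_DEBUG "ktrace: %p %p %p\n",
		       ktep->val[0], ktep->val[1], ktep->val[2]);
}
#endif	/* KTRACE_EXAMPLES */
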
/*
 * ktrace_skip()
 *
 * Skip the next "count" entries and return the entry after that.
 * Return NULL if this causes us to iterate past the beginning again.
 */
ktrace_entry_t *
ktrace_skip(
	ktrace_t	*ktp,
	int		count,
	ktrace_snap_t	*ktsp)
{
	int		index;
	int		new_index;
	ktrace_entry_t	*ktep;
	int		nentries = ktrace_nentries(ktp);

	/* empty buffer: nothing to skip over */
	if (nentries == 0)
		return NULL;

	index = ktsp->ks_index;
	new_index = index + count;
	while (new_index >= nentries) {
		new_index -= nentries;
	}
	if (index == ktsp->ks_start) {
		/*
		 * We've iterated around to the start, so we're done.
		 */
		ktep = NULL;
	} else if (count >= (ktsp->ks_start - index + nentries) % nentries) {
		/*
		 * Skipping "count" entries reaches or passes the
		 * start again, so we're done.
		 */
		ktep = NULL;
		ktsp->ks_index = ktsp->ks_start;
	} else {
		ktep = &(ktp->kt_entries[new_index]);
		new_index++;
		if (new_index == nentries) {
			ktsp->ks_index = 0;
		} else {
			ktsp->ks_index = new_index;
		}
	}
	return ktep;
}
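
/*
 * Illustrative sketch (hypothetical name, compiled out unless
 * KTRACE_EXAMPLES is defined): sampling every Nth entry by skipping
 * stride - 1 entries between reads.
 */
#ifdef KTRACE_EXAMPLES
static void
example_sample(ktrace_t *ktp, int stride)
{
	ktrace_snap_t	snap;
	ktrace_entry_t	*ktep;

	for (ktep = ktrace_first(ktp, &snap);
	     ktep != NULL;
	     ktep = ktrace_skip(ktp, stride - 1, &snap))
		printk(KERN_DEBUG "ktrace sample: %p\n", ktep->val[0]);
}
#endif	/* KTRACE_EXAMPLES */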