
#include <linux/wait.h>
#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/device.h>


static struct class *bdi_class;

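/*
 * sysfs store handler for the per-bdi "read_ahead_kb" attribute: parse a
 * decimal kilobyte count and convert it to a number of pages in
 * bdi->ra_pages.
 */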
static ssize_t read_ahead_kb_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	char *end;
	unsigned long read_ahead_kb;
	ssize_t ret = -EINVAL;

	read_ahead_kb = simple_strtoul(buf, &end, 10);
	if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
		bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);
		ret = count;
	}
	return ret;
}

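/*
 * K() converts a page count into kilobytes; BDI_SHOW() generates a sysfs
 * show handler that prints the given expression for the bdi behind the
 * device.
 */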
#define K(pages) ((pages) << (PAGE_SHIFT - 10))

#define BDI_SHOW(name, expr)						\
static ssize_t name##_show(struct device *dev,				\
			   struct device_attribute *attr, char *page)	\
{									\
	struct backing_dev_info *bdi = dev_get_drvdata(dev);		\
									\
	return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr);	\
}

BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))

BDI_SHOW(reclaimable_kb, K(bdi_stat(bdi, BDI_RECLAIMABLE)))
BDI_SHOW(writeback_kb, K(bdi_stat(bdi, BDI_WRITEBACK)))

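/*
 * Fetch the thresholds filled in by get_dirty_limits() and return the
 * i-th one; the dirty_kb and bdi_dirty_kb attributes below use indices
 * 1 and 2 respectively.
 */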
static inline unsigned long get_dirty(struct backing_dev_info *bdi, int i)
{
	unsigned long thresh[3];

	get_dirty_limits(&thresh[0], &thresh[1], &thresh[2], bdi);

	return thresh[i];
}

BDI_SHOW(dirty_kb, K(get_dirty(bdi, 1)))
BDI_SHOW(bdi_dirty_kb, K(get_dirty(bdi, 2)))

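/*
 * sysfs store handler for "min_ratio": parse a percentage and hand it to
 * bdi_set_min_ratio(), returning that call's error (if any) to the writer.
 */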
static ssize_t min_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	char *end;
	unsigned int ratio;
	ssize_t ret = -EINVAL;

	ratio = simple_strtoul(buf, &end, 10);
	if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
		ret = bdi_set_min_ratio(bdi, ratio);
		if (!ret)
			ret = count;
	}
	return ret;
}
BDI_SHOW(min_ratio, bdi->min_ratio)

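/*
 * __ATTR_RW() wires a 0644 attribute to the name##_show/name##_store
 * handlers above; bdi_dev_attrs is the default attribute table for every
 * device in the "bdi" class.
 */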
#define __ATTR_RW(attr) __ATTR(attr, 0644, attr##_show, attr##_store)

static struct device_attribute bdi_dev_attrs[] = {
	__ATTR_RW(read_ahead_kb),
	__ATTR_RO(reclaimable_kb),
	__ATTR_RO(writeback_kb),
	__ATTR_RO(dirty_kb),
	__ATTR_RO(bdi_dirty_kb),
	__ATTR_RW(min_ratio),
	__ATTR_NULL,
};

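/* Create the "bdi" device class early so bdi_register() can use it. */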
static __init int bdi_class_init(void)
{
	bdi_class = class_create(THIS_MODULE, "bdi");
	bdi_class->dev_attrs = bdi_dev_attrs;
	return 0;
}

core_initcall(bdi_class_init);

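/*
 * Register the sysfs device for a bdi.  The device name is built from
 * the format string, and the bdi is stashed in the device's drvdata so
 * the attribute handlers above can find it.
 */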
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
		const char *fmt, ...)
{
	char *name;
	va_list args;
	int ret = 0;
	struct device *dev;

	va_start(args, fmt);
	name = kvasprintf(GFP_KERNEL, fmt, args);
	va_end(args);

	if (!name)
		return -ENOMEM;

	dev = device_create(bdi_class, parent, MKDEV(0, 0), name);
	if (IS_ERR(dev)) {
		ret = PTR_ERR(dev);
		goto exit;
	}

	bdi->dev = dev;
	dev_set_drvdata(bdi->dev, bdi);

exit:
	kfree(name);
	return ret;
}
EXPORT_SYMBOL(bdi_register);

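/* Convenience wrapper: name the bdi after a block device's major:minor. */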
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
{
	return bdi_register(bdi, NULL, "%u:%u", MAJOR(dev), MINOR(dev));
}
EXPORT_SYMBOL(bdi_register_dev);

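/* Tear down the sysfs device created by bdi_register(), if any. */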
void bdi_unregister(struct backing_dev_info *bdi)
{
	if (bdi->dev) {
		device_unregister(bdi->dev);
		bdi->dev = NULL;
	}
}
EXPORT_SYMBOL(bdi_unregister);

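/*
 * Initialise a backing_dev_info: clear the device pointer and min_ratio,
 * then set up the per-cpu bdi_stat counters and the completions counter.
 * On failure, any counters already initialised are destroyed again.
 */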
int bdi_init(struct backing_dev_info *bdi)
{
	int i;
	int err;

	bdi->dev = NULL;

	bdi->min_ratio = 0;

	for (i = 0; i < NR_BDI_STAT_ITEMS; i++) {
		err = percpu_counter_init_irq(&bdi->bdi_stat[i], 0);
		if (err)
			goto err;
	}

	bdi->dirty_exceeded = 0;
	err = prop_local_init_percpu(&bdi->completions);

	if (err) {
err:
		while (i--)
			percpu_counter_destroy(&bdi->bdi_stat[i]);
	}

	return err;
}
EXPORT_SYMBOL(bdi_init);

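/* Undo bdi_init(), unregistering the sysfs device first if present. */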
void bdi_destroy(struct backing_dev_info *bdi)
{
	int i;

	bdi_unregister(bdi);

	for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
		percpu_counter_destroy(&bdi->bdi_stat[i]);

	prop_local_destroy_percpu(&bdi->completions);
}
EXPORT_SYMBOL(bdi_destroy);

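/*
 * Wait queues for tasks throttled on backing-device congestion, one per
 * data direction, indexed by the rw argument of the helpers below.
 */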
static wait_queue_head_t congestion_wqh[2] = {
	__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
	__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
};


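/*
 * Clear the congestion bit for the given direction and wake any task
 * sleeping in congestion_wait() on that queue.
 */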
void clear_bdi_congested(struct backing_dev_info *bdi, int rw)
{
	enum bdi_state bit;
	wait_queue_head_t *wqh = &congestion_wqh[rw];

	bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
	clear_bit(bit, &bdi->state);
	smp_mb__after_clear_bit();
	if (waitqueue_active(wqh))
		wake_up(wqh);
}
EXPORT_SYMBOL(clear_bdi_congested);

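/* Mark the bdi congested for the given direction (READ or WRITE). */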
void set_bdi_congested(struct backing_dev_info *bdi, int rw)
{
	enum bdi_state bit;

	bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
	set_bit(bit, &bdi->state);
}
EXPORT_SYMBOL(set_bdi_congested);

/**
 * congestion_wait - wait for a backing_dev to become uncongested
 * @rw: READ or WRITE
 * @timeout: timeout in jiffies
 *
 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to
 * exit congestion in the given @rw direction.  If no backing_devs are
 * congested then just wait for the next write to be completed.
 */
long congestion_wait(int rw, long timeout)
{
	long ret;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[rw];

	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);
	return ret;
}
EXPORT_SYMBOL(congestion_wait);