/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#ifdef CONFIG_ZRAM_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/err.h>

#include "zram_drv.h"

/* Globals */
static int zram_major;
static struct zram *zram_devices;
static const char *default_compressor = "lzo";

/* Module params (documentation at end) */
static unsigned int num_devices = 1;

static inline void deprecated_attr_warn(const char *name)
{
	pr_warn_once("%d (%s) Attribute %s (and others) will be removed. %s\n",
			task_pid_nr(current),
			current->comm,
			name,
			"See zram documentation.");
}

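/*
 * Generate a read-only sysfs show() handler that prints one 64-bit
 * counter from zram->stats, together with the matching DEVICE_ATTR_RO()
 * definition. Every attribute generated this way is deprecated and
 * warns on first use via deprecated_attr_warn().
 */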
#define ZRAM_ATTR_RO(name)						\
static ssize_t name##_show(struct device *d,				\
				struct device_attribute *attr, char *b)	\
{									\
	struct zram *zram = dev_to_zram(d);				\
									\
	deprecated_attr_warn(__stringify(name));			\
	return scnprintf(b, PAGE_SIZE, "%llu\n",			\
		(u64)atomic64_read(&zram->stats.name));			\
}									\
static DEVICE_ATTR_RO(name);

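/* The device is considered initialized once a non-zero disksize is set. */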
static inline bool init_done(struct zram *zram)
{
	return zram->disksize;
}

static inline struct zram *dev_to_zram(struct device *dev)
{
	return (struct zram *)dev_to_disk(dev)->private_data;
}

static ssize_t disksize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
}

static ssize_t initstate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = init_done(zram);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%u\n", val);
}

static ssize_t orig_data_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	deprecated_attr_warn("orig_data_size");
	return scnprintf(buf, PAGE_SIZE, "%llu\n",
		(u64)(atomic64_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
}

static ssize_t mem_used_total_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val = 0;
	struct zram *zram = dev_to_zram(dev);

	deprecated_attr_warn("mem_used_total");
	down_read(&zram->init_lock);
	if (init_done(zram)) {
		struct zram_meta *meta = zram->meta;
		val = zs_get_total_pages(meta->mem_pool);
	}
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}

static ssize_t max_comp_streams_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = zram->max_comp_streams;
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
}

static ssize_t mem_limit_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val;
	struct zram *zram = dev_to_zram(dev);

	deprecated_attr_warn("mem_limit");
	down_read(&zram->init_lock);
	val = zram->limit_pages;
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}

static ssize_t mem_limit_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 limit;
	char *tmp;
	struct zram *zram = dev_to_zram(dev);

	limit = memparse(buf, &tmp);
	if (buf == tmp) /* no chars parsed, invalid input */
		return -EINVAL;

	down_write(&zram->init_lock);
	zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
	up_write(&zram->init_lock);

	return len;
}

static ssize_t mem_used_max_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val = 0;
	struct zram *zram = dev_to_zram(dev);

	deprecated_attr_warn("mem_used_max");
	down_read(&zram->init_lock);
	if (init_done(zram))
		val = atomic_long_read(&zram->stats.max_used_pages);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}

static ssize_t mem_used_max_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int err;
	unsigned long val;
	struct zram *zram = dev_to_zram(dev);

	err = kstrtoul(buf, 10, &val);
	if (err || val != 0)
		return -EINVAL;

	down_read(&zram->init_lock);
	if (init_done(zram)) {
		struct zram_meta *meta = zram->meta;
		atomic_long_set(&zram->stats.max_used_pages,
				zs_get_total_pages(meta->mem_pool));
	}
	up_read(&zram->init_lock);

	return len;
}

static ssize_t max_comp_streams_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int num;
	struct zram *zram = dev_to_zram(dev);
	int ret;

	ret = kstrtoint(buf, 0, &num);
	if (ret < 0)
		return ret;
	if (num < 1)
		return -EINVAL;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		if (!zcomp_set_max_streams(zram->comp, num)) {
			pr_info("Cannot change max compression streams\n");
			ret = -EINVAL;
			goto out;
		}
	}

	zram->max_comp_streams = num;
	ret = len;
out:
	up_write(&zram->init_lock);
	return ret;
}

static ssize_t comp_algorithm_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	size_t sz;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	sz = zcomp_available_show(zram->compressor, buf);
	up_read(&zram->init_lock);

	return sz;
}

static ssize_t comp_algorithm_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	down_write(&zram->init_lock);
	if (init_done(zram)) {
		up_write(&zram->init_lock);
		pr_info("Can't change algorithm for initialized device\n");
		return -EBUSY;
	}
	strlcpy(zram->compressor, buf, sizeof(zram->compressor));
	up_write(&zram->init_lock);
	return len;
}

/* flag operations require holding the table entry's bit_spinlock (ZRAM_ACCESS) */
static int zram_test_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	return meta->table[index].value & BIT(flag);
}

static void zram_set_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].value |= BIT(flag);
}

static void zram_clear_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].value &= ~BIT(flag);
}

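/*
 * The low ZRAM_FLAG_SHIFT bits of table[index].value hold the size of
 * the compressed object; the bits above them hold the zram_pageflags.
 */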
static size_t zram_get_obj_size(struct zram_meta *meta, u32 index)
{
	return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
}

static void zram_set_obj_size(struct zram_meta *meta,
					u32 index, size_t size)
{
	unsigned long flags = meta->table[index].value >> ZRAM_FLAG_SHIFT;

	meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
}

static inline int is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline int valid_io_request(struct zram *zram,
		sector_t start, unsigned int size)
{
	u64 end, bound;

	/* unaligned request */
	if (unlikely(start & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
		return 0;
	if (unlikely(size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
		return 0;

	end = start + (size >> SECTOR_SHIFT);
	bound = zram->disksize >> SECTOR_SHIFT;
	/* out of range */
	if (unlikely(start >= bound || end > bound || start > end))
		return 0;

	/* I/O request is valid */
	return 1;
}

static void zram_meta_free(struct zram_meta *meta, u64 disksize)
{
	size_t num_pages = disksize >> PAGE_SHIFT;
	size_t index;

	/* Free all pages that are still in this zram device */
	for (index = 0; index < num_pages; index++) {
		unsigned long handle = meta->table[index].handle;

		if (!handle)
			continue;

		zs_free(meta->mem_pool, handle);
	}

	zs_destroy_pool(meta->mem_pool);
	vfree(meta->table);
	kfree(meta);
}

static struct zram_meta *zram_meta_alloc(int device_id, u64 disksize)
{
	size_t num_pages;
	char pool_name[8];
	struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);

	if (!meta)
		return NULL;

	num_pages = disksize >> PAGE_SHIFT;
	meta->table = vzalloc(num_pages * sizeof(*meta->table));
	if (!meta->table) {
		pr_err("Error allocating zram address table\n");
		goto out_error;
	}

	snprintf(pool_name, sizeof(pool_name), "zram%d", device_id);
	meta->mem_pool = zs_create_pool(pool_name, GFP_NOIO | __GFP_HIGHMEM);
	if (!meta->mem_pool) {
		pr_err("Error creating memory pool\n");
		goto out_error;
	}

	return meta;

out_error:
	vfree(meta->table);
	kfree(meta);
	return NULL;
}

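/*
 * zram->refcount pins the device meta while I/O is in flight. Once the
 * reset path drops the initial reference, zram_meta_get() starts failing
 * and new requests bail out instead of touching freed meta.
 */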
static inline bool zram_meta_get(struct zram *zram)
{
	if (atomic_inc_not_zero(&zram->refcount))
		return true;
	return false;
}

static inline void zram_meta_put(struct zram *zram)
{
	atomic_dec(&zram->refcount);
}

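/* Advance the page index and intra-page offset past this bio vector. */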
static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	if (*offset + bvec->bv_len >= PAGE_SIZE)
		(*index)++;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}

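/* Return 1 if the page contains only zero bytes, scanning a long at a time. */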
static int page_zero_filled(void *ptr)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return 0;
	}

	return 1;
}

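/*
 * Satisfy a read of a zero-filled slot by clearing the target page, or
 * only the requested byte range for a partial I/O.
 */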
static void handle_zero_page(struct bio_vec *bvec)
{
	struct page *page = bvec->bv_page;
	void *user_mem;

	user_mem = kmap_atomic(page);
	if (is_partial_io(bvec))
		memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
	else
		clear_page(user_mem);
	kunmap_atomic(user_mem);

	flush_dcache_page(page);
}

/*
 * To protect concurrent access to the same index entry, the caller
 * should hold this table index entry's bit_spinlock, which marks the
 * entry as being accessed.
 */
static void zram_free_page(struct zram *zram, size_t index)
{
	struct zram_meta *meta = zram->meta;
	unsigned long handle = meta->table[index].handle;

	if (unlikely(!handle)) {
		/*
		 * No memory is allocated for zero filled pages.
		 * Simply clear zero page flag.
		 */
		if (zram_test_flag(meta, index, ZRAM_ZERO)) {
			zram_clear_flag(meta, index, ZRAM_ZERO);
			atomic64_dec(&zram->stats.zero_pages);
		}
		return;
	}

	zs_free(meta->mem_pool, handle);

	atomic64_sub(zram_get_obj_size(meta, index),
			&zram->stats.compr_data_size);
	atomic64_dec(&zram->stats.pages_stored);

	meta->table[index].handle = 0;
	zram_set_obj_size(meta, index, 0);
}

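/*
 * Decompress the object stored at @index into @mem, which must be a
 * full PAGE_SIZE buffer. Unallocated or zero-filled slots yield a
 * cleared page; objects stored uncompressed (PAGE_SIZE) are copied.
 */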
static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
	int ret = 0;
	unsigned char *cmem;
	struct zram_meta *meta = zram->meta;
	unsigned long handle;
	size_t size;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	handle = meta->table[index].handle;
	size = zram_get_obj_size(meta, index);

	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		clear_page(mem);
		return 0;
	}

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
	if (size == PAGE_SIZE)
		copy_page(mem, cmem);
	else
		ret = zcomp_decompress(zram->comp, cmem, size, mem);
	zs_unmap_object(meta->mem_pool, handle);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		return ret;
	}

	return 0;
}

static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
			  u32 index, int offset)
{
	int ret;
	struct page *page;
	unsigned char *user_mem, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	page = bvec->bv_page;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	if (unlikely(!meta->table[index].handle) ||
			zram_test_flag(meta, index, ZRAM_ZERO)) {
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		handle_zero_page(bvec);
		return 0;
	}
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	if (is_partial_io(bvec))
		/* Use a temporary buffer to decompress the page */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);

	user_mem = kmap_atomic(page);
	if (!is_partial_io(bvec))
		uncmem = user_mem;

	if (!uncmem) {
		pr_info("Unable to allocate temp memory\n");
		ret = -ENOMEM;
		goto out_cleanup;
	}

	ret = zram_decompress_page(zram, uncmem, index);
	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret))
		goto out_cleanup;

	if (is_partial_io(bvec))
		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
				bvec->bv_len);

	flush_dcache_page(page);
	ret = 0;
out_cleanup:
	kunmap_atomic(user_mem);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}

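/*
 * Lock-free update of the max_used_pages watermark: retry the cmpxchg
 * until either the stored maximum is at least @pages or our value wins.
 */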
static inline void update_used_max(struct zram *zram,
					const unsigned long pages)
{
	unsigned long old_max, cur_max;

	old_max = atomic_long_read(&zram->stats.max_used_pages);

	do {
		cur_max = old_max;
		if (pages > cur_max)
			old_max = atomic_long_cmpxchg(
				&zram->stats.max_used_pages, cur_max, pages);
	} while (old_max != cur_max);
}

static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
			   int offset)
{
	int ret = 0;
	size_t clen;
	unsigned long handle;
	struct page *page;
	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	struct zcomp_strm *zstrm;
	bool locked = false;
	unsigned long alloced_pages;

	page = bvec->bv_page;
	if (is_partial_io(bvec)) {
		/*
		 * This is a partial IO. We need to read the full page
		 * before writing the changes.
		 */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
		if (!uncmem) {
			ret = -ENOMEM;
			goto out;
		}
		ret = zram_decompress_page(zram, uncmem, index);
		if (ret)
			goto out;
	}

	zstrm = zcomp_strm_find(zram->comp);
	locked = true;
	user_mem = kmap_atomic(page);

	if (is_partial_io(bvec)) {
		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
			bvec->bv_len);
		kunmap_atomic(user_mem);
		user_mem = NULL;
	} else {
		uncmem = user_mem;
	}

	if (page_zero_filled(uncmem)) {
		if (user_mem)
			kunmap_atomic(user_mem);
		/* Free memory associated with this sector now. */
		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
		zram_free_page(zram, index);
		zram_set_flag(meta, index, ZRAM_ZERO);
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

		atomic64_inc(&zram->stats.zero_pages);
		ret = 0;
		goto out;
	}

	ret = zcomp_compress(zram->comp, zstrm, uncmem, &clen);
	if (!is_partial_io(bvec)) {
		kunmap_atomic(user_mem);
		user_mem = NULL;
		uncmem = NULL;
	}

	if (unlikely(ret)) {
		pr_err("Compression failed! err=%d\n", ret);
		goto out;
	}
	src = zstrm->buffer;
	if (unlikely(clen > max_zpage_size)) {
		clen = PAGE_SIZE;
		if (is_partial_io(bvec))
			src = uncmem;
	}

	handle = zs_malloc(meta->mem_pool, clen);
	if (!handle) {
		pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
			index, clen);
		ret = -ENOMEM;
		goto out;
	}

	alloced_pages = zs_get_total_pages(meta->mem_pool);
	if (zram->limit_pages && alloced_pages > zram->limit_pages) {
		zs_free(meta->mem_pool, handle);
		ret = -ENOMEM;
		goto out;
	}

	update_used_max(zram, alloced_pages);

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);

	if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
		src = kmap_atomic(page);
		copy_page(cmem, src);
		kunmap_atomic(src);
	} else {
		memcpy(cmem, src, clen);
	}

	zcomp_strm_release(zram->comp, zstrm);
	locked = false;
	zs_unmap_object(meta->mem_pool, handle);

	/*
	 * Free memory associated with this sector
	 * before overwriting unused sectors.
	 */
	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	zram_free_page(zram, index);

	meta->table[index].handle = handle;
	zram_set_obj_size(meta, index, clen);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	/* Update stats */
	atomic64_add(clen, &zram->stats.compr_data_size);
	atomic64_inc(&zram->stats.pages_stored);
out:
	if (locked)
		zcomp_strm_release(zram->comp, zstrm);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}

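/*
 * Dispatch a single-page read or write and account it both in the
 * generic block-layer I/O statistics (generic_start/end_io_acct) and
 * in zram's own num_reads/num_writes and failure counters.
 */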
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, int rw)
{
	unsigned long start_time = jiffies;
	int ret;

	generic_start_io_acct(rw, bvec->bv_len >> SECTOR_SHIFT,
			&zram->disk->part0);

	if (rw == READ) {
		atomic64_inc(&zram->stats.num_reads);
		ret = zram_bvec_read(zram, bvec, index, offset);
	} else {
		atomic64_inc(&zram->stats.num_writes);
		ret = zram_bvec_write(zram, bvec, index, offset);
	}

	generic_end_io_acct(rw, &zram->disk->part0, start_time);

	if (unlikely(ret)) {
		if (rw == READ)
			atomic64_inc(&zram->stats.failed_reads);
		else
			atomic64_inc(&zram->stats.failed_writes);
	}

	return ret;
}

/*
 * zram_bio_discard - handler on discard request
 * @index: physical block index in PAGE_SIZE units
 * @offset: byte offset within physical block
 */
static void zram_bio_discard(struct zram *zram, u32 index,
			     int offset, struct bio *bio)
{
	size_t n = bio->bi_iter.bi_size;
	struct zram_meta *meta = zram->meta;

	/*
	 * zram manages data in physical block size units. Because the logical
	 * block size isn't identical to the physical block size on some
	 * architectures, we could get a discard request pointing to a specific
	 * offset within a certain physical block. Although we can handle this
	 * request by reading that physical block and decompressing and
	 * partially zeroing and re-compressing and then re-storing it, this
	 * isn't reasonable because our intent with a discard request is to
	 * save memory. So skipping this logical block is appropriate here.
	 */
	if (offset) {
		if (n <= (PAGE_SIZE - offset))
			return;

		n -= (PAGE_SIZE - offset);
		index++;
	}

	while (n >= PAGE_SIZE) {
		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
		zram_free_page(zram, index);
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		atomic64_inc(&zram->stats.notify_free);
		index++;
		n -= PAGE_SIZE;
	}
}

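/*
 * Tear down an initialized device: drop the initial meta refcount, wait
 * for in-flight I/O to drain, reset stats and capacity under init_lock,
 * then free the meta and compression backend outside the lock.
 */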
static void zram_reset_device(struct zram *zram)
{
	struct zram_meta *meta;
	struct zcomp *comp;
	u64 disksize;

	down_write(&zram->init_lock);

	zram->limit_pages = 0;

	if (!init_done(zram)) {
		up_write(&zram->init_lock);
		return;
	}

	meta = zram->meta;
	comp = zram->comp;
	disksize = zram->disksize;
	/*
	 * Refcount will go down to 0 eventually and the r/w handler
	 * cannot handle further I/O, so it will bail out when
	 * zram_meta_get() fails.
	 */
	zram_meta_put(zram);
	/*
	 * We want to free zram_meta in process context to avoid
	 * deadlock between reclaim path and any other locks.
	 */
	wait_event(zram->io_done, atomic_read(&zram->refcount) == 0);

	/* Reset stats */
	memset(&zram->stats, 0, sizeof(zram->stats));
	zram->disksize = 0;
	zram->max_comp_streams = 1;
	set_capacity(zram->disk, 0);

	up_write(&zram->init_lock);
	/* I/O operations on all CPUs are done, so it is safe to free */
	zram_meta_free(meta, disksize);
	zcomp_destroy(comp);
}

static ssize_t disksize_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 disksize;
	struct zcomp *comp;
	struct zram_meta *meta;
	struct zram *zram = dev_to_zram(dev);
	int err;

	disksize = memparse(buf, NULL);
	if (!disksize)
		return -EINVAL;

	disksize = PAGE_ALIGN(disksize);
	meta = zram_meta_alloc(zram->disk->first_minor, disksize);
	if (!meta)
		return -ENOMEM;

	comp = zcomp_create(zram->compressor, zram->max_comp_streams);
	if (IS_ERR(comp)) {
		pr_info("Cannot initialise %s compressing backend\n",
				zram->compressor);
		err = PTR_ERR(comp);
		goto out_free_meta;
	}

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		pr_info("Cannot change disksize for initialized device\n");
		err = -EBUSY;
		goto out_destroy_comp;
	}

	init_waitqueue_head(&zram->io_done);
	atomic_set(&zram->refcount, 1);
	zram->meta = meta;
	zram->comp = comp;
	zram->disksize = disksize;
	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
	up_write(&zram->init_lock);

	/*
	 * Revalidate disk out of the init_lock to avoid lockdep splat.
	 * It's okay because the disk's capacity is protected by init_lock,
	 * so revalidate_disk always sees an up-to-date capacity.
	 */
| 841 | revalidate_disk(zram->disk); |
| 842 | |
Sergey Senozhatsky | 9b3bb7a | 2013-06-22 03:21:18 +0300 | [diff] [blame] | 843 | return len; |
Sergey Senozhatsky | b7ca232 | 2014-04-07 15:38:12 -0700 | [diff] [blame] | 844 | |
Sergey Senozhatsky | fcfa8d9 | 2014-04-07 15:38:20 -0700 | [diff] [blame] | 845 | out_destroy_comp: |
| 846 | up_write(&zram->init_lock); |
| 847 | zcomp_destroy(comp); |
| 848 | out_free_meta: |
Ganesh Mahendran | 1fec117 | 2015-02-12 15:00:33 -0800 | [diff] [blame] | 849 | zram_meta_free(meta, disksize); |
Sergey Senozhatsky | b7ca232 | 2014-04-07 15:38:12 -0700 | [diff] [blame] | 850 | return err; |
Sergey Senozhatsky | 9b3bb7a | 2013-06-22 03:21:18 +0300 | [diff] [blame] | 851 | } |
| 852 | |
| 853 | static ssize_t reset_store(struct device *dev, |
| 854 | struct device_attribute *attr, const char *buf, size_t len) |
| 855 | { |
| 856 | int ret; |
| 857 | unsigned short do_reset; |
| 858 | struct zram *zram; |
| 859 | struct block_device *bdev; |
| 860 | |
| 861 | zram = dev_to_zram(dev); |
| 862 | bdev = bdget_disk(zram->disk, 0); |
| 863 | |
Rashika Kheria | 46a51c8 | 2013-10-30 18:36:32 +0530 | [diff] [blame] | 864 | if (!bdev) |
| 865 | return -ENOMEM; |
| 866 | |
Sergey Senozhatsky | ba6b17d | 2015-02-12 15:00:36 -0800 | [diff] [blame] | 867 | mutex_lock(&bdev->bd_mutex); |
Sergey Senozhatsky | 9b3bb7a | 2013-06-22 03:21:18 +0300 | [diff] [blame] | 868 | /* Do not reset an active device! */ |
Minchan Kim | 2b269ce | 2015-02-12 15:00:42 -0800 | [diff] [blame] | 869 | if (bdev->bd_openers) { |
Rashika Kheria | 1b67222 | 2013-11-10 22:13:53 +0530 | [diff] [blame] | 870 | ret = -EBUSY; |
| 871 | goto out; |
| 872 | } |
Sergey Senozhatsky | 9b3bb7a | 2013-06-22 03:21:18 +0300 | [diff] [blame] | 873 | |
| 874 | ret = kstrtou16(buf, 10, &do_reset); |
| 875 | if (ret) |
Rashika Kheria | 1b67222 | 2013-11-10 22:13:53 +0530 | [diff] [blame] | 876 | goto out; |
Sergey Senozhatsky | 9b3bb7a | 2013-06-22 03:21:18 +0300 | [diff] [blame] | 877 | |
Rashika Kheria | 1b67222 | 2013-11-10 22:13:53 +0530 | [diff] [blame] | 878 | if (!do_reset) { |
| 879 | ret = -EINVAL; |
| 880 | goto out; |
| 881 | } |
Sergey Senozhatsky | 9b3bb7a | 2013-06-22 03:21:18 +0300 | [diff] [blame] | 882 | |
| 883 | /* Make sure all pending I/O is finished */ |
Rashika Kheria | 46a51c8 | 2013-10-30 18:36:32 +0530 | [diff] [blame] | 884 | fsync_bdev(bdev); |
Sergey Senozhatsky | ba6b17d | 2015-02-12 15:00:36 -0800 | [diff] [blame] | 885 | zram_reset_device(zram); |
Sergey Senozhatsky | ba6b17d | 2015-02-12 15:00:36 -0800 | [diff] [blame] | 886 | |
| 887 | mutex_unlock(&bdev->bd_mutex); |
| 888 | revalidate_disk(zram->disk); |
Rashika Kheria | 1b67222 | 2013-11-10 22:13:53 +0530 | [diff] [blame] | 889 | bdput(bdev); |
Sergey Senozhatsky | 9b3bb7a | 2013-06-22 03:21:18 +0300 | [diff] [blame] | 890 | |
Sergey Senozhatsky | 9b3bb7a | 2013-06-22 03:21:18 +0300 | [diff] [blame] | 891 | return len; |
Rashika Kheria | 1b67222 | 2013-11-10 22:13:53 +0530 | [diff] [blame] | 892 | |
| 893 | out: |
Sergey Senozhatsky | ba6b17d | 2015-02-12 15:00:36 -0800 | [diff] [blame] | 894 | mutex_unlock(&bdev->bd_mutex); |
Rashika Kheria | 1b67222 | 2013-11-10 22:13:53 +0530 | [diff] [blame] | 895 | bdput(bdev); |
| 896 | return ret; |
Jerome Marchand | 8c921b2 | 2011-06-10 15:28:47 +0200 | [diff] [blame] | 897 | } |
| 898 | |
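/*
 * __zram_make_request() walks the bio segment by segment. Any bvec that
 * would cross a PAGE_SIZE boundary is split in two, because zram_bvec_rw()
 * can only operate on a single zram page at a time.
 */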
Sergey Senozhatsky | be257c6 | 2014-04-07 15:38:01 -0700 | [diff] [blame] | 899 | static void __zram_make_request(struct zram *zram, struct bio *bio) |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 900 | { |
karam.lee | b627cff | 2014-12-12 16:56:47 -0800 | [diff] [blame] | 901 | int offset, rw; |
Nitin Gupta | a1dd52a | 2010-06-01 13:31:23 +0530 | [diff] [blame] | 902 | u32 index; |
Kent Overstreet | 7988613 | 2013-11-23 17:19:00 -0800 | [diff] [blame] | 903 | struct bio_vec bvec; |
| 904 | struct bvec_iter iter; |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 905 | |
Kent Overstreet | 4f024f3 | 2013-10-11 15:44:27 -0700 | [diff] [blame] | 906 | index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT; |
| 907 | offset = (bio->bi_iter.bi_sector & |
| 908 | (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT; |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 909 | |
Joonsoo Kim | f4659d8 | 2014-04-07 15:38:24 -0700 | [diff] [blame] | 910 | if (unlikely(bio->bi_rw & REQ_DISCARD)) { |
| 911 | zram_bio_discard(zram, index, offset, bio); |
| 912 | bio_endio(bio, 0); |
| 913 | return; |
| 914 | } |
| 915 | |
karam.lee | b627cff | 2014-12-12 16:56:47 -0800 | [diff] [blame] | 916 | rw = bio_data_dir(bio); |
Kent Overstreet | 7988613 | 2013-11-23 17:19:00 -0800 | [diff] [blame] | 917 | bio_for_each_segment(bvec, bio, iter) { |
Jerome Marchand | 924bd88 | 2011-06-10 15:28:48 +0200 | [diff] [blame] | 918 | int max_transfer_size = PAGE_SIZE - offset; |
| 919 | |
Kent Overstreet | 7988613 | 2013-11-23 17:19:00 -0800 | [diff] [blame] | 920 | if (bvec.bv_len > max_transfer_size) { |
Jerome Marchand | 924bd88 | 2011-06-10 15:28:48 +0200 | [diff] [blame] | 921 | /* |
| 922 | * zram_bvec_rw() can only operate on a single |
| 923 | * zram page. Split the bio vector. |
| 924 | */ |
| 925 | struct bio_vec bv; |
| 926 | |
Kent Overstreet | 7988613 | 2013-11-23 17:19:00 -0800 | [diff] [blame] | 927 | bv.bv_page = bvec.bv_page; |
Jerome Marchand | 924bd88 | 2011-06-10 15:28:48 +0200 | [diff] [blame] | 928 | bv.bv_len = max_transfer_size; |
Kent Overstreet | 7988613 | 2013-11-23 17:19:00 -0800 | [diff] [blame] | 929 | bv.bv_offset = bvec.bv_offset; |
Jerome Marchand | 924bd88 | 2011-06-10 15:28:48 +0200 | [diff] [blame] | 930 | |
karam.lee | b627cff | 2014-12-12 16:56:47 -0800 | [diff] [blame] | 931 | if (zram_bvec_rw(zram, &bv, index, offset, rw) < 0) |
Jerome Marchand | 924bd88 | 2011-06-10 15:28:48 +0200 | [diff] [blame] | 932 | goto out; |
| 933 | |
Kent Overstreet | 7988613 | 2013-11-23 17:19:00 -0800 | [diff] [blame] | 934 | bv.bv_len = bvec.bv_len - max_transfer_size; |
Jerome Marchand | 924bd88 | 2011-06-10 15:28:48 +0200 | [diff] [blame] | 935 | bv.bv_offset += max_transfer_size; |
karam.lee | b627cff | 2014-12-12 16:56:47 -0800 | [diff] [blame] | 936 | if (zram_bvec_rw(zram, &bv, index + 1, 0, rw) < 0) |
Jerome Marchand | 924bd88 | 2011-06-10 15:28:48 +0200 | [diff] [blame] | 937 | goto out; |
| 938 | } else |
karam.lee | b627cff | 2014-12-12 16:56:47 -0800 | [diff] [blame] | 939 | if (zram_bvec_rw(zram, &bvec, index, offset, rw) < 0) |
Jerome Marchand | 924bd88 | 2011-06-10 15:28:48 +0200 | [diff] [blame] | 940 | goto out; |
| 941 | |
Kent Overstreet | 7988613 | 2013-11-23 17:19:00 -0800 | [diff] [blame] | 942 | update_position(&index, &offset, &bvec); |
Nitin Gupta | a1dd52a | 2010-06-01 13:31:23 +0530 | [diff] [blame] | 943 | } |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 944 | |
| 945 | set_bit(BIO_UPTODATE, &bio->bi_flags); |
| 946 | bio_endio(bio, 0); |
Nitin Gupta | 7d7854b | 2011-01-22 07:36:15 -0500 | [diff] [blame] | 947 | return; |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 948 | |
| 949 | out: |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 950 | bio_io_error(bio); |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 951 | } |
| 952 | |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 953 | /* |
Nitin Gupta | f1e3cff | 2010-06-01 13:31:25 +0530 | [diff] [blame] | 954 | * Handler function for all zram I/O requests. |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 955 | */ |
Christoph Hellwig | 5a7bbad | 2011-09-12 12:12:01 +0200 | [diff] [blame] | 956 | static void zram_make_request(struct request_queue *queue, struct bio *bio) |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 957 | { |
Nitin Gupta | f1e3cff | 2010-06-01 13:31:25 +0530 | [diff] [blame] | 958 | struct zram *zram = queue->queuedata; |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 959 | |
Minchan Kim | 08eee69 | 2015-02-12 15:00:45 -0800 | [diff] [blame] | 960 | if (unlikely(!zram_meta_get(zram))) |
Minchan Kim | 3de738c | 2013-01-30 11:41:41 +0900 | [diff] [blame] | 961 | goto error; |
Jerome Marchand | 0900bea | 2011-09-06 15:02:11 +0200 | [diff] [blame] | 962 | |
karam.lee | 54850e7 | 2014-12-12 16:56:50 -0800 | [diff] [blame] | 963 | if (!valid_io_request(zram, bio->bi_iter.bi_sector, |
| 964 | bio->bi_iter.bi_size)) { |
Jiang Liu | da5cc7d | 2013-06-07 00:07:31 +0800 | [diff] [blame] | 965 | atomic64_inc(&zram->stats.invalid_io); |
Minchan Kim | 08eee69 | 2015-02-12 15:00:45 -0800 | [diff] [blame] | 966 | goto put_zram; |
Jerome Marchand | 6642a67 | 2011-02-17 17:11:49 +0100 | [diff] [blame] | 967 | } |
| 968 | |
Sergey Senozhatsky | be257c6 | 2014-04-07 15:38:01 -0700 | [diff] [blame] | 969 | __zram_make_request(zram, bio); |
Minchan Kim | 08eee69 | 2015-02-12 15:00:45 -0800 | [diff] [blame] | 970 | zram_meta_put(zram); |
Linus Torvalds | b4fdcb0 | 2011-11-04 17:06:58 -0700 | [diff] [blame] | 971 | return; |
Minchan Kim | 08eee69 | 2015-02-12 15:00:45 -0800 | [diff] [blame] | 972 | put_zram: |
| 973 | zram_meta_put(zram); |
Jerome Marchand | 0900bea | 2011-09-06 15:02:11 +0200 | [diff] [blame] | 974 | error: |
| 975 | bio_io_error(bio); |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 976 | } |
| 977 | |
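/*
 * Called by the swap layer when a swap slot backed by this device is
 * freed; drop the compressed copy right away instead of waiting for the
 * block to be overwritten.
 */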
Nitin Gupta | 2ccbec0 | 2011-09-09 19:01:00 -0400 | [diff] [blame] | 978 | static void zram_slot_free_notify(struct block_device *bdev, |
| 979 | unsigned long index) |
Nitin Gupta | 107c161 | 2010-05-17 11:02:44 +0530 | [diff] [blame] | 980 | { |
Nitin Gupta | f1e3cff | 2010-06-01 13:31:25 +0530 | [diff] [blame] | 981 | struct zram *zram; |
Minchan Kim | f614a9f | 2014-01-30 15:46:04 -0800 | [diff] [blame] | 982 | struct zram_meta *meta; |
Nitin Gupta | 107c161 | 2010-05-17 11:02:44 +0530 | [diff] [blame] | 983 | |
Nitin Gupta | f1e3cff | 2010-06-01 13:31:25 +0530 | [diff] [blame] | 984 | zram = bdev->bd_disk->private_data; |
Minchan Kim | f614a9f | 2014-01-30 15:46:04 -0800 | [diff] [blame] | 985 | meta = zram->meta; |
| 986 | |
Weijie Yang | d2d5e76 | 2014-08-06 16:08:31 -0700 | [diff] [blame] | 987 | bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); |
Minchan Kim | f614a9f | 2014-01-30 15:46:04 -0800 | [diff] [blame] | 988 | zram_free_page(zram, index); |
Weijie Yang | d2d5e76 | 2014-08-06 16:08:31 -0700 | [diff] [blame] | 989 | bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); |
Jiang Liu | da5cc7d | 2013-06-07 00:07:31 +0800 | [diff] [blame] | 990 | atomic64_inc(&zram->stats.notify_free); |
Nitin Gupta | 107c161 | 2010-05-17 11:02:44 +0530 | [diff] [blame] | 991 | } |
| 992 | |
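/*
 * rw_page() entry point: read or write a single page without building a
 * bio. On failure the caller falls back to the bio path; see the comment
 * at the end of this function.
 */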
karam.lee | 8c7f010 | 2014-12-12 16:56:53 -0800 | [diff] [blame] | 993 | static int zram_rw_page(struct block_device *bdev, sector_t sector, |
| 994 | struct page *page, int rw) |
| 995 | { |
Minchan Kim | 08eee69 | 2015-02-12 15:00:45 -0800 | [diff] [blame] | 996 | int offset, err = -EIO; |
karam.lee | 8c7f010 | 2014-12-12 16:56:53 -0800 | [diff] [blame] | 997 | u32 index; |
| 998 | struct zram *zram; |
| 999 | struct bio_vec bv; |
| 1000 | |
| 1001 | zram = bdev->bd_disk->private_data; |
Minchan Kim | 08eee69 | 2015-02-12 15:00:45 -0800 | [diff] [blame] | 1002 | if (unlikely(!zram_meta_get(zram))) |
| 1003 | goto out; |
| 1004 | |
karam.lee | 8c7f010 | 2014-12-12 16:56:53 -0800 | [diff] [blame] | 1005 | if (!valid_io_request(zram, sector, PAGE_SIZE)) { |
| 1006 | atomic64_inc(&zram->stats.invalid_io); |
Minchan Kim | 08eee69 | 2015-02-12 15:00:45 -0800 | [diff] [blame] | 1007 | err = -EINVAL; |
| 1008 | goto put_zram; |
karam.lee | 8c7f010 | 2014-12-12 16:56:53 -0800 | [diff] [blame] | 1009 | } |
| 1010 | |
| 1011 | index = sector >> SECTORS_PER_PAGE_SHIFT; |
| 1012 | offset = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT; |
| 1013 | |
| 1014 | bv.bv_page = page; |
| 1015 | bv.bv_len = PAGE_SIZE; |
| 1016 | bv.bv_offset = 0; |
| 1017 | |
| 1018 | err = zram_bvec_rw(zram, &bv, index, offset, rw); |
Minchan Kim | 08eee69 | 2015-02-12 15:00:45 -0800 | [diff] [blame] | 1019 | put_zram: |
| 1020 | zram_meta_put(zram); |
| 1021 | out: |
karam.lee | 8c7f010 | 2014-12-12 16:56:53 -0800 | [diff] [blame] | 1022 | /* |
| 1023 | * If the I/O fails, just return an error (i.e. non-zero) without |
| 1024 | * calling page_endio. |
| 1025 | * The upper callers of rw_page (e.g., swap_readpage, __swap_writepage) |
| 1026 | * then resubmit the I/O as a bio request, and bio->bi_end_io |
| 1027 | * handles the error (e.g., SetPageError, set_page_dirty and other |
| 1028 | * extra work). |
| 1029 | */ |
| 1030 | if (err == 0) |
| 1031 | page_endio(page, rw, 0); |
| 1032 | return err; |
| 1033 | } |
| 1034 | |
Nitin Gupta | f1e3cff | 2010-06-01 13:31:25 +0530 | [diff] [blame] | 1035 | static const struct block_device_operations zram_devops = { |
Nitin Gupta | f1e3cff | 2010-06-01 13:31:25 +0530 | [diff] [blame] | 1036 | .swap_slot_free_notify = zram_slot_free_notify, |
karam.lee | 8c7f010 | 2014-12-12 16:56:53 -0800 | [diff] [blame] | 1037 | .rw_page = zram_rw_page, |
Nitin Gupta | 107c161 | 2010-05-17 11:02:44 +0530 | [diff] [blame] | 1038 | .owner = THIS_MODULE |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1039 | }; |
| 1040 | |
Ganesh Mahendran | 083914e | 2014-12-12 16:57:13 -0800 | [diff] [blame] | 1041 | static DEVICE_ATTR_RW(disksize); |
| 1042 | static DEVICE_ATTR_RO(initstate); |
| 1043 | static DEVICE_ATTR_WO(reset); |
| 1044 | static DEVICE_ATTR_RO(orig_data_size); |
| 1045 | static DEVICE_ATTR_RO(mem_used_total); |
| 1046 | static DEVICE_ATTR_RW(mem_limit); |
| 1047 | static DEVICE_ATTR_RW(mem_used_max); |
| 1048 | static DEVICE_ATTR_RW(max_comp_streams); |
| 1049 | static DEVICE_ATTR_RW(comp_algorithm); |
Sergey Senozhatsky | 9b3bb7a | 2013-06-22 03:21:18 +0300 | [diff] [blame] | 1050 | |
Sergey Senozhatsky | 2f6a3be | 2015-04-15 16:16:03 -0700 | [diff] [blame] | 1051 | static ssize_t io_stat_show(struct device *dev, |
| 1052 | struct device_attribute *attr, char *buf) |
| 1053 | { |
| 1054 | struct zram *zram = dev_to_zram(dev); |
| 1055 | ssize_t ret; |
| 1056 | |
| 1057 | down_read(&zram->init_lock); |
| 1058 | ret = scnprintf(buf, PAGE_SIZE, |
| 1059 | "%8llu %8llu %8llu %8llu\n", |
| 1060 | (u64)atomic64_read(&zram->stats.failed_reads), |
| 1061 | (u64)atomic64_read(&zram->stats.failed_writes), |
| 1062 | (u64)atomic64_read(&zram->stats.invalid_io), |
| 1063 | (u64)atomic64_read(&zram->stats.notify_free)); |
| 1064 | up_read(&zram->init_lock); |
| 1065 | |
| 1066 | return ret; |
| 1067 | } |
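/*
 * Example (assuming the device is zram0): "cat /sys/block/zram0/io_stat"
 * prints the four counters above in one line, in this order:
 *   failed_reads failed_writes invalid_io notify_free
 */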
| 1068 | |
Sergey Senozhatsky | 4f2109f | 2015-04-15 16:16:06 -0700 | [diff] [blame] | 1069 | static ssize_t mm_stat_show(struct device *dev, |
| 1070 | struct device_attribute *attr, char *buf) |
| 1071 | { |
| 1072 | struct zram *zram = dev_to_zram(dev); |
| 1073 | u64 orig_size, mem_used = 0; |
| 1074 | long max_used; |
| 1075 | ssize_t ret; |
| 1076 | |
| 1077 | down_read(&zram->init_lock); |
| 1078 | if (init_done(zram)) |
| 1079 | mem_used = zs_get_total_pages(zram->meta->mem_pool); |
| 1080 | |
| 1081 | orig_size = atomic64_read(&zram->stats.pages_stored); |
| 1082 | max_used = atomic_long_read(&zram->stats.max_used_pages); |
| 1083 | |
| 1084 | ret = scnprintf(buf, PAGE_SIZE, |
| 1085 | "%8llu %8llu %8llu %8lu %8ld %8llu %8llu\n", |
| 1086 | orig_size << PAGE_SHIFT, |
| 1087 | (u64)atomic64_read(&zram->stats.compr_data_size), |
| 1088 | mem_used << PAGE_SHIFT, |
| 1089 | zram->limit_pages << PAGE_SHIFT, |
| 1090 | max_used << PAGE_SHIFT, |
| 1091 | (u64)atomic64_read(&zram->stats.zero_pages), |
| 1092 | (u64)atomic64_read(&zram->stats.num_migrated)); |
| 1093 | up_read(&zram->init_lock); |
| 1094 | |
| 1095 | return ret; |
| 1096 | } |
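/*
 * Example (assuming the device is zram0): "cat /sys/block/zram0/mm_stat"
 * prints one line with, in order:
 *   orig_data_size compr_data_size mem_used_total mem_limit mem_used_max
 *   zero_pages num_migrated
 * where the first five values are in bytes and the last two are counts.
 */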
| 1097 | |
Sergey Senozhatsky | 2f6a3be | 2015-04-15 16:16:03 -0700 | [diff] [blame] | 1098 | static DEVICE_ATTR_RO(io_stat); |
Sergey Senozhatsky | 4f2109f | 2015-04-15 16:16:06 -0700 | [diff] [blame] | 1099 | static DEVICE_ATTR_RO(mm_stat); |
Sergey Senozhatsky | a68eb3b | 2014-04-07 15:38:04 -0700 | [diff] [blame] | 1100 | ZRAM_ATTR_RO(num_reads); |
| 1101 | ZRAM_ATTR_RO(num_writes); |
Sergey Senozhatsky | 6444724 | 2014-04-07 15:38:05 -0700 | [diff] [blame] | 1102 | ZRAM_ATTR_RO(failed_reads); |
| 1103 | ZRAM_ATTR_RO(failed_writes); |
Sergey Senozhatsky | a68eb3b | 2014-04-07 15:38:04 -0700 | [diff] [blame] | 1104 | ZRAM_ATTR_RO(invalid_io); |
| 1105 | ZRAM_ATTR_RO(notify_free); |
| 1106 | ZRAM_ATTR_RO(zero_pages); |
| 1107 | ZRAM_ATTR_RO(compr_data_size); |
| 1108 | |
Sergey Senozhatsky | 9b3bb7a | 2013-06-22 03:21:18 +0300 | [diff] [blame] | 1109 | static struct attribute *zram_disk_attrs[] = { |
| 1110 | &dev_attr_disksize.attr, |
| 1111 | &dev_attr_initstate.attr, |
| 1112 | &dev_attr_reset.attr, |
| 1113 | &dev_attr_num_reads.attr, |
| 1114 | &dev_attr_num_writes.attr, |
Sergey Senozhatsky | 6444724 | 2014-04-07 15:38:05 -0700 | [diff] [blame] | 1115 | &dev_attr_failed_reads.attr, |
| 1116 | &dev_attr_failed_writes.attr, |
Sergey Senozhatsky | 9b3bb7a | 2013-06-22 03:21:18 +0300 | [diff] [blame] | 1117 | &dev_attr_invalid_io.attr, |
| 1118 | &dev_attr_notify_free.attr, |
| 1119 | &dev_attr_zero_pages.attr, |
| 1120 | &dev_attr_orig_data_size.attr, |
| 1121 | &dev_attr_compr_data_size.attr, |
| 1122 | &dev_attr_mem_used_total.attr, |
Minchan Kim | 9ada9da | 2014-10-09 15:29:53 -0700 | [diff] [blame] | 1123 | &dev_attr_mem_limit.attr, |
Minchan Kim | 461a8ee | 2014-10-09 15:29:55 -0700 | [diff] [blame] | 1124 | &dev_attr_mem_used_max.attr, |
Sergey Senozhatsky | beca3ec | 2014-04-07 15:38:14 -0700 | [diff] [blame] | 1125 | &dev_attr_max_comp_streams.attr, |
Sergey Senozhatsky | e46b8a0 | 2014-04-07 15:38:17 -0700 | [diff] [blame] | 1126 | &dev_attr_comp_algorithm.attr, |
Sergey Senozhatsky | 2f6a3be | 2015-04-15 16:16:03 -0700 | [diff] [blame] | 1127 | &dev_attr_io_stat.attr, |
Sergey Senozhatsky | 4f2109f | 2015-04-15 16:16:06 -0700 | [diff] [blame] | 1128 | &dev_attr_mm_stat.attr, |
Sergey Senozhatsky | 9b3bb7a | 2013-06-22 03:21:18 +0300 | [diff] [blame] | 1129 | NULL, |
| 1130 | }; |
| 1131 | |
| 1132 | static struct attribute_group zram_disk_attr_group = { |
| 1133 | .attrs = zram_disk_attrs, |
| 1134 | }; |
| 1135 | |
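/*
 * create_device() allocates the request queue and gendisk for a single
 * zram device, configures the queue limits (PAGE_SIZE-aligned I/O,
 * discard support) and registers the per-device sysfs attribute group.
 */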
Nitin Gupta | f1e3cff | 2010-06-01 13:31:25 +0530 | [diff] [blame] | 1136 | static int create_device(struct zram *zram, int device_id) |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1137 | { |
Sergey Senozhatsky | ee980160 | 2015-02-12 15:00:48 -0800 | [diff] [blame] | 1138 | struct request_queue *queue; |
Jiang Liu | 39a9b8a | 2013-06-07 00:07:24 +0800 | [diff] [blame] | 1139 | int ret = -ENOMEM; |
Nitin Gupta | de1a21a | 2010-01-28 21:13:40 +0530 | [diff] [blame] | 1140 | |
Jerome Marchand | 0900bea | 2011-09-06 15:02:11 +0200 | [diff] [blame] | 1141 | init_rwsem(&zram->init_lock); |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1142 | |
Sergey Senozhatsky | ee980160 | 2015-02-12 15:00:48 -0800 | [diff] [blame] | 1143 | queue = blk_alloc_queue(GFP_KERNEL); |
| 1144 | if (!queue) { |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1145 | pr_err("Error allocating disk queue for device %d\n", |
| 1146 | device_id); |
Nitin Gupta | de1a21a | 2010-01-28 21:13:40 +0530 | [diff] [blame] | 1147 | goto out; |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1148 | } |
| 1149 | |
Sergey Senozhatsky | ee980160 | 2015-02-12 15:00:48 -0800 | [diff] [blame] | 1150 | blk_queue_make_request(queue, zram_make_request); |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1151 | |
| 1152 | /* gendisk structure */ |
Nitin Gupta | f1e3cff | 2010-06-01 13:31:25 +0530 | [diff] [blame] | 1153 | zram->disk = alloc_disk(1); |
| 1154 | if (!zram->disk) { |
Sam Hansen | 94b8435 | 2012-06-07 16:03:47 -0700 | [diff] [blame] | 1155 | pr_warn("Error allocating disk structure for device %d\n", |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1156 | device_id); |
Julia Lawall | 201c7b7 | 2015-04-15 16:16:27 -0700 | [diff] [blame] | 1157 | ret = -ENOMEM; |
Jiang Liu | 39a9b8a | 2013-06-07 00:07:24 +0800 | [diff] [blame] | 1158 | goto out_free_queue; |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1159 | } |
| 1160 | |
Nitin Gupta | f1e3cff | 2010-06-01 13:31:25 +0530 | [diff] [blame] | 1161 | zram->disk->major = zram_major; |
| 1162 | zram->disk->first_minor = device_id; |
| 1163 | zram->disk->fops = &zram_devops; |
Sergey Senozhatsky | ee980160 | 2015-02-12 15:00:48 -0800 | [diff] [blame] | 1164 | zram->disk->queue = queue; |
| 1165 | zram->disk->queue->queuedata = zram; |
Nitin Gupta | f1e3cff | 2010-06-01 13:31:25 +0530 | [diff] [blame] | 1166 | zram->disk->private_data = zram; |
| 1167 | snprintf(zram->disk->disk_name, 16, "zram%d", device_id); |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1168 | |
Nitin Gupta | 33863c2 | 2010-08-09 22:56:47 +0530 | [diff] [blame] | 1169 | /* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */ |
Nitin Gupta | f1e3cff | 2010-06-01 13:31:25 +0530 | [diff] [blame] | 1170 | set_capacity(zram->disk, 0); |
Sergey Senozhatsky | b67d1ec | 2014-04-07 15:38:09 -0700 | [diff] [blame] | 1171 | /* zram devices sort of resemble non-rotational disks */ |
| 1172 | queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue); |
Mike Snitzer | b277da0 | 2014-10-04 10:55:32 -0600 | [diff] [blame] | 1173 | queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue); |
Nitin Gupta | a1dd52a | 2010-06-01 13:31:23 +0530 | [diff] [blame] | 1174 | /* |
| 1175 | * To ensure that we always get PAGE_SIZE-aligned |
| 1176 | * and n*PAGE_SIZE-sized I/O requests. |
| 1177 | */ |
Nitin Gupta | f1e3cff | 2010-06-01 13:31:25 +0530 | [diff] [blame] | 1178 | blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE); |
Robert Jennings | 7b19b8d | 2011-01-28 08:58:17 -0600 | [diff] [blame] | 1179 | blk_queue_logical_block_size(zram->disk->queue, |
| 1180 | ZRAM_LOGICAL_BLOCK_SIZE); |
Nitin Gupta | f1e3cff | 2010-06-01 13:31:25 +0530 | [diff] [blame] | 1181 | blk_queue_io_min(zram->disk->queue, PAGE_SIZE); |
| 1182 | blk_queue_io_opt(zram->disk->queue, PAGE_SIZE); |
Joonsoo Kim | f4659d8 | 2014-04-07 15:38:24 -0700 | [diff] [blame] | 1183 | zram->disk->queue->limits.discard_granularity = PAGE_SIZE; |
| 1184 | zram->disk->queue->limits.max_discard_sectors = UINT_MAX; |
| 1185 | /* |
| 1186 | * zram_bio_discard() will clear all logical blocks if the logical block |
| 1187 | * size is identical to the physical block size (PAGE_SIZE). But if they |
| 1188 | * differ, we skip discarding the parts of logical blocks that fall in |
| 1189 | * the portion of the request range which isn't aligned to the physical |
| 1190 | * block size. So we can't ensure that all discarded logical blocks are |
| 1191 | * zeroed. |
| 1192 | */ |
| 1193 | if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE) |
| 1194 | zram->disk->queue->limits.discard_zeroes_data = 1; |
| 1195 | else |
| 1196 | zram->disk->queue->limits.discard_zeroes_data = 0; |
| 1197 | queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zram->disk->queue); |
Nitin Gupta | 5d83d5a | 2010-01-28 21:13:39 +0530 | [diff] [blame] | 1198 | |
Nitin Gupta | f1e3cff | 2010-06-01 13:31:25 +0530 | [diff] [blame] | 1199 | add_disk(zram->disk); |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1200 | |
Nitin Gupta | 33863c2 | 2010-08-09 22:56:47 +0530 | [diff] [blame] | 1201 | ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj, |
| 1202 | &zram_disk_attr_group); |
| 1203 | if (ret < 0) { |
Sam Hansen | 94b8435 | 2012-06-07 16:03:47 -0700 | [diff] [blame] | 1204 | pr_warn("Error creating sysfs group\n"); |
Jiang Liu | 39a9b8a | 2013-06-07 00:07:24 +0800 | [diff] [blame] | 1205 | goto out_free_disk; |
Nitin Gupta | 33863c2 | 2010-08-09 22:56:47 +0530 | [diff] [blame] | 1206 | } |
Sergey Senozhatsky | e46b8a0 | 2014-04-07 15:38:17 -0700 | [diff] [blame] | 1207 | strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor)); |
Sergey Senozhatsky | be2d1d5 | 2014-04-07 15:38:00 -0700 | [diff] [blame] | 1208 | zram->meta = NULL; |
Sergey Senozhatsky | beca3ec | 2014-04-07 15:38:14 -0700 | [diff] [blame] | 1209 | zram->max_comp_streams = 1; |
Jiang Liu | 39a9b8a | 2013-06-07 00:07:24 +0800 | [diff] [blame] | 1210 | return 0; |
Nitin Gupta | de1a21a | 2010-01-28 21:13:40 +0530 | [diff] [blame] | 1211 | |
Jiang Liu | 39a9b8a | 2013-06-07 00:07:24 +0800 | [diff] [blame] | 1212 | out_free_disk: |
| 1213 | del_gendisk(zram->disk); |
| 1214 | put_disk(zram->disk); |
| 1215 | out_free_queue: |
Sergey Senozhatsky | ee980160 | 2015-02-12 15:00:48 -0800 | [diff] [blame] | 1216 | blk_cleanup_queue(queue); |
Nitin Gupta | de1a21a | 2010-01-28 21:13:40 +0530 | [diff] [blame] | 1217 | out: |
| 1218 | return ret; |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1219 | } |
| 1220 | |
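/*
 * Tear down the first @nr devices, undoing create_device(): remove the
 * sysfs attributes first so no disksize store can race with us, then
 * reset each device and release its gendisk and request queue.
 */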
Sergey Senozhatsky | a096caf | 2015-02-12 15:00:39 -0800 | [diff] [blame] | 1221 | static void destroy_devices(unsigned int nr) |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1222 | { |
Sergey Senozhatsky | a096caf | 2015-02-12 15:00:39 -0800 | [diff] [blame] | 1223 | struct zram *zram; |
| 1224 | unsigned int i; |
Nitin Gupta | 33863c2 | 2010-08-09 22:56:47 +0530 | [diff] [blame] | 1225 | |
Sergey Senozhatsky | a096caf | 2015-02-12 15:00:39 -0800 | [diff] [blame] | 1226 | for (i = 0; i < nr; i++) { |
| 1227 | zram = &zram_devices[i]; |
| 1228 | /* |
| 1229 | * Remove sysfs first, so no one will perform a disksize |
| 1230 | * store while we destroy the devices |
| 1231 | */ |
| 1232 | sysfs_remove_group(&disk_to_dev(zram->disk)->kobj, |
| 1233 | &zram_disk_attr_group); |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1234 | |
Sergey Senozhatsky | a096caf | 2015-02-12 15:00:39 -0800 | [diff] [blame] | 1235 | zram_reset_device(zram); |
| 1236 | |
Sergey Senozhatsky | ee980160 | 2015-02-12 15:00:48 -0800 | [diff] [blame] | 1237 | blk_cleanup_queue(zram->disk->queue); |
Sergey Senozhatsky | a096caf | 2015-02-12 15:00:39 -0800 | [diff] [blame] | 1238 | del_gendisk(zram->disk); |
| 1239 | put_disk(zram->disk); |
Sergey Senozhatsky | a096caf | 2015-02-12 15:00:39 -0800 | [diff] [blame] | 1240 | } |
| 1241 | |
| 1242 | kfree(zram_devices); |
| 1243 | unregister_blkdev(zram_major, "zram"); |
| 1244 | pr_info("Destroyed %u device(s)\n", nr); |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1245 | } |
| 1246 | |
Nitin Gupta | f1e3cff | 2010-06-01 13:31:25 +0530 | [diff] [blame] | 1247 | static int __init zram_init(void) |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1248 | { |
Nitin Gupta | de1a21a | 2010-01-28 21:13:40 +0530 | [diff] [blame] | 1249 | int ret, dev_id; |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1250 | |
Nitin Gupta | 5fa5a90 | 2012-02-12 23:04:45 -0500 | [diff] [blame] | 1251 | if (num_devices > max_num_devices) { |
Sam Hansen | 94b8435 | 2012-06-07 16:03:47 -0700 | [diff] [blame] | 1252 | pr_warn("Invalid value for num_devices: %u\n", |
Nitin Gupta | 5fa5a90 | 2012-02-12 23:04:45 -0500 | [diff] [blame] | 1253 | num_devices); |
Sergey Senozhatsky | a096caf | 2015-02-12 15:00:39 -0800 | [diff] [blame] | 1254 | return -EINVAL; |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1255 | } |
| 1256 | |
Nitin Gupta | f1e3cff | 2010-06-01 13:31:25 +0530 | [diff] [blame] | 1257 | zram_major = register_blkdev(0, "zram"); |
| 1258 | if (zram_major <= 0) { |
Sam Hansen | 94b8435 | 2012-06-07 16:03:47 -0700 | [diff] [blame] | 1259 | pr_warn("Unable to get major number\n"); |
Sergey Senozhatsky | a096caf | 2015-02-12 15:00:39 -0800 | [diff] [blame] | 1260 | return -EBUSY; |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1261 | } |
| 1262 | |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1263 | /* Allocate the device array and initialize each one */ |
Nitin Gupta | 5fa5a90 | 2012-02-12 23:04:45 -0500 | [diff] [blame] | 1264 | zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL); |
Noah Watkins | 43801f6 | 2011-07-20 17:05:57 -0600 | [diff] [blame] | 1265 | if (!zram_devices) { |
Sergey Senozhatsky | a096caf | 2015-02-12 15:00:39 -0800 | [diff] [blame] | 1266 | unregister_blkdev(zram_major, "zram"); |
| 1267 | return -ENOMEM; |
Nitin Gupta | de1a21a | 2010-01-28 21:13:40 +0530 | [diff] [blame] | 1268 | } |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1269 | |
Nitin Gupta | 5fa5a90 | 2012-02-12 23:04:45 -0500 | [diff] [blame] | 1270 | for (dev_id = 0; dev_id < num_devices; dev_id++) { |
Noah Watkins | 43801f6 | 2011-07-20 17:05:57 -0600 | [diff] [blame] | 1271 | ret = create_device(&zram_devices[dev_id], dev_id); |
Nitin Gupta | de1a21a | 2010-01-28 21:13:40 +0530 | [diff] [blame] | 1272 | if (ret) |
Sergey Senozhatsky | a096caf | 2015-02-12 15:00:39 -0800 | [diff] [blame] | 1273 | goto out_error; |
Nitin Gupta | de1a21a | 2010-01-28 21:13:40 +0530 | [diff] [blame] | 1274 | } |
| 1275 | |
Sergey Senozhatsky | a096caf | 2015-02-12 15:00:39 -0800 | [diff] [blame] | 1276 | pr_info("Created %u device(s)\n", num_devices); |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1277 | return 0; |
Nitin Gupta | de1a21a | 2010-01-28 21:13:40 +0530 | [diff] [blame] | 1278 | |
Sergey Senozhatsky | a096caf | 2015-02-12 15:00:39 -0800 | [diff] [blame] | 1279 | out_error: |
| 1280 | destroy_devices(dev_id); |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1281 | return ret; |
| 1282 | } |
| 1283 | |
Nitin Gupta | f1e3cff | 2010-06-01 13:31:25 +0530 | [diff] [blame] | 1284 | static void __exit zram_exit(void) |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1285 | { |
Sergey Senozhatsky | a096caf | 2015-02-12 15:00:39 -0800 | [diff] [blame] | 1286 | destroy_devices(num_devices); |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1287 | } |
| 1288 | |
Nitin Gupta | f1e3cff | 2010-06-01 13:31:25 +0530 | [diff] [blame] | 1289 | module_init(zram_init); |
| 1290 | module_exit(zram_exit); |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1291 | |
Sergey Senozhatsky | 9b3bb7a | 2013-06-22 03:21:18 +0300 | [diff] [blame] | 1292 | module_param(num_devices, uint, 0); |
| 1293 | MODULE_PARM_DESC(num_devices, "Number of zram devices"); |
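/*
 * Example (assuming the driver is built as a module):
 *   modprobe zram num_devices=4
 * creates /dev/zram0 .. /dev/zram3; each device must then be given a size
 * through its disksize sysfs attribute before it can be used.
 */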
| 1294 | |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1295 | MODULE_LICENSE("Dual BSD/GPL"); |
| 1296 | MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>"); |
Nitin Gupta | f1e3cff | 2010-06-01 13:31:25 +0530 | [diff] [blame] | 1297 | MODULE_DESCRIPTION("Compressed RAM Block Device"); |