/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * xattr.c
 *
 * Copyright (C) 2004, 2008 Oracle.  All rights reserved.
 *
 * CREDITS:
 * Lots of code in this file is copied from linux/fs/ext3/xattr.c.
 * Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/sched.h>
#include <linux/splice.h>
#include <linux/mount.h>
#include <linux/writeback.h>
#include <linux/falloc.h>
#include <linux/sort.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/security.h>

#include <cluster/masklog.h>

#include "ocfs2.h"
#include "alloc.h"
#include "blockcheck.h"
#include "dlmglue.h"
#include "file.h"
#include "symlink.h"
#include "sysfile.h"
#include "inode.h"
#include "journal.h"
#include "ocfs2_fs.h"
#include "suballoc.h"
#include "uptodate.h"
#include "buffer_head_io.h"
#include "super.h"
#include "xattr.h"
#include "refcounttree.h"
#include "acl.h"
#include "ocfs2_trace.h"

struct ocfs2_xattr_def_value_root {
        struct ocfs2_xattr_value_root   xv;
        struct ocfs2_extent_rec         er;
};

struct ocfs2_xattr_bucket {
        /* The inode these xattrs are associated with */
        struct inode *bu_inode;

        /* The actual buffers that make up the bucket */
        struct buffer_head *bu_bhs[OCFS2_XATTR_MAX_BLOCKS_PER_BUCKET];

        /* How many blocks make up one bucket for this filesystem */
        int bu_blocks;
};

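/*
 * State carried through a single xattr set operation: the journal
 * handle, the metadata and data allocators reserved for it, and a
 * dealloc context collecting clusters to be freed afterwards.
 */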
struct ocfs2_xattr_set_ctxt {
        handle_t *handle;
        struct ocfs2_alloc_context *meta_ac;
        struct ocfs2_alloc_context *data_ac;
        struct ocfs2_cached_dealloc_ctxt dealloc;
        int set_abort;
};

#define OCFS2_XATTR_ROOT_SIZE   (sizeof(struct ocfs2_xattr_def_value_root))
#define OCFS2_XATTR_INLINE_SIZE 80
#define OCFS2_XATTR_HEADER_GAP  4
#define OCFS2_XATTR_FREE_IN_IBODY       (OCFS2_MIN_XATTR_INLINE_SIZE \
                                         - sizeof(struct ocfs2_xattr_header) \
                                         - OCFS2_XATTR_HEADER_GAP)
#define OCFS2_XATTR_FREE_IN_BLOCK(ptr)  ((ptr)->i_sb->s_blocksize \
                                         - sizeof(struct ocfs2_xattr_block) \
                                         - sizeof(struct ocfs2_xattr_header) \
                                         - OCFS2_XATTR_HEADER_GAP)

static struct ocfs2_xattr_def_value_root def_xv = {
        .xv.xr_list.l_count = cpu_to_le16(1),
};

const struct xattr_handler *ocfs2_xattr_handlers[] = {
        &ocfs2_xattr_user_handler,
        &ocfs2_xattr_acl_access_handler,
        &ocfs2_xattr_acl_default_handler,
        &ocfs2_xattr_trusted_handler,
        &ocfs2_xattr_security_handler,
        NULL
};

static const struct xattr_handler *ocfs2_xattr_handler_map[OCFS2_XATTR_MAX] = {
        [OCFS2_XATTR_INDEX_USER]        = &ocfs2_xattr_user_handler,
        [OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS]
                                        = &ocfs2_xattr_acl_access_handler,
        [OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT]
                                        = &ocfs2_xattr_acl_default_handler,
        [OCFS2_XATTR_INDEX_TRUSTED]     = &ocfs2_xattr_trusted_handler,
        [OCFS2_XATTR_INDEX_SECURITY]    = &ocfs2_xattr_security_handler,
};

struct ocfs2_xattr_info {
        int             xi_name_index;
        const char      *xi_name;
        int             xi_name_len;
        const void      *xi_value;
        size_t          xi_value_len;
};

struct ocfs2_xattr_search {
        struct buffer_head *inode_bh;
        /*
         * xattr_bh points to the buffer head of the block that holds the
         * extended attributes; when the attributes live in the inode,
         * xattr_bh is equal to inode_bh.
         */
        struct buffer_head *xattr_bh;
        struct ocfs2_xattr_header *header;
        struct ocfs2_xattr_bucket *bucket;
        void *base;
        void *end;
        struct ocfs2_xattr_entry *here;
        int not_found;
};

/* Operations on struct ocfs2_xa_entry */
struct ocfs2_xa_loc;
struct ocfs2_xa_loc_operations {
        /*
         * Journal functions
         */
        int (*xlo_journal_access)(handle_t *handle, struct ocfs2_xa_loc *loc,
                                  int type);
        void (*xlo_journal_dirty)(handle_t *handle, struct ocfs2_xa_loc *loc);

        /*
         * Return a pointer to the appropriate buffer in loc->xl_storage
         * at the given offset from loc->xl_header.
         */
        void *(*xlo_offset_pointer)(struct ocfs2_xa_loc *loc, int offset);

        /* Can we reuse the existing entry for the new value? */
        int (*xlo_can_reuse)(struct ocfs2_xa_loc *loc,
                             struct ocfs2_xattr_info *xi);

        /* How much space is needed for the new value? */
        int (*xlo_check_space)(struct ocfs2_xa_loc *loc,
                               struct ocfs2_xattr_info *xi);

        /*
         * Return the offset of the first name+value pair.  This is
         * the start of our downward-filling free space.
         */
        int (*xlo_get_free_start)(struct ocfs2_xa_loc *loc);

        /*
         * Remove the name+value at this location.  Do whatever is
         * appropriate with the remaining name+value pairs.
         */
        void (*xlo_wipe_namevalue)(struct ocfs2_xa_loc *loc);

        /* Fill xl_entry with a new entry */
        void (*xlo_add_entry)(struct ocfs2_xa_loc *loc, u32 name_hash);

        /* Add name+value storage to an entry */
        void (*xlo_add_namevalue)(struct ocfs2_xa_loc *loc, int size);

        /*
         * Initialize the value buf's access and bh fields for this entry.
         * ocfs2_xa_fill_value_buf() will handle the xv pointer.
         */
        void (*xlo_fill_value_buf)(struct ocfs2_xa_loc *loc,
                                   struct ocfs2_xattr_value_buf *vb);
};

/*
 * Describes an xattr entry location.  This is a memory structure
 * tracking the on-disk structure.
 */
struct ocfs2_xa_loc {
        /* This xattr belongs to this inode */
        struct inode *xl_inode;

        /* The ocfs2_xattr_header inside the on-disk storage. Not NULL. */
        struct ocfs2_xattr_header *xl_header;

        /* Bytes from xl_header to the end of the storage */
        int xl_size;

        /*
         * The ocfs2_xattr_entry this location describes.  If this is
         * NULL, this location describes the on-disk structure where it
         * would have been.
         */
        struct ocfs2_xattr_entry *xl_entry;

        /*
         * Internal housekeeping
         */

        /* Buffer(s) containing this entry */
        void *xl_storage;

        /* Operations on the storage backing this location */
        const struct ocfs2_xa_loc_operations *xl_ops;
};

/*
 * Convenience functions to calculate how much space is needed for a
 * given name+value pair
 */
static int namevalue_size(int name_len, uint64_t value_len)
{
        if (value_len > OCFS2_XATTR_INLINE_SIZE)
                return OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_ROOT_SIZE;
        else
                return OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_SIZE(value_len);
}

static int namevalue_size_xi(struct ocfs2_xattr_info *xi)
{
        return namevalue_size(xi->xi_name_len, xi->xi_value_len);
}

static int namevalue_size_xe(struct ocfs2_xattr_entry *xe)
{
        u64 value_len = le64_to_cpu(xe->xe_value_size);

        BUG_ON((value_len > OCFS2_XATTR_INLINE_SIZE) &&
               ocfs2_xattr_is_local(xe));
        return namevalue_size(xe->xe_name_len, value_len);
}


static int ocfs2_xattr_bucket_get_name_value(struct super_block *sb,
                                             struct ocfs2_xattr_header *xh,
                                             int index,
                                             int *block_off,
                                             int *new_offset);

static int ocfs2_xattr_block_find(struct inode *inode,
                                  int name_index,
                                  const char *name,
                                  struct ocfs2_xattr_search *xs);
static int ocfs2_xattr_index_block_find(struct inode *inode,
                                        struct buffer_head *root_bh,
                                        int name_index,
                                        const char *name,
                                        struct ocfs2_xattr_search *xs);

static int ocfs2_xattr_tree_list_index_block(struct inode *inode,
                                             struct buffer_head *blk_bh,
                                             char *buffer,
                                             size_t buffer_size);

static int ocfs2_xattr_create_index_block(struct inode *inode,
                                          struct ocfs2_xattr_search *xs,
                                          struct ocfs2_xattr_set_ctxt *ctxt);

static int ocfs2_xattr_set_entry_index_block(struct inode *inode,
                                             struct ocfs2_xattr_info *xi,
                                             struct ocfs2_xattr_search *xs,
                                             struct ocfs2_xattr_set_ctxt *ctxt);

typedef int (xattr_tree_rec_func)(struct inode *inode,
                                  struct buffer_head *root_bh,
                                  u64 blkno, u32 cpos, u32 len, void *para);
static int ocfs2_iterate_xattr_index_block(struct inode *inode,
                                           struct buffer_head *root_bh,
                                           xattr_tree_rec_func *rec_func,
                                           void *para);
static int ocfs2_delete_xattr_in_bucket(struct inode *inode,
                                        struct ocfs2_xattr_bucket *bucket,
                                        void *para);
static int ocfs2_rm_xattr_cluster(struct inode *inode,
                                  struct buffer_head *root_bh,
                                  u64 blkno,
                                  u32 cpos,
                                  u32 len,
                                  void *para);

static int ocfs2_mv_xattr_buckets(struct inode *inode, handle_t *handle,
                                  u64 src_blk, u64 last_blk, u64 to_blk,
                                  unsigned int start_bucket,
                                  u32 *first_hash);
static int ocfs2_prepare_refcount_xattr(struct inode *inode,
                                        struct ocfs2_dinode *di,
                                        struct ocfs2_xattr_info *xi,
                                        struct ocfs2_xattr_search *xis,
                                        struct ocfs2_xattr_search *xbs,
                                        struct ocfs2_refcount_tree **ref_tree,
                                        int *meta_need,
                                        int *credits);
static int ocfs2_get_xattr_tree_value_root(struct super_block *sb,
                                           struct ocfs2_xattr_bucket *bucket,
                                           int offset,
                                           struct ocfs2_xattr_value_root **xv,
                                           struct buffer_head **bh);

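/*
 * An xattr bucket is OCFS2_XATTR_BUCKET_SIZE bytes, so how many fit in
 * a cluster and how many blocks back one bucket depend only on the
 * filesystem's cluster and block sizes.
 */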
static inline u16 ocfs2_xattr_buckets_per_cluster(struct ocfs2_super *osb)
{
        return (1 << osb->s_clustersize_bits) / OCFS2_XATTR_BUCKET_SIZE;
}

static inline u16 ocfs2_blocks_per_xattr_bucket(struct super_block *sb)
{
        return OCFS2_XATTR_BUCKET_SIZE / (1 << sb->s_blocksize_bits);
}

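/*
 * A bucket's disk block number, the raw data of its n-th block, and
 * the ocfs2_xattr_header that sits at the start of its first block.
 */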
#define bucket_blkno(_b) ((_b)->bu_bhs[0]->b_blocknr)
#define bucket_block(_b, _n) ((_b)->bu_bhs[(_n)]->b_data)
#define bucket_xh(_b) ((struct ocfs2_xattr_header *)bucket_block((_b), 0))

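/*
 * Allocate an in-memory bucket tied to an inode.  The buffer_heads are
 * attached later by ocfs2_init_xattr_bucket() or ocfs2_read_xattr_bucket()
 * and dropped again by ocfs2_xattr_bucket_relse().
 */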
static struct ocfs2_xattr_bucket *ocfs2_xattr_bucket_new(struct inode *inode)
{
        struct ocfs2_xattr_bucket *bucket;
        int blks = ocfs2_blocks_per_xattr_bucket(inode->i_sb);

        BUG_ON(blks > OCFS2_XATTR_MAX_BLOCKS_PER_BUCKET);

        bucket = kzalloc(sizeof(struct ocfs2_xattr_bucket), GFP_NOFS);
        if (bucket) {
                bucket->bu_inode = inode;
                bucket->bu_blocks = blks;
        }

        return bucket;
}

static void ocfs2_xattr_bucket_relse(struct ocfs2_xattr_bucket *bucket)
{
        int i;

        for (i = 0; i < bucket->bu_blocks; i++) {
                brelse(bucket->bu_bhs[i]);
                bucket->bu_bhs[i] = NULL;
        }
}

static void ocfs2_xattr_bucket_free(struct ocfs2_xattr_bucket *bucket)
{
        if (bucket) {
                ocfs2_xattr_bucket_relse(bucket);
                bucket->bu_inode = NULL;
                kfree(bucket);
        }
}

/*
 * A bucket that has never been written to disk doesn't need to be
 * read.  We just need the buffer_heads.  Don't call this for
 * buckets that are already on disk.  ocfs2_read_xattr_bucket() initializes
 * them fully.
 */
static int ocfs2_init_xattr_bucket(struct ocfs2_xattr_bucket *bucket,
                                   u64 xb_blkno)
{
        int i, rc = 0;

        for (i = 0; i < bucket->bu_blocks; i++) {
                bucket->bu_bhs[i] = sb_getblk(bucket->bu_inode->i_sb,
                                              xb_blkno + i);
                if (!bucket->bu_bhs[i]) {
                        rc = -EIO;
                        mlog_errno(rc);
                        break;
                }

                if (!ocfs2_buffer_uptodate(INODE_CACHE(bucket->bu_inode),
                                           bucket->bu_bhs[i]))
                        ocfs2_set_new_buffer_uptodate(INODE_CACHE(bucket->bu_inode),
                                                      bucket->bu_bhs[i]);
        }

        if (rc)
                ocfs2_xattr_bucket_relse(bucket);
        return rc;
}

/* Read the xattr bucket at xb_blkno */
static int ocfs2_read_xattr_bucket(struct ocfs2_xattr_bucket *bucket,
                                   u64 xb_blkno)
{
        int rc;

        rc = ocfs2_read_blocks(INODE_CACHE(bucket->bu_inode), xb_blkno,
                               bucket->bu_blocks, bucket->bu_bhs, 0,
                               NULL);
        if (!rc) {
                spin_lock(&OCFS2_SB(bucket->bu_inode->i_sb)->osb_xattr_lock);
                rc = ocfs2_validate_meta_ecc_bhs(bucket->bu_inode->i_sb,
                                                 bucket->bu_bhs,
                                                 bucket->bu_blocks,
                                                 &bucket_xh(bucket)->xh_check);
                spin_unlock(&OCFS2_SB(bucket->bu_inode->i_sb)->osb_xattr_lock);
                if (rc)
                        mlog_errno(rc);
        }

        if (rc)
                ocfs2_xattr_bucket_relse(bucket);
        return rc;
}

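/*
 * Journal helpers that operate on every block backing a bucket;
 * dirtying a bucket also refreshes its metaecc check data.
 */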
static int ocfs2_xattr_bucket_journal_access(handle_t *handle,
                                             struct ocfs2_xattr_bucket *bucket,
                                             int type)
{
        int i, rc = 0;

        for (i = 0; i < bucket->bu_blocks; i++) {
                rc = ocfs2_journal_access(handle,
                                          INODE_CACHE(bucket->bu_inode),
                                          bucket->bu_bhs[i], type);
                if (rc) {
                        mlog_errno(rc);
                        break;
                }
        }

        return rc;
}

static void ocfs2_xattr_bucket_journal_dirty(handle_t *handle,
                                             struct ocfs2_xattr_bucket *bucket)
{
        int i;

        spin_lock(&OCFS2_SB(bucket->bu_inode->i_sb)->osb_xattr_lock);
        ocfs2_compute_meta_ecc_bhs(bucket->bu_inode->i_sb,
                                   bucket->bu_bhs, bucket->bu_blocks,
                                   &bucket_xh(bucket)->xh_check);
        spin_unlock(&OCFS2_SB(bucket->bu_inode->i_sb)->osb_xattr_lock);

        for (i = 0; i < bucket->bu_blocks; i++)
                ocfs2_journal_dirty(handle, bucket->bu_bhs[i]);
}

static void ocfs2_xattr_bucket_copy_data(struct ocfs2_xattr_bucket *dest,
                                         struct ocfs2_xattr_bucket *src)
{
        int i;
        int blocksize = src->bu_inode->i_sb->s_blocksize;

        BUG_ON(dest->bu_blocks != src->bu_blocks);
        BUG_ON(dest->bu_inode != src->bu_inode);

        for (i = 0; i < src->bu_blocks; i++) {
                memcpy(bucket_block(dest, i), bucket_block(src, i),
                       blocksize);
        }
}

static int ocfs2_validate_xattr_block(struct super_block *sb,
                                      struct buffer_head *bh)
{
        int rc;
        struct ocfs2_xattr_block *xb =
                (struct ocfs2_xattr_block *)bh->b_data;

        trace_ocfs2_validate_xattr_block((unsigned long long)bh->b_blocknr);

        BUG_ON(!buffer_uptodate(bh));

        /*
         * If the ecc fails, we return the error but otherwise
         * leave the filesystem running.  We know any error is
         * local to this block.
         */
        rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &xb->xb_check);
        if (rc)
                return rc;

        /*
         * Errors after here are fatal
         */

        if (!OCFS2_IS_VALID_XATTR_BLOCK(xb)) {
                ocfs2_error(sb,
                            "Extended attribute block #%llu has bad "
                            "signature %.*s",
                            (unsigned long long)bh->b_blocknr, 7,
                            xb->xb_signature);
                return -EINVAL;
        }

        if (le64_to_cpu(xb->xb_blkno) != bh->b_blocknr) {
                ocfs2_error(sb,
                            "Extended attribute block #%llu has an "
                            "invalid xb_blkno of %llu",
                            (unsigned long long)bh->b_blocknr,
                            (unsigned long long)le64_to_cpu(xb->xb_blkno));
                return -EINVAL;
        }

        if (le32_to_cpu(xb->xb_fs_generation) != OCFS2_SB(sb)->fs_generation) {
                ocfs2_error(sb,
                            "Extended attribute block #%llu has an invalid "
                            "xb_fs_generation of #%u",
                            (unsigned long long)bh->b_blocknr,
                            le32_to_cpu(xb->xb_fs_generation));
                return -EINVAL;
        }

        return 0;
}

static int ocfs2_read_xattr_block(struct inode *inode, u64 xb_blkno,
                                  struct buffer_head **bh)
{
        int rc;
        struct buffer_head *tmp = *bh;

        rc = ocfs2_read_block(INODE_CACHE(inode), xb_blkno, &tmp,
                              ocfs2_validate_xattr_block);

        /* If ocfs2_read_block() got us a new bh, pass it up. */
        if (!rc && !*bh)
                *bh = tmp;

        return rc;
}

static inline const char *ocfs2_xattr_prefix(int name_index)
{
        const struct xattr_handler *handler = NULL;

        if (name_index > 0 && name_index < OCFS2_XATTR_MAX)
                handler = ocfs2_xattr_handler_map[name_index];

        return handler ? handler->prefix : NULL;
}

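/*
 * Hash an xattr name, seeded with the superblock's uuid_hash.  The
 * resulting hash orders entries in indexed (bucketed) xattr storage.
 */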
static u32 ocfs2_xattr_name_hash(struct inode *inode,
                                 const char *name,
                                 int name_len)
{
        /* Get hash value of uuid from super block */
        u32 hash = OCFS2_SB(inode->i_sb)->uuid_hash;
        int i;

        /* hash extended attribute name */
        for (i = 0; i < name_len; i++) {
                hash = (hash << OCFS2_HASH_SHIFT) ^
                       (hash >> (8*sizeof(hash) - OCFS2_HASH_SHIFT)) ^
                       *name++;
        }

        return hash;
}

static int ocfs2_xattr_entry_real_size(int name_len, size_t value_len)
{
        return namevalue_size(name_len, value_len) +
                sizeof(struct ocfs2_xattr_entry);
}

static int ocfs2_xi_entry_usage(struct ocfs2_xattr_info *xi)
{
        return namevalue_size_xi(xi) +
                sizeof(struct ocfs2_xattr_entry);
}

static int ocfs2_xe_entry_usage(struct ocfs2_xattr_entry *xe)
{
        return namevalue_size_xe(xe) +
                sizeof(struct ocfs2_xattr_entry);
}

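/*
 * Estimate the metadata blocks, clusters and journal credits needed to
 * attach the initial security xattr to a freshly created inode.
 */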
int ocfs2_calc_security_init(struct inode *dir,
                             struct ocfs2_security_xattr_info *si,
                             int *want_clusters,
                             int *xattr_credits,
                             struct ocfs2_alloc_context **xattr_ac)
{
        int ret = 0;
        struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
        int s_size = ocfs2_xattr_entry_real_size(strlen(si->name),
                                                 si->value_len);

        /*
         * The max space a security xattr takes inline is
         * 256(name) + 80(value) + 16(entry) = 352 bytes,
         * so reserving one metadata block for it is enough.
         */
        if (dir->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE ||
            s_size > OCFS2_XATTR_FREE_IN_IBODY) {
                ret = ocfs2_reserve_new_metadata_blocks(osb, 1, xattr_ac);
                if (ret) {
                        mlog_errno(ret);
                        return ret;
                }
                *xattr_credits += OCFS2_XATTR_BLOCK_CREATE_CREDITS;
        }

        /* reserve clusters for an xattr value that will be stored in a B-tree */
        if (si->value_len > OCFS2_XATTR_INLINE_SIZE) {
                int new_clusters = ocfs2_clusters_for_bytes(dir->i_sb,
                                                            si->value_len);

                *xattr_credits += ocfs2_clusters_to_blocks(dir->i_sb,
                                                           new_clusters);
                *want_clusters += new_clusters;
        }
        return ret;
}

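/*
 * As above, but for everything set at inode creation time: the
 * security xattr plus any default ACLs inherited from the directory.
 */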
int ocfs2_calc_xattr_init(struct inode *dir,
                          struct buffer_head *dir_bh,
                          umode_t mode,
                          struct ocfs2_security_xattr_info *si,
                          int *want_clusters,
                          int *xattr_credits,
                          int *want_meta)
{
        int ret = 0;
        struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
        int s_size = 0, a_size = 0, acl_len = 0, new_clusters;

        if (si->enable)
                s_size = ocfs2_xattr_entry_real_size(strlen(si->name),
                                                     si->value_len);

        if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) {
                acl_len = ocfs2_xattr_get_nolock(dir, dir_bh,
                                        OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT,
                                        "", NULL, 0);
                if (acl_len > 0) {
                        a_size = ocfs2_xattr_entry_real_size(0, acl_len);
                        if (S_ISDIR(mode))
                                a_size <<= 1;
                } else if (acl_len != 0 && acl_len != -ENODATA) {
                        ret = acl_len;
                        mlog_errno(ret);
                        return ret;
                }
        }

        if (!(s_size + a_size))
                return ret;

        /*
         * The max space a security xattr takes inline is
         * 256(name) + 80(value) + 16(entry) = 352 bytes,
         * The max space an acl xattr takes inline is
         * 80(value) + 16(entry) * 2(if directory) = 192 bytes,
         * When blocksize = 512 we may reserve one more cluster for
         * the xattr bucket; otherwise reserving one metadata block
         * for them is enough.
         * If this is a new directory with inline data,
         * we choose to reserve the entire inline area for
         * directory contents and force an external xattr block.
         */
        if (dir->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE ||
            (S_ISDIR(mode) && ocfs2_supports_inline_data(osb)) ||
            (s_size + a_size) > OCFS2_XATTR_FREE_IN_IBODY) {
                *want_meta = *want_meta + 1;
                *xattr_credits += OCFS2_XATTR_BLOCK_CREATE_CREDITS;
        }

        if (dir->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE &&
            (s_size + a_size) > OCFS2_XATTR_FREE_IN_BLOCK(dir)) {
                *want_clusters += 1;
                *xattr_credits += ocfs2_blocks_per_xattr_bucket(dir->i_sb);
        }

        /*
         * reserve credits and clusters for xattrs that have a large
         * value and must be stored outside the inode
         */
        if (si->enable && si->value_len > OCFS2_XATTR_INLINE_SIZE) {
                new_clusters = ocfs2_clusters_for_bytes(dir->i_sb,
                                                        si->value_len);
                *xattr_credits += ocfs2_clusters_to_blocks(dir->i_sb,
                                                           new_clusters);
                *want_clusters += new_clusters;
        }
        if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL &&
            acl_len > OCFS2_XATTR_INLINE_SIZE) {
                /* a directory has both DEFAULT and ACCESS ACLs */
                new_clusters = (S_ISDIR(mode) ? 2 : 1) *
                                ocfs2_clusters_for_bytes(dir->i_sb, acl_len);
                *xattr_credits += ocfs2_clusters_to_blocks(dir->i_sb,
                                                           new_clusters);
                *want_clusters += new_clusters;
        }

        return ret;
}

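/*
 * Grow an xattr value's allocation by clusters_to_add clusters,
 * extending the transaction whenever the btree code asks for a restart.
 */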
static int ocfs2_xattr_extend_allocation(struct inode *inode,
                                         u32 clusters_to_add,
                                         struct ocfs2_xattr_value_buf *vb,
                                         struct ocfs2_xattr_set_ctxt *ctxt)
{
        int status = 0, credits;
        handle_t *handle = ctxt->handle;
        enum ocfs2_alloc_restarted why;
        u32 prev_clusters, logical_start = le32_to_cpu(vb->vb_xv->xr_clusters);
        struct ocfs2_extent_tree et;

        ocfs2_init_xattr_value_extent_tree(&et, INODE_CACHE(inode), vb);

        while (clusters_to_add) {
                trace_ocfs2_xattr_extend_allocation(clusters_to_add);

                status = vb->vb_access(handle, INODE_CACHE(inode), vb->vb_bh,
                                       OCFS2_JOURNAL_ACCESS_WRITE);
                if (status < 0) {
                        mlog_errno(status);
                        break;
                }

                prev_clusters = le32_to_cpu(vb->vb_xv->xr_clusters);
                status = ocfs2_add_clusters_in_btree(handle,
                                                     &et,
                                                     &logical_start,
                                                     clusters_to_add,
                                                     0,
                                                     ctxt->data_ac,
                                                     ctxt->meta_ac,
                                                     &why);
                if ((status < 0) && (status != -EAGAIN)) {
                        if (status != -ENOSPC)
                                mlog_errno(status);
                        break;
                }

                ocfs2_journal_dirty(handle, vb->vb_bh);

                clusters_to_add -= le32_to_cpu(vb->vb_xv->xr_clusters) -
                                        prev_clusters;

                if (why != RESTART_NONE && clusters_to_add) {
                        /*
                         * We can only fail in case the alloc file doesn't give
                         * up enough clusters.
                         */
                        BUG_ON(why == RESTART_META);

                        credits = ocfs2_calc_extend_credits(inode->i_sb,
                                                            &vb->vb_xv->xr_list,
                                                            clusters_to_add);
                        status = ocfs2_extend_trans(handle, credits);
                        if (status < 0) {
                                status = -ENOMEM;
                                mlog_errno(status);
                                break;
                        }
                }
        }

        return status;
}

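/*
 * Punch one contiguous range out of an xattr value tree.  Refcounted
 * (reflinked) clusters get their refcount dropped; plain clusters are
 * queued on the dealloc context.
 */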
static int __ocfs2_remove_xattr_range(struct inode *inode,
                                      struct ocfs2_xattr_value_buf *vb,
                                      u32 cpos, u32 phys_cpos, u32 len,
                                      unsigned int ext_flags,
                                      struct ocfs2_xattr_set_ctxt *ctxt)
{
        int ret;
        u64 phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos);
        handle_t *handle = ctxt->handle;
        struct ocfs2_extent_tree et;

        ocfs2_init_xattr_value_extent_tree(&et, INODE_CACHE(inode), vb);

        ret = vb->vb_access(handle, INODE_CACHE(inode), vb->vb_bh,
                            OCFS2_JOURNAL_ACCESS_WRITE);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        ret = ocfs2_remove_extent(handle, &et, cpos, len, ctxt->meta_ac,
                                  &ctxt->dealloc);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        le32_add_cpu(&vb->vb_xv->xr_clusters, -len);
        ocfs2_journal_dirty(handle, vb->vb_bh);

        if (ext_flags & OCFS2_EXT_REFCOUNTED)
                ret = ocfs2_decrease_refcount(inode, handle,
                                        ocfs2_blocks_to_clusters(inode->i_sb,
                                                                 phys_blkno),
                                        len, ctxt->meta_ac, &ctxt->dealloc, 1);
        else
                ret = ocfs2_cache_cluster_dealloc(&ctxt->dealloc,
                                                  phys_blkno, len);
        if (ret)
                mlog_errno(ret);

out:
        return ret;
}

static int ocfs2_xattr_shrink_size(struct inode *inode,
                                   u32 old_clusters,
                                   u32 new_clusters,
                                   struct ocfs2_xattr_value_buf *vb,
                                   struct ocfs2_xattr_set_ctxt *ctxt)
{
        int ret = 0;
        unsigned int ext_flags;
        u32 trunc_len, cpos, phys_cpos, alloc_size;
        u64 block;

        if (old_clusters <= new_clusters)
                return 0;

        cpos = new_clusters;
        trunc_len = old_clusters - new_clusters;
        while (trunc_len) {
                ret = ocfs2_xattr_get_clusters(inode, cpos, &phys_cpos,
                                               &alloc_size,
                                               &vb->vb_xv->xr_list, &ext_flags);
                if (ret) {
                        mlog_errno(ret);
                        goto out;
                }

                if (alloc_size > trunc_len)
                        alloc_size = trunc_len;

                ret = __ocfs2_remove_xattr_range(inode, vb, cpos,
                                                 phys_cpos, alloc_size,
                                                 ext_flags, ctxt);
                if (ret) {
                        mlog_errno(ret);
                        goto out;
                }

                block = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos);
                ocfs2_remove_xattr_clusters_from_cache(INODE_CACHE(inode),
                                                       block, alloc_size);
                cpos += alloc_size;
                trunc_len -= alloc_size;
        }

out:
        return ret;
}

static int ocfs2_xattr_value_truncate(struct inode *inode,
                                      struct ocfs2_xattr_value_buf *vb,
                                      int len,
                                      struct ocfs2_xattr_set_ctxt *ctxt)
{
        int ret;
        u32 new_clusters = ocfs2_clusters_for_bytes(inode->i_sb, len);
        u32 old_clusters = le32_to_cpu(vb->vb_xv->xr_clusters);

        if (new_clusters == old_clusters)
                return 0;

        if (new_clusters > old_clusters)
                ret = ocfs2_xattr_extend_allocation(inode,
                                                    new_clusters - old_clusters,
                                                    vb, ctxt);
        else
                ret = ocfs2_xattr_shrink_size(inode,
                                              old_clusters, new_clusters,
                                              vb, ctxt);

        return ret;
}

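/*
 * Emit a single prefix+name string (NUL-terminated) into the listxattr
 * buffer, or just account for its size when no buffer is given.
 */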
static int ocfs2_xattr_list_entry(char *buffer, size_t size,
                                  size_t *result, const char *prefix,
                                  const char *name, int name_len)
{
        char *p = buffer + *result;
        int prefix_len = strlen(prefix);
        int total_len = prefix_len + name_len + 1;

        *result += total_len;

        /* we are just looking for how big our buffer needs to be */
        if (!size)
                return 0;

        if (*result > size)
                return -ERANGE;

        memcpy(p, prefix, prefix_len);
        memcpy(p + prefix_len, name, name_len);
        p[prefix_len + name_len] = '\0';

        return 0;
}

static int ocfs2_xattr_list_entries(struct inode *inode,
                                    struct ocfs2_xattr_header *header,
                                    char *buffer, size_t buffer_size)
{
        size_t result = 0;
        int i, type, ret;
        const char *prefix, *name;

        for (i = 0 ; i < le16_to_cpu(header->xh_count); i++) {
                struct ocfs2_xattr_entry *entry = &header->xh_entries[i];
                type = ocfs2_xattr_get_type(entry);
                prefix = ocfs2_xattr_prefix(type);

                if (prefix) {
                        name = (const char *)header +
                                le16_to_cpu(entry->xe_name_offset);

                        ret = ocfs2_xattr_list_entry(buffer, buffer_size,
                                                     &result, prefix, name,
                                                     entry->xe_name_len);
                        if (ret)
                                return ret;
                }
        }

        return result;
}

int ocfs2_has_inline_xattr_value_outside(struct inode *inode,
                                         struct ocfs2_dinode *di)
{
        struct ocfs2_xattr_header *xh;
        int i;

        xh = (struct ocfs2_xattr_header *)
                 ((void *)di + inode->i_sb->s_blocksize -
                 le16_to_cpu(di->i_xattr_inline_size));

        for (i = 0; i < le16_to_cpu(xh->xh_count); i++)
                if (!ocfs2_xattr_is_local(&xh->xh_entries[i]))
                        return 1;

        return 0;
}

static int ocfs2_xattr_ibody_list(struct inode *inode,
                                  struct ocfs2_dinode *di,
                                  char *buffer,
                                  size_t buffer_size)
{
        struct ocfs2_xattr_header *header = NULL;
        struct ocfs2_inode_info *oi = OCFS2_I(inode);
        int ret = 0;

        if (!(oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL))
                return ret;

        header = (struct ocfs2_xattr_header *)
                 ((void *)di + inode->i_sb->s_blocksize -
                 le16_to_cpu(di->i_xattr_inline_size));

        ret = ocfs2_xattr_list_entries(inode, header, buffer, buffer_size);

        return ret;
}

static int ocfs2_xattr_block_list(struct inode *inode,
                                  struct ocfs2_dinode *di,
                                  char *buffer,
                                  size_t buffer_size)
{
        struct buffer_head *blk_bh = NULL;
        struct ocfs2_xattr_block *xb;
        int ret = 0;

        if (!di->i_xattr_loc)
                return ret;

        ret = ocfs2_read_xattr_block(inode, le64_to_cpu(di->i_xattr_loc),
                                     &blk_bh);
        if (ret < 0) {
                mlog_errno(ret);
                return ret;
        }

        xb = (struct ocfs2_xattr_block *)blk_bh->b_data;
        if (!(le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED)) {
                struct ocfs2_xattr_header *header = &xb->xb_attrs.xb_header;
                ret = ocfs2_xattr_list_entries(inode, header,
                                               buffer, buffer_size);
        } else
                ret = ocfs2_xattr_tree_list_index_block(inode, blk_bh,
                                                        buffer, buffer_size);

        brelse(blk_bh);

        return ret;
}

ssize_t ocfs2_listxattr(struct dentry *dentry,
                        char *buffer,
                        size_t size)
{
        int ret = 0, i_ret = 0, b_ret = 0;
        struct buffer_head *di_bh = NULL;
        struct ocfs2_dinode *di = NULL;
        struct ocfs2_inode_info *oi = OCFS2_I(dentry->d_inode);

        if (!ocfs2_supports_xattr(OCFS2_SB(dentry->d_sb)))
                return -EOPNOTSUPP;

        if (!(oi->ip_dyn_features & OCFS2_HAS_XATTR_FL))
                return ret;

        ret = ocfs2_inode_lock(dentry->d_inode, &di_bh, 0);
        if (ret < 0) {
                mlog_errno(ret);
                return ret;
        }

        di = (struct ocfs2_dinode *)di_bh->b_data;

        down_read(&oi->ip_xattr_sem);
        i_ret = ocfs2_xattr_ibody_list(dentry->d_inode, di, buffer, size);
        if (i_ret < 0)
                b_ret = 0;
        else {
                if (buffer) {
                        buffer += i_ret;
                        size -= i_ret;
                }
                b_ret = ocfs2_xattr_block_list(dentry->d_inode, di,
                                               buffer, size);
                if (b_ret < 0)
                        i_ret = 0;
        }
        up_read(&oi->ip_xattr_sem);
        ocfs2_inode_unlock(dentry->d_inode, 0);

        brelse(di_bh);

        return i_ret + b_ret;
}

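/*
 * Walk the entries in xs->header looking for a (name_index, name)
 * match.  On success xs->here points at the matching entry; otherwise
 * -ENODATA is returned.
 */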
static int ocfs2_xattr_find_entry(int name_index,
                                  const char *name,
                                  struct ocfs2_xattr_search *xs)
{
        struct ocfs2_xattr_entry *entry;
        size_t name_len;
        int i, cmp = 1;

        if (name == NULL)
                return -EINVAL;

        name_len = strlen(name);
        entry = xs->here;
        for (i = 0; i < le16_to_cpu(xs->header->xh_count); i++) {
                cmp = name_index - ocfs2_xattr_get_type(entry);
                if (!cmp)
                        cmp = name_len - entry->xe_name_len;
                if (!cmp)
                        cmp = memcmp(name, (xs->base +
                                     le16_to_cpu(entry->xe_name_offset)),
                                     name_len);
                if (cmp == 0)
                        break;
                entry += 1;
        }
        xs->here = entry;

        return cmp ? -ENODATA : 0;
}

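/*
 * Copy a value stored outside the header (in its own value tree) into
 * the caller's buffer, one cluster run at a time.
 */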
static int ocfs2_xattr_get_value_outside(struct inode *inode,
                                         struct ocfs2_xattr_value_root *xv,
                                         void *buffer,
                                         size_t len)
{
        u32 cpos, p_cluster, num_clusters, bpc, clusters;
        u64 blkno;
        int i, ret = 0;
        size_t cplen, blocksize;
        struct buffer_head *bh = NULL;
        struct ocfs2_extent_list *el;

        el = &xv->xr_list;
        clusters = le32_to_cpu(xv->xr_clusters);
        bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1);
        blocksize = inode->i_sb->s_blocksize;

        cpos = 0;
        while (cpos < clusters) {
                ret = ocfs2_xattr_get_clusters(inode, cpos, &p_cluster,
                                               &num_clusters, el, NULL);
                if (ret) {
                        mlog_errno(ret);
                        goto out;
                }

                blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cluster);
                /* Copy ocfs2_xattr_value */
                for (i = 0; i < num_clusters * bpc; i++, blkno++) {
                        ret = ocfs2_read_block(INODE_CACHE(inode), blkno,
                                               &bh, NULL);
                        if (ret) {
                                mlog_errno(ret);
                                goto out;
                        }

                        cplen = len >= blocksize ? blocksize : len;
                        memcpy(buffer, bh->b_data, cplen);
                        len -= cplen;
                        buffer += cplen;

                        brelse(bh);
                        bh = NULL;
                        if (len == 0)
                                break;
                }
                cpos += num_clusters;
        }
out:
        return ret;
}

static int ocfs2_xattr_ibody_get(struct inode *inode,
                                 int name_index,
                                 const char *name,
                                 void *buffer,
                                 size_t buffer_size,
                                 struct ocfs2_xattr_search *xs)
{
        struct ocfs2_inode_info *oi = OCFS2_I(inode);
        struct ocfs2_dinode *di = (struct ocfs2_dinode *)xs->inode_bh->b_data;
        struct ocfs2_xattr_value_root *xv;
        size_t size;
        int ret = 0;

        if (!(oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL))
                return -ENODATA;

        xs->end = (void *)di + inode->i_sb->s_blocksize;
        xs->header = (struct ocfs2_xattr_header *)
                        (xs->end - le16_to_cpu(di->i_xattr_inline_size));
        xs->base = (void *)xs->header;
        xs->here = xs->header->xh_entries;

        ret = ocfs2_xattr_find_entry(name_index, name, xs);
        if (ret)
                return ret;
        size = le64_to_cpu(xs->here->xe_value_size);
        if (buffer) {
                if (size > buffer_size)
                        return -ERANGE;
                if (ocfs2_xattr_is_local(xs->here)) {
                        memcpy(buffer, (void *)xs->base +
                               le16_to_cpu(xs->here->xe_name_offset) +
                               OCFS2_XATTR_SIZE(xs->here->xe_name_len), size);
                } else {
                        xv = (struct ocfs2_xattr_value_root *)
                                (xs->base + le16_to_cpu(
                                 xs->here->xe_name_offset) +
                                OCFS2_XATTR_SIZE(xs->here->xe_name_len));
                        ret = ocfs2_xattr_get_value_outside(inode, xv,
                                                            buffer, size);
                        if (ret < 0) {
                                mlog_errno(ret);
                                return ret;
                        }
                }
        }

        return size;
}

static int ocfs2_xattr_block_get(struct inode *inode,
                                 int name_index,
                                 const char *name,
                                 void *buffer,
                                 size_t buffer_size,
                                 struct ocfs2_xattr_search *xs)
{
        struct ocfs2_xattr_block *xb;
        struct ocfs2_xattr_value_root *xv;
        size_t size;
        int ret = -ENODATA, name_offset, name_len, i;
        int uninitialized_var(block_off);

        xs->bucket = ocfs2_xattr_bucket_new(inode);
        if (!xs->bucket) {
                ret = -ENOMEM;
                mlog_errno(ret);
                goto cleanup;
        }

        ret = ocfs2_xattr_block_find(inode, name_index, name, xs);
        if (ret) {
                mlog_errno(ret);
                goto cleanup;
        }

        if (xs->not_found) {
                ret = -ENODATA;
                goto cleanup;
        }

        xb = (struct ocfs2_xattr_block *)xs->xattr_bh->b_data;
        size = le64_to_cpu(xs->here->xe_value_size);
        if (buffer) {
                ret = -ERANGE;
                if (size > buffer_size)
                        goto cleanup;

                name_offset = le16_to_cpu(xs->here->xe_name_offset);
                name_len = OCFS2_XATTR_SIZE(xs->here->xe_name_len);
                i = xs->here - xs->header->xh_entries;

                if (le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED) {
                        ret = ocfs2_xattr_bucket_get_name_value(inode->i_sb,
                                                        bucket_xh(xs->bucket),
                                                        i,
                                                        &block_off,
                                                        &name_offset);
                        xs->base = bucket_block(xs->bucket, block_off);
                }
                if (ocfs2_xattr_is_local(xs->here)) {
                        memcpy(buffer, (void *)xs->base +
                               name_offset + name_len, size);
                } else {
                        xv = (struct ocfs2_xattr_value_root *)
                                (xs->base + name_offset + name_len);
                        ret = ocfs2_xattr_get_value_outside(inode, xv,
                                                            buffer, size);
                        if (ret < 0) {
                                mlog_errno(ret);
                                goto cleanup;
                        }
                }
        }
        ret = size;
cleanup:
        ocfs2_xattr_bucket_free(xs->bucket);

        brelse(xs->xattr_bh);
        xs->xattr_bh = NULL;
        return ret;
}

int ocfs2_xattr_get_nolock(struct inode *inode,
                           struct buffer_head *di_bh,
                           int name_index,
                           const char *name,
                           void *buffer,
                           size_t buffer_size)
{
        int ret;
        struct ocfs2_dinode *di = NULL;
        struct ocfs2_inode_info *oi = OCFS2_I(inode);
        struct ocfs2_xattr_search xis = {
                .not_found = -ENODATA,
        };
        struct ocfs2_xattr_search xbs = {
                .not_found = -ENODATA,
        };

        if (!ocfs2_supports_xattr(OCFS2_SB(inode->i_sb)))
                return -EOPNOTSUPP;

        if (!(oi->ip_dyn_features & OCFS2_HAS_XATTR_FL))
                ret = -ENODATA;

        xis.inode_bh = xbs.inode_bh = di_bh;
        di = (struct ocfs2_dinode *)di_bh->b_data;

        ret = ocfs2_xattr_ibody_get(inode, name_index, name, buffer,
                                    buffer_size, &xis);
        if (ret == -ENODATA && di->i_xattr_loc)
                ret = ocfs2_xattr_block_get(inode, name_index, name, buffer,
                                            buffer_size, &xbs);

        return ret;
}

/* ocfs2_xattr_get()
 *
 * Copy an extended attribute into the buffer provided.
 * Buffer is NULL to compute the size of buffer required.
 */
static int ocfs2_xattr_get(struct inode *inode,
                           int name_index,
                           const char *name,
                           void *buffer,
                           size_t buffer_size)
{
        int ret;
        struct buffer_head *di_bh = NULL;

        ret = ocfs2_inode_lock(inode, &di_bh, 0);
        if (ret < 0) {
                mlog_errno(ret);
                return ret;
        }
        down_read(&OCFS2_I(inode)->ip_xattr_sem);
        ret = ocfs2_xattr_get_nolock(inode, di_bh, name_index,
                                     name, buffer, buffer_size);
        up_read(&OCFS2_I(inode)->ip_xattr_sem);

        ocfs2_inode_unlock(inode, 0);

        brelse(di_bh);

        return ret;
}

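/*
 * Write a value into the clusters already allocated to its value tree,
 * zero-filling the tail of the last block it touches.
 */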
static int __ocfs2_xattr_set_value_outside(struct inode *inode,
                                           handle_t *handle,
                                           struct ocfs2_xattr_value_buf *vb,
                                           const void *value,
                                           int value_len)
{
        int ret = 0, i, cp_len;
        u16 blocksize = inode->i_sb->s_blocksize;
        u32 p_cluster, num_clusters;
        u32 cpos = 0, bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1);
        u32 clusters = ocfs2_clusters_for_bytes(inode->i_sb, value_len);
        u64 blkno;
        struct buffer_head *bh = NULL;
        unsigned int ext_flags;
        struct ocfs2_xattr_value_root *xv = vb->vb_xv;

        BUG_ON(clusters > le32_to_cpu(xv->xr_clusters));

        while (cpos < clusters) {
                ret = ocfs2_xattr_get_clusters(inode, cpos, &p_cluster,
                                               &num_clusters, &xv->xr_list,
                                               &ext_flags);
                if (ret) {
                        mlog_errno(ret);
                        goto out;
                }

                BUG_ON(ext_flags & OCFS2_EXT_REFCOUNTED);

                blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cluster);

                for (i = 0; i < num_clusters * bpc; i++, blkno++) {
                        ret = ocfs2_read_block(INODE_CACHE(inode), blkno,
                                               &bh, NULL);
                        if (ret) {
                                mlog_errno(ret);
                                goto out;
                        }

                        ret = ocfs2_journal_access(handle,
                                                   INODE_CACHE(inode),
                                                   bh,
                                                   OCFS2_JOURNAL_ACCESS_WRITE);
                        if (ret < 0) {
                                mlog_errno(ret);
                                goto out;
                        }

                        cp_len = value_len > blocksize ? blocksize : value_len;
                        memcpy(bh->b_data, value, cp_len);
                        value_len -= cp_len;
                        value += cp_len;
                        if (cp_len < blocksize)
                                memset(bh->b_data + cp_len, 0,
                                       blocksize - cp_len);

                        ocfs2_journal_dirty(handle, bh);
                        brelse(bh);
                        bh = NULL;

                        /*
                         * XXX: do we need to empty all the following
                         * blocks in this cluster?
                         */
                        if (!value_len)
                                break;
                }
                cpos += num_clusters;
        }
out:
        brelse(bh);

        return ret;
}

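/*
 * Does needed_space fit between the entry array (growing up from the
 * header) and the name+value region (growing down from free_start)?
 */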
Joel Becker69a3e532009-08-17 12:24:39 -07001401static int ocfs2_xa_check_space_helper(int needed_space, int free_start,
1402 int num_entries)
1403{
1404 int free_space;
1405
1406 if (!needed_space)
1407 return 0;
1408
1409 free_space = free_start -
1410 sizeof(struct ocfs2_xattr_header) -
1411 (num_entries * sizeof(struct ocfs2_xattr_entry)) -
1412 OCFS2_XATTR_HEADER_GAP;
1413 if (free_space < 0)
1414 return -EIO;
1415 if (free_space < needed_space)
1416 return -ENOSPC;
1417
1418 return 0;
1419}
1420
Joel Beckercf2bc802009-08-18 13:52:38 -07001421static int ocfs2_xa_journal_access(handle_t *handle, struct ocfs2_xa_loc *loc,
1422 int type)
1423{
1424 return loc->xl_ops->xlo_journal_access(handle, loc, type);
1425}
1426
1427static void ocfs2_xa_journal_dirty(handle_t *handle, struct ocfs2_xa_loc *loc)
1428{
1429 loc->xl_ops->xlo_journal_dirty(handle, loc);
1430}
1431
Joel Becker69a3e532009-08-17 12:24:39 -07001432/* Give a pointer into the storage for the given offset */
1433static void *ocfs2_xa_offset_pointer(struct ocfs2_xa_loc *loc, int offset)
1434{
1435 BUG_ON(offset >= loc->xl_size);
1436 return loc->xl_ops->xlo_offset_pointer(loc, offset);
1437}
1438
Tiger Yangcf1d6c72008-08-18 17:11:00 +08001439/*
Joel Becker11179f22009-08-14 16:07:44 -07001440 * Wipe the name+value pair and allow the storage to reclaim it. This
1441 * must be followed by either removal of the entry or a call to
1442 * ocfs2_xa_add_namevalue().
1443 */
1444static void ocfs2_xa_wipe_namevalue(struct ocfs2_xa_loc *loc)
1445{
1446 loc->xl_ops->xlo_wipe_namevalue(loc);
1447}
1448
Joel Becker69a3e532009-08-17 12:24:39 -07001449/*
1450 * Find lowest offset to a name+value pair. This is the start of our
1451 * downward-growing free space.
1452 */
1453static int ocfs2_xa_get_free_start(struct ocfs2_xa_loc *loc)
1454{
1455 return loc->xl_ops->xlo_get_free_start(loc);
1456}
1457
1458/* Can we reuse loc->xl_entry for xi? */
1459static int ocfs2_xa_can_reuse_entry(struct ocfs2_xa_loc *loc,
1460 struct ocfs2_xattr_info *xi)
1461{
1462 return loc->xl_ops->xlo_can_reuse(loc, xi);
1463}
1464
1465/* How much free space is needed to set the new value */
1466static int ocfs2_xa_check_space(struct ocfs2_xa_loc *loc,
1467 struct ocfs2_xattr_info *xi)
1468{
1469 return loc->xl_ops->xlo_check_space(loc, xi);
1470}
1471
1472static void ocfs2_xa_add_entry(struct ocfs2_xa_loc *loc, u32 name_hash)
1473{
1474 loc->xl_ops->xlo_add_entry(loc, name_hash);
1475 loc->xl_entry->xe_name_hash = cpu_to_le32(name_hash);
1476 /*
1477 * We can't leave the new entry's xe_name_offset at zero or
1478 * add_namevalue() will go nuts. We set it to the size of our
1479 * storage so that it can never be less than any other entry.
1480 */
1481 loc->xl_entry->xe_name_offset = cpu_to_le16(loc->xl_size);
1482}
1483
1484static void ocfs2_xa_add_namevalue(struct ocfs2_xa_loc *loc,
1485 struct ocfs2_xattr_info *xi)
1486{
1487 int size = namevalue_size_xi(xi);
1488 int nameval_offset;
1489 char *nameval_buf;
1490
1491 loc->xl_ops->xlo_add_namevalue(loc, size);
1492 loc->xl_entry->xe_value_size = cpu_to_le64(xi->xi_value_len);
1493 loc->xl_entry->xe_name_len = xi->xi_name_len;
1494 ocfs2_xattr_set_type(loc->xl_entry, xi->xi_name_index);
1495 ocfs2_xattr_set_local(loc->xl_entry,
1496 xi->xi_value_len <= OCFS2_XATTR_INLINE_SIZE);
1497
1498 nameval_offset = le16_to_cpu(loc->xl_entry->xe_name_offset);
1499 nameval_buf = ocfs2_xa_offset_pointer(loc, nameval_offset);
1500 memset(nameval_buf, 0, size);
1501 memcpy(nameval_buf, xi->xi_name, xi->xi_name_len);
1502}
1503
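/*
 * Set up an ocfs2_xattr_value_buf pointing at this entry's value tree
 * root so the generic value extend/truncate code can operate on it.
 */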
Joel Becker3fc12af2009-08-18 13:20:27 -07001504static void ocfs2_xa_fill_value_buf(struct ocfs2_xa_loc *loc,
1505 struct ocfs2_xattr_value_buf *vb)
1506{
1507 int nameval_offset = le16_to_cpu(loc->xl_entry->xe_name_offset);
1508 int name_size = OCFS2_XATTR_SIZE(loc->xl_entry->xe_name_len);
1509
1510 /* Value bufs are for value trees */
Joel Becker73857ee2009-08-18 20:26:41 -07001511 BUG_ON(ocfs2_xattr_is_local(loc->xl_entry));
Joel Becker3fc12af2009-08-18 13:20:27 -07001512 BUG_ON(namevalue_size_xe(loc->xl_entry) !=
1513 (name_size + OCFS2_XATTR_ROOT_SIZE));
1514
1515 loc->xl_ops->xlo_fill_value_buf(loc, vb);
1516 vb->vb_xv =
1517 (struct ocfs2_xattr_value_root *)ocfs2_xa_offset_pointer(loc,
1518 nameval_offset +
1519 name_size);
1520}
1521
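/*
 * Block storage lives either in an unindexed xattr block or inline in
 * the inode.  Tell them apart by the size of the storage: a full block
 * (minus the ocfs2_xattr_block header) means we journal the xattr
 * block, anything smaller means we journal the dinode.
 */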
Joel Beckercf2bc802009-08-18 13:52:38 -07001522static int ocfs2_xa_block_journal_access(handle_t *handle,
1523 struct ocfs2_xa_loc *loc, int type)
1524{
1525 struct buffer_head *bh = loc->xl_storage;
1526 ocfs2_journal_access_func access;
1527
1528 if (loc->xl_size == (bh->b_size -
1529 offsetof(struct ocfs2_xattr_block,
1530 xb_attrs.xb_header)))
1531 access = ocfs2_journal_access_xb;
1532 else
1533 access = ocfs2_journal_access_di;
1534 return access(handle, INODE_CACHE(loc->xl_inode), bh, type);
1535}
1536
1537static void ocfs2_xa_block_journal_dirty(handle_t *handle,
1538 struct ocfs2_xa_loc *loc)
1539{
1540 struct buffer_head *bh = loc->xl_storage;
1541
1542 ocfs2_journal_dirty(handle, bh);
1543}
1544
Joel Becker11179f22009-08-14 16:07:44 -07001545static void *ocfs2_xa_block_offset_pointer(struct ocfs2_xa_loc *loc,
1546 int offset)
1547{
Joel Becker11179f22009-08-14 16:07:44 -07001548 return (char *)loc->xl_header + offset;
1549}
1550
Joel Becker69a3e532009-08-17 12:24:39 -07001551static int ocfs2_xa_block_can_reuse(struct ocfs2_xa_loc *loc,
1552 struct ocfs2_xattr_info *xi)
1553{
1554 /*
1555 * Block storage is strict. If the sizes aren't exact, we will
1556 * remove the old one and reinsert the new.
1557 */
1558 return namevalue_size_xe(loc->xl_entry) ==
1559 namevalue_size_xi(xi);
1560}
1561
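/*
 * Block storage keeps no xh_free_start field, so scan every entry for
 * the lowest name+value offset.
 */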
1562static int ocfs2_xa_block_get_free_start(struct ocfs2_xa_loc *loc)
1563{
1564 struct ocfs2_xattr_header *xh = loc->xl_header;
1565 int i, count = le16_to_cpu(xh->xh_count);
1566 int offset, free_start = loc->xl_size;
1567
1568 for (i = 0; i < count; i++) {
1569 offset = le16_to_cpu(xh->xh_entries[i].xe_name_offset);
1570 if (offset < free_start)
1571 free_start = offset;
1572 }
1573
1574 return free_start;
1575}
1576
1577static int ocfs2_xa_block_check_space(struct ocfs2_xa_loc *loc,
1578 struct ocfs2_xattr_info *xi)
1579{
1580 int count = le16_to_cpu(loc->xl_header->xh_count);
1581 int free_start = ocfs2_xa_get_free_start(loc);
1582 int needed_space = ocfs2_xi_entry_usage(xi);
1583
1584 /*
1585 * Block storage will reclaim the original entry before inserting
1586 * the new value, so we only need the difference. If the new
1587 * entry is smaller than the old one, we don't need anything.
1588 */
1589 if (loc->xl_entry) {
1590 /* Don't need space if we're reusing! */
1591 if (ocfs2_xa_can_reuse_entry(loc, xi))
1592 needed_space = 0;
1593 else
1594 needed_space -= ocfs2_xe_entry_usage(loc->xl_entry);
1595 }
1596 if (needed_space < 0)
1597 needed_space = 0;
1598 return ocfs2_xa_check_space_helper(needed_space, free_start, count);
1599}
1600
Joel Becker11179f22009-08-14 16:07:44 -07001601/*
1602 * Block storage for xattrs keeps the name+value pairs compacted. When
1603 * we remove one, we have to shift any that preceded it towards the end.
1604 */
1605static void ocfs2_xa_block_wipe_namevalue(struct ocfs2_xa_loc *loc)
1606{
1607 int i, offset;
1608 int namevalue_offset, first_namevalue_offset, namevalue_size;
1609 struct ocfs2_xattr_entry *entry = loc->xl_entry;
1610 struct ocfs2_xattr_header *xh = loc->xl_header;
Joel Becker11179f22009-08-14 16:07:44 -07001611 int count = le16_to_cpu(xh->xh_count);
1612
1613 namevalue_offset = le16_to_cpu(entry->xe_name_offset);
Joel Becker199799a2009-08-14 19:04:15 -07001614 namevalue_size = namevalue_size_xe(entry);
Joel Becker69a3e532009-08-17 12:24:39 -07001615 first_namevalue_offset = ocfs2_xa_get_free_start(loc);
Joel Becker11179f22009-08-14 16:07:44 -07001616
1617 /* Shift the name+value pairs */
1618 memmove((char *)xh + first_namevalue_offset + namevalue_size,
1619 (char *)xh + first_namevalue_offset,
1620 namevalue_offset - first_namevalue_offset);
1621 memset((char *)xh + first_namevalue_offset, 0, namevalue_size);
1622
1623 /* Now tell xh->xh_entries about it */
1624 for (i = 0; i < count; i++) {
1625 offset = le16_to_cpu(xh->xh_entries[i].xe_name_offset);
Tao Madfe4d3d2010-03-19 15:04:23 +08001626 if (offset <= namevalue_offset)
Joel Becker11179f22009-08-14 16:07:44 -07001627 le16_add_cpu(&xh->xh_entries[i].xe_name_offset,
1628 namevalue_size);
1629 }
1630
1631 /*
1632 * Note that we don't update xh_free_start or xh_name_value_len
1633 * because they're not used in block-stored xattrs.
1634 */
1635}
1636
Joel Becker69a3e532009-08-17 12:24:39 -07001637static void ocfs2_xa_block_add_entry(struct ocfs2_xa_loc *loc, u32 name_hash)
1638{
1639 int count = le16_to_cpu(loc->xl_header->xh_count);
1640 loc->xl_entry = &(loc->xl_header->xh_entries[count]);
1641 le16_add_cpu(&loc->xl_header->xh_count, 1);
1642 memset(loc->xl_entry, 0, sizeof(struct ocfs2_xattr_entry));
1643}
1644
1645static void ocfs2_xa_block_add_namevalue(struct ocfs2_xa_loc *loc, int size)
1646{
1647 int free_start = ocfs2_xa_get_free_start(loc);
1648
1649 loc->xl_entry->xe_name_offset = cpu_to_le16(free_start - size);
1650}
1651
Joel Becker3fc12af2009-08-18 13:20:27 -07001652static void ocfs2_xa_block_fill_value_buf(struct ocfs2_xa_loc *loc,
1653 struct ocfs2_xattr_value_buf *vb)
1654{
1655 struct buffer_head *bh = loc->xl_storage;
1656
1657 if (loc->xl_size == (bh->b_size -
1658 offsetof(struct ocfs2_xattr_block,
1659 xb_attrs.xb_header)))
1660 vb->vb_access = ocfs2_journal_access_xb;
1661 else
1662 vb->vb_access = ocfs2_journal_access_di;
1663 vb->vb_bh = bh;
1664}
1665
Joel Becker11179f22009-08-14 16:07:44 -07001666/*
1667 * Operations for xattrs stored in blocks. This includes inline inode
1668 * storage and unindexed ocfs2_xattr_blocks.
1669 */
1670static const struct ocfs2_xa_loc_operations ocfs2_xa_block_loc_ops = {
Joel Beckercf2bc802009-08-18 13:52:38 -07001671 .xlo_journal_access = ocfs2_xa_block_journal_access,
1672 .xlo_journal_dirty = ocfs2_xa_block_journal_dirty,
Joel Becker11179f22009-08-14 16:07:44 -07001673 .xlo_offset_pointer = ocfs2_xa_block_offset_pointer,
Joel Becker69a3e532009-08-17 12:24:39 -07001674 .xlo_check_space = ocfs2_xa_block_check_space,
1675 .xlo_can_reuse = ocfs2_xa_block_can_reuse,
1676 .xlo_get_free_start = ocfs2_xa_block_get_free_start,
Joel Becker11179f22009-08-14 16:07:44 -07001677 .xlo_wipe_namevalue = ocfs2_xa_block_wipe_namevalue,
Joel Becker69a3e532009-08-17 12:24:39 -07001678 .xlo_add_entry = ocfs2_xa_block_add_entry,
1679 .xlo_add_namevalue = ocfs2_xa_block_add_namevalue,
Joel Becker3fc12af2009-08-18 13:20:27 -07001680 .xlo_fill_value_buf = ocfs2_xa_block_fill_value_buf,
Joel Becker11179f22009-08-14 16:07:44 -07001681};
1682
Joel Beckercf2bc802009-08-18 13:52:38 -07001683static int ocfs2_xa_bucket_journal_access(handle_t *handle,
1684 struct ocfs2_xa_loc *loc, int type)
1685{
1686 struct ocfs2_xattr_bucket *bucket = loc->xl_storage;
1687
1688 return ocfs2_xattr_bucket_journal_access(handle, bucket, type);
1689}
1690
1691static void ocfs2_xa_bucket_journal_dirty(handle_t *handle,
1692 struct ocfs2_xa_loc *loc)
1693{
1694 struct ocfs2_xattr_bucket *bucket = loc->xl_storage;
1695
1696 ocfs2_xattr_bucket_journal_dirty(handle, bucket);
1697}
1698
Joel Becker11179f22009-08-14 16:07:44 -07001699static void *ocfs2_xa_bucket_offset_pointer(struct ocfs2_xa_loc *loc,
1700 int offset)
1701{
1702 struct ocfs2_xattr_bucket *bucket = loc->xl_storage;
1703 int block, block_offset;
1704
Joel Becker11179f22009-08-14 16:07:44 -07001705 /* The header is at the front of the bucket */
Joel Beckercf2bc802009-08-18 13:52:38 -07001706 block = offset >> loc->xl_inode->i_sb->s_blocksize_bits;
1707 block_offset = offset % loc->xl_inode->i_sb->s_blocksize;
Joel Becker11179f22009-08-14 16:07:44 -07001708
1709 return bucket_block(bucket, block) + block_offset;
1710}
1711
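/*
 * Buckets can reuse an existing name+value region as long as it is at
 * least as large as the new pair; any slack is simply left in place.
 */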
Joel Becker69a3e532009-08-17 12:24:39 -07001712static int ocfs2_xa_bucket_can_reuse(struct ocfs2_xa_loc *loc,
1713 struct ocfs2_xattr_info *xi)
1714{
1715 return namevalue_size_xe(loc->xl_entry) >=
1716 namevalue_size_xi(xi);
1717}
1718
1719static int ocfs2_xa_bucket_get_free_start(struct ocfs2_xa_loc *loc)
1720{
1721 struct ocfs2_xattr_bucket *bucket = loc->xl_storage;
1722 return le16_to_cpu(bucket_xh(bucket)->xh_free_start);
1723}
1724
1725static int ocfs2_bucket_align_free_start(struct super_block *sb,
1726 int free_start, int size)
1727{
1728 /*
1729 * We need to make sure that the name+value pair fits within
1730 * one block.
1731 */
1732 if (((free_start - size) >> sb->s_blocksize_bits) !=
1733 ((free_start - 1) >> sb->s_blocksize_bits))
1734 free_start -= free_start % sb->s_blocksize;
1735
1736 return free_start;
1737}
1738
1739static int ocfs2_xa_bucket_check_space(struct ocfs2_xa_loc *loc,
1740 struct ocfs2_xattr_info *xi)
1741{
1742 int rc;
1743 int count = le16_to_cpu(loc->xl_header->xh_count);
1744 int free_start = ocfs2_xa_get_free_start(loc);
1745 int needed_space = ocfs2_xi_entry_usage(xi);
1746 int size = namevalue_size_xi(xi);
Joel Beckercf2bc802009-08-18 13:52:38 -07001747 struct super_block *sb = loc->xl_inode->i_sb;
Joel Becker69a3e532009-08-17 12:24:39 -07001748
1749 /*
1750 * Bucket storage does not reclaim name+value pairs it cannot
1751 * reuse. They live as holes until the bucket fills, and then
1752 * the bucket is defragmented. However, the bucket can reclaim
1753 * the ocfs2_xattr_entry.
1754 */
1755 if (loc->xl_entry) {
1756 /* Don't need space if we're reusing! */
1757 if (ocfs2_xa_can_reuse_entry(loc, xi))
1758 needed_space = 0;
1759 else
1760 needed_space -= sizeof(struct ocfs2_xattr_entry);
1761 }
1762 BUG_ON(needed_space < 0);
1763
1764 if (free_start < size) {
1765 if (needed_space)
1766 return -ENOSPC;
1767 } else {
1768 /*
1769 * First we check if it would fit in the first place.
1770 * Below, we align the free start to a block. This may
1771 * slide us below the minimum gap. By checking unaligned
1772 * first, we avoid that error.
1773 */
1774 rc = ocfs2_xa_check_space_helper(needed_space, free_start,
1775 count);
1776 if (rc)
1777 return rc;
1778 free_start = ocfs2_bucket_align_free_start(sb, free_start,
1779 size);
1780 }
1781 return ocfs2_xa_check_space_helper(needed_space, free_start, count);
1782}
1783
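/*
 * Bucket storage leaves the old name+value pair in place as a hole and
 * only adjusts the accounting; defragmenting reclaims the space later.
 */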
Joel Becker11179f22009-08-14 16:07:44 -07001784static void ocfs2_xa_bucket_wipe_namevalue(struct ocfs2_xa_loc *loc)
1785{
Joel Becker199799a2009-08-14 19:04:15 -07001786 le16_add_cpu(&loc->xl_header->xh_name_value_len,
1787 -namevalue_size_xe(loc->xl_entry));
Joel Becker11179f22009-08-14 16:07:44 -07001788}
1789
Joel Becker69a3e532009-08-17 12:24:39 -07001790static void ocfs2_xa_bucket_add_entry(struct ocfs2_xa_loc *loc, u32 name_hash)
1791{
1792 struct ocfs2_xattr_header *xh = loc->xl_header;
1793 int count = le16_to_cpu(xh->xh_count);
1794 int low = 0, high = count - 1, tmp;
1795 struct ocfs2_xattr_entry *tmp_xe;
1796
1797 /*
1798 * We keep buckets sorted by name_hash, so we need to find
1799 * our insert place.
1800 */
1801 while (low <= high && count) {
1802 tmp = (low + high) / 2;
1803 tmp_xe = &xh->xh_entries[tmp];
1804
1805 if (name_hash > le32_to_cpu(tmp_xe->xe_name_hash))
1806 low = tmp + 1;
1807 else if (name_hash < le32_to_cpu(tmp_xe->xe_name_hash))
1808 high = tmp - 1;
1809 else {
1810 low = tmp;
1811 break;
1812 }
1813 }
1814
1815 if (low != count)
1816 memmove(&xh->xh_entries[low + 1],
1817 &xh->xh_entries[low],
1818 ((count - low) * sizeof(struct ocfs2_xattr_entry)));
1819
1820 le16_add_cpu(&xh->xh_count, 1);
1821 loc->xl_entry = &xh->xh_entries[low];
1822 memset(loc->xl_entry, 0, sizeof(struct ocfs2_xattr_entry));
1823}
1824
1825static void ocfs2_xa_bucket_add_namevalue(struct ocfs2_xa_loc *loc, int size)
1826{
1827 int free_start = ocfs2_xa_get_free_start(loc);
1828 struct ocfs2_xattr_header *xh = loc->xl_header;
Joel Beckercf2bc802009-08-18 13:52:38 -07001829 struct super_block *sb = loc->xl_inode->i_sb;
Joel Becker69a3e532009-08-17 12:24:39 -07001830 int nameval_offset;
1831
1832 free_start = ocfs2_bucket_align_free_start(sb, free_start, size);
1833 nameval_offset = free_start - size;
1834 loc->xl_entry->xe_name_offset = cpu_to_le16(nameval_offset);
1835 xh->xh_free_start = cpu_to_le16(nameval_offset);
1836 le16_add_cpu(&xh->xh_name_value_len, size);
1837
1838}
1839
Joel Becker3fc12af2009-08-18 13:20:27 -07001840static void ocfs2_xa_bucket_fill_value_buf(struct ocfs2_xa_loc *loc,
1841 struct ocfs2_xattr_value_buf *vb)
1842{
1843 struct ocfs2_xattr_bucket *bucket = loc->xl_storage;
Joel Beckercf2bc802009-08-18 13:52:38 -07001844 struct super_block *sb = loc->xl_inode->i_sb;
Joel Becker3fc12af2009-08-18 13:20:27 -07001845 int nameval_offset = le16_to_cpu(loc->xl_entry->xe_name_offset);
1846 int size = namevalue_size_xe(loc->xl_entry);
1847 int block_offset = nameval_offset >> sb->s_blocksize_bits;
1848
1849 /* Values are not allowed to straddle block boundaries */
1850 BUG_ON(block_offset !=
1851 ((nameval_offset + size - 1) >> sb->s_blocksize_bits));
1852 /* We expect the bucket to be filled in */
1853 BUG_ON(!bucket->bu_bhs[block_offset]);
1854
1855 vb->vb_access = ocfs2_journal_access;
1856 vb->vb_bh = bucket->bu_bhs[block_offset];
1857}
1858
Joel Becker11179f22009-08-14 16:07:44 -07001859/* Operations for xattrs stored in buckets. */
1860static const struct ocfs2_xa_loc_operations ocfs2_xa_bucket_loc_ops = {
Joel Beckercf2bc802009-08-18 13:52:38 -07001861 .xlo_journal_access = ocfs2_xa_bucket_journal_access,
1862 .xlo_journal_dirty = ocfs2_xa_bucket_journal_dirty,
Joel Becker11179f22009-08-14 16:07:44 -07001863 .xlo_offset_pointer = ocfs2_xa_bucket_offset_pointer,
Joel Becker69a3e532009-08-17 12:24:39 -07001864 .xlo_check_space = ocfs2_xa_bucket_check_space,
1865 .xlo_can_reuse = ocfs2_xa_bucket_can_reuse,
1866 .xlo_get_free_start = ocfs2_xa_bucket_get_free_start,
Joel Becker11179f22009-08-14 16:07:44 -07001867 .xlo_wipe_namevalue = ocfs2_xa_bucket_wipe_namevalue,
Joel Becker69a3e532009-08-17 12:24:39 -07001868 .xlo_add_entry = ocfs2_xa_bucket_add_entry,
1869 .xlo_add_namevalue = ocfs2_xa_bucket_add_namevalue,
Joel Becker3fc12af2009-08-18 13:20:27 -07001870 .xlo_fill_value_buf = ocfs2_xa_bucket_fill_value_buf,
Joel Becker11179f22009-08-14 16:07:44 -07001871};
1872
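/* How many clusters back this entry's external value; 0 if it is local. */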
Joel Becker399ff3a2009-09-01 18:38:27 -07001873static unsigned int ocfs2_xa_value_clusters(struct ocfs2_xa_loc *loc)
1874{
1875 struct ocfs2_xattr_value_buf vb;
1876
1877 if (ocfs2_xattr_is_local(loc->xl_entry))
1878 return 0;
1879
1880 ocfs2_xa_fill_value_buf(loc, &vb);
1881 return le32_to_cpu(vb.vb_xv->xr_clusters);
1882}
1883
Joel Becker73857ee2009-08-18 20:26:41 -07001884static int ocfs2_xa_value_truncate(struct ocfs2_xa_loc *loc, u64 bytes,
1885 struct ocfs2_xattr_set_ctxt *ctxt)
1886{
1887 int trunc_rc, access_rc;
1888 struct ocfs2_xattr_value_buf vb;
1889
1890 ocfs2_xa_fill_value_buf(loc, &vb);
1891 trunc_rc = ocfs2_xattr_value_truncate(loc->xl_inode, &vb, bytes,
1892 ctxt);
1893
1894 /*
1895 * The caller of ocfs2_xa_value_truncate() has already called
1896	 * ocfs2_xa_journal_access on the loc. However, the truncate code
1897 * calls ocfs2_extend_trans(). This may commit the previous
1898 * transaction and open a new one. If this is a bucket, truncate
1899 * could leave only vb->vb_bh set up for journaling. Meanwhile,
1900 * the caller is expecting to dirty the entire bucket. So we must
1901 * reset the journal work. We do this even if truncate has failed,
1902 * as it could have failed after committing the extend.
1903 */
1904 access_rc = ocfs2_xa_journal_access(ctxt->handle, loc,
1905 OCFS2_JOURNAL_ACCESS_WRITE);
1906
1907 /* Errors in truncate take precedence */
1908 return trunc_rc ? trunc_rc : access_rc;
1909}
1910
Joel Becker11179f22009-08-14 16:07:44 -07001911static void ocfs2_xa_remove_entry(struct ocfs2_xa_loc *loc)
1912{
Joel Beckerbde1e542009-08-14 16:58:38 -07001913 int index, count;
1914 struct ocfs2_xattr_header *xh = loc->xl_header;
1915 struct ocfs2_xattr_entry *entry = loc->xl_entry;
1916
Joel Becker11179f22009-08-14 16:07:44 -07001917 ocfs2_xa_wipe_namevalue(loc);
Joel Beckerbde1e542009-08-14 16:58:38 -07001918 loc->xl_entry = NULL;
1919
1920 le16_add_cpu(&xh->xh_count, -1);
1921 count = le16_to_cpu(xh->xh_count);
1922
1923 /*
1924 * Only zero out the entry if there are more remaining. This is
1925 * important for an empty bucket, as it keeps track of the
1926 * bucket's hash value. It doesn't hurt empty block storage.
1927 */
1928 if (count) {
1929 index = ((char *)entry - (char *)&xh->xh_entries) /
1930 sizeof(struct ocfs2_xattr_entry);
1931 memmove(&xh->xh_entries[index], &xh->xh_entries[index + 1],
1932 (count - index) * sizeof(struct ocfs2_xattr_entry));
1933 memset(&xh->xh_entries[count], 0,
1934 sizeof(struct ocfs2_xattr_entry));
1935 }
Joel Becker11179f22009-08-14 16:07:44 -07001936}
1937
Joel Becker399ff3a2009-09-01 18:38:27 -07001938/*
1939 * If we have a problem adjusting the size of an external value during
1940 * ocfs2_xa_prepare_entry() or ocfs2_xa_remove(), we may have an xattr
1941 * in an intermediate state. For example, the value may be partially
1942 * truncated.
1943 *
1944 * If the value tree hasn't changed, the extend/truncate went nowhere.
1945 * We have nothing to do. The caller can treat it as a straight error.
1946 *
1947 * If the value tree got partially truncated, we now have a corrupted
1948 * extended attribute. We're going to wipe its entry and leak the
1949 * clusters. Better to leak some storage than leave a corrupt entry.
1950 *
1951 * If the value tree grew, it obviously didn't grow enough for the
1952 * new entry. We're not going to try and reclaim those clusters either.
1953 * If there was already an external value there (orig_clusters != 0),
1954 * the new clusters are attached safely and we can just leave the old
1955 * value in place. If there was no external value there, we remove
1956 * the entry.
1957 *
1958 * This way, the xattr block we store in the journal will be consistent.
1959 * If the size change broke because of the journal, no changes will hit
1960 * disk anyway.
1961 */
1962static void ocfs2_xa_cleanup_value_truncate(struct ocfs2_xa_loc *loc,
1963 const char *what,
1964 unsigned int orig_clusters)
1965{
1966 unsigned int new_clusters = ocfs2_xa_value_clusters(loc);
1967 char *nameval_buf = ocfs2_xa_offset_pointer(loc,
1968 le16_to_cpu(loc->xl_entry->xe_name_offset));
1969
1970 if (new_clusters < orig_clusters) {
1971 mlog(ML_ERROR,
1972 "Partial truncate while %s xattr %.*s. Leaking "
1973 "%u clusters and removing the entry\n",
1974 what, loc->xl_entry->xe_name_len, nameval_buf,
1975 orig_clusters - new_clusters);
1976 ocfs2_xa_remove_entry(loc);
1977 } else if (!orig_clusters) {
1978 mlog(ML_ERROR,
1979 "Unable to allocate an external value for xattr "
1980 "%.*s safely. Leaking %u clusters and removing the "
1981 "entry\n",
1982 loc->xl_entry->xe_name_len, nameval_buf,
1983 new_clusters - orig_clusters);
1984 ocfs2_xa_remove_entry(loc);
1985 } else if (new_clusters > orig_clusters)
1986 mlog(ML_ERROR,
1987 "Unable to grow xattr %.*s safely. %u new clusters "
1988 "have been added, but the value will not be "
1989 "modified\n",
1990 loc->xl_entry->xe_name_len, nameval_buf,
1991 new_clusters - orig_clusters);
1992}
1993
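/*
 * Remove the xattr at loc->xl_entry.  An external value is truncated
 * first; if that truncation only partially succeeds, the cleanup code
 * wipes the entry and leaks the remaining clusters rather than leave a
 * corrupt attribute behind.
 */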
Joel Becker73857ee2009-08-18 20:26:41 -07001994static int ocfs2_xa_remove(struct ocfs2_xa_loc *loc,
1995 struct ocfs2_xattr_set_ctxt *ctxt)
1996{
1997 int rc = 0;
Joel Becker399ff3a2009-09-01 18:38:27 -07001998 unsigned int orig_clusters;
Joel Becker73857ee2009-08-18 20:26:41 -07001999
2000 if (!ocfs2_xattr_is_local(loc->xl_entry)) {
Joel Becker399ff3a2009-09-01 18:38:27 -07002001 orig_clusters = ocfs2_xa_value_clusters(loc);
Joel Becker73857ee2009-08-18 20:26:41 -07002002 rc = ocfs2_xa_value_truncate(loc, 0, ctxt);
2003 if (rc) {
2004 mlog_errno(rc);
Joel Becker399ff3a2009-09-01 18:38:27 -07002005 /*
2006 * Since this is remove, we can return 0 if
2007 * ocfs2_xa_cleanup_value_truncate() is going to
2008 * wipe the entry anyway. So we check the
2009 * cluster count as well.
2010 */
2011 if (orig_clusters != ocfs2_xa_value_clusters(loc))
2012 rc = 0;
2013 ocfs2_xa_cleanup_value_truncate(loc, "removing",
2014 orig_clusters);
2015 if (rc)
2016 goto out;
Joel Becker73857ee2009-08-18 20:26:41 -07002017 }
2018 }
2019
2020 ocfs2_xa_remove_entry(loc);
2021
2022out:
2023 return rc;
2024}
2025
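/*
 * Copy the default (empty) value root into the name+value region so
 * that an external value tree can be grown behind this entry.
 */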
2026static void ocfs2_xa_install_value_root(struct ocfs2_xa_loc *loc)
2027{
2028 int name_size = OCFS2_XATTR_SIZE(loc->xl_entry->xe_name_len);
2029 char *nameval_buf;
2030
2031 nameval_buf = ocfs2_xa_offset_pointer(loc,
2032 le16_to_cpu(loc->xl_entry->xe_name_offset));
2033 memcpy(nameval_buf + name_size, &def_xv, OCFS2_XATTR_ROOT_SIZE);
2034}
2035
2036/*
2037	 * Take an existing entry and make it ready for the new value.  This
2038	 * won't allocate space, but it may free space.  On return, the entry
2039	 * is ready for ocfs2_xa_prepare_entry() to finish the work.
2040 */
2041static int ocfs2_xa_reuse_entry(struct ocfs2_xa_loc *loc,
2042 struct ocfs2_xattr_info *xi,
2043 struct ocfs2_xattr_set_ctxt *ctxt)
2044{
2045 int rc = 0;
2046 int name_size = OCFS2_XATTR_SIZE(xi->xi_name_len);
Joel Becker399ff3a2009-09-01 18:38:27 -07002047 unsigned int orig_clusters;
Joel Becker73857ee2009-08-18 20:26:41 -07002048 char *nameval_buf;
2049 int xe_local = ocfs2_xattr_is_local(loc->xl_entry);
2050 int xi_local = xi->xi_value_len <= OCFS2_XATTR_INLINE_SIZE;
2051
2052 BUG_ON(OCFS2_XATTR_SIZE(loc->xl_entry->xe_name_len) !=
2053 name_size);
2054
2055 nameval_buf = ocfs2_xa_offset_pointer(loc,
2056 le16_to_cpu(loc->xl_entry->xe_name_offset));
2057 if (xe_local) {
2058 memset(nameval_buf + name_size, 0,
2059 namevalue_size_xe(loc->xl_entry) - name_size);
2060 if (!xi_local)
2061 ocfs2_xa_install_value_root(loc);
2062 } else {
Joel Becker399ff3a2009-09-01 18:38:27 -07002063 orig_clusters = ocfs2_xa_value_clusters(loc);
Joel Becker73857ee2009-08-18 20:26:41 -07002064 if (xi_local) {
2065 rc = ocfs2_xa_value_truncate(loc, 0, ctxt);
Joel Becker399ff3a2009-09-01 18:38:27 -07002066 if (rc < 0)
Joel Becker73857ee2009-08-18 20:26:41 -07002067 mlog_errno(rc);
Joel Becker399ff3a2009-09-01 18:38:27 -07002068 else
2069 memset(nameval_buf + name_size, 0,
2070 namevalue_size_xe(loc->xl_entry) -
2071 name_size);
Joel Becker73857ee2009-08-18 20:26:41 -07002072 } else if (le64_to_cpu(loc->xl_entry->xe_value_size) >
2073 xi->xi_value_len) {
2074 rc = ocfs2_xa_value_truncate(loc, xi->xi_value_len,
2075 ctxt);
Joel Becker399ff3a2009-09-01 18:38:27 -07002076 if (rc < 0)
Joel Becker73857ee2009-08-18 20:26:41 -07002077 mlog_errno(rc);
Joel Becker399ff3a2009-09-01 18:38:27 -07002078 }
2079
2080 if (rc) {
2081 ocfs2_xa_cleanup_value_truncate(loc, "reusing",
2082 orig_clusters);
2083 goto out;
Joel Becker73857ee2009-08-18 20:26:41 -07002084 }
2085 }
2086
2087 loc->xl_entry->xe_value_size = cpu_to_le64(xi->xi_value_len);
2088 ocfs2_xattr_set_local(loc->xl_entry, xi_local);
2089
2090out:
2091 return rc;
2092}
2093
Joel Becker69a3e532009-08-17 12:24:39 -07002094/*
2095 * Prepares loc->xl_entry to receive the new xattr. This includes
2096 * properly setting up the name+value pair region. If loc->xl_entry
2097 * already exists, it will take care of modifying it appropriately.
Joel Becker69a3e532009-08-17 12:24:39 -07002098 *
2099 * Note that this modifies the data. You did journal_access already,
2100 * right?
2101 */
2102static int ocfs2_xa_prepare_entry(struct ocfs2_xa_loc *loc,
2103 struct ocfs2_xattr_info *xi,
Joel Becker73857ee2009-08-18 20:26:41 -07002104 u32 name_hash,
2105 struct ocfs2_xattr_set_ctxt *ctxt)
Joel Becker69a3e532009-08-17 12:24:39 -07002106{
2107 int rc = 0;
Joel Becker399ff3a2009-09-01 18:38:27 -07002108 unsigned int orig_clusters;
2109 __le64 orig_value_size = 0;
Joel Becker69a3e532009-08-17 12:24:39 -07002110
Joel Becker69a3e532009-08-17 12:24:39 -07002111 rc = ocfs2_xa_check_space(loc, xi);
2112 if (rc)
2113 goto out;
2114
2115 if (loc->xl_entry) {
2116 if (ocfs2_xa_can_reuse_entry(loc, xi)) {
Joel Becker399ff3a2009-09-01 18:38:27 -07002117 orig_value_size = loc->xl_entry->xe_value_size;
Joel Becker73857ee2009-08-18 20:26:41 -07002118 rc = ocfs2_xa_reuse_entry(loc, xi, ctxt);
2119 if (rc)
2120 goto out;
2121 goto alloc_value;
Joel Becker69a3e532009-08-17 12:24:39 -07002122 }
2123
Joel Becker73857ee2009-08-18 20:26:41 -07002124 if (!ocfs2_xattr_is_local(loc->xl_entry)) {
Joel Becker399ff3a2009-09-01 18:38:27 -07002125 orig_clusters = ocfs2_xa_value_clusters(loc);
Joel Becker73857ee2009-08-18 20:26:41 -07002126 rc = ocfs2_xa_value_truncate(loc, 0, ctxt);
2127 if (rc) {
2128 mlog_errno(rc);
Joel Becker399ff3a2009-09-01 18:38:27 -07002129 ocfs2_xa_cleanup_value_truncate(loc,
2130 "overwriting",
2131 orig_clusters);
Joel Becker73857ee2009-08-18 20:26:41 -07002132 goto out;
2133 }
2134 }
Joel Becker69a3e532009-08-17 12:24:39 -07002135 ocfs2_xa_wipe_namevalue(loc);
2136 } else
2137 ocfs2_xa_add_entry(loc, name_hash);
2138
2139 /*
2140 * If we get here, we have a blank entry. Fill it. We grow our
2141 * name+value pair back from the end.
2142 */
2143 ocfs2_xa_add_namevalue(loc, xi);
Joel Becker73857ee2009-08-18 20:26:41 -07002144 if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE)
2145 ocfs2_xa_install_value_root(loc);
2146
2147alloc_value:
2148 if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE) {
Joel Becker399ff3a2009-09-01 18:38:27 -07002149 orig_clusters = ocfs2_xa_value_clusters(loc);
Joel Becker73857ee2009-08-18 20:26:41 -07002150 rc = ocfs2_xa_value_truncate(loc, xi->xi_value_len, ctxt);
Joel Becker399ff3a2009-09-01 18:38:27 -07002151 if (rc < 0) {
Tao Ma5f5261a2010-05-13 22:49:05 +08002152 ctxt->set_abort = 1;
Joel Becker399ff3a2009-09-01 18:38:27 -07002153 ocfs2_xa_cleanup_value_truncate(loc, "growing",
2154 orig_clusters);
Tao Mad5a7df02010-05-10 18:09:47 +08002155 /*
2156 * If we were growing an existing value,
2157 * ocfs2_xa_cleanup_value_truncate() won't remove
2158 * the entry. We need to restore the original value
2159 * size.
2160 */
2161 if (loc->xl_entry) {
2162 BUG_ON(!orig_value_size);
2163 loc->xl_entry->xe_value_size = orig_value_size;
2164 }
Joel Becker73857ee2009-08-18 20:26:41 -07002165 mlog_errno(rc);
Joel Becker399ff3a2009-09-01 18:38:27 -07002166 }
Joel Becker73857ee2009-08-18 20:26:41 -07002167 }
Joel Becker69a3e532009-08-17 12:24:39 -07002168
2169out:
2170 return rc;
2171}
2172
2173/*
Joel Becker73857ee2009-08-18 20:26:41 -07002174 * Store the value portion of the name+value pair. This will skip
2175 * values that are stored externally. Their tree roots were set up
2176 * by ocfs2_xa_prepare_entry().
Joel Becker69a3e532009-08-17 12:24:39 -07002177 */
Joel Becker73857ee2009-08-18 20:26:41 -07002178static int ocfs2_xa_store_value(struct ocfs2_xa_loc *loc,
2179 struct ocfs2_xattr_info *xi,
2180 struct ocfs2_xattr_set_ctxt *ctxt)
Joel Becker69a3e532009-08-17 12:24:39 -07002181{
Joel Becker73857ee2009-08-18 20:26:41 -07002182 int rc = 0;
Joel Becker69a3e532009-08-17 12:24:39 -07002183 int nameval_offset = le16_to_cpu(loc->xl_entry->xe_name_offset);
2184 int name_size = OCFS2_XATTR_SIZE(xi->xi_name_len);
Joel Becker69a3e532009-08-17 12:24:39 -07002185 char *nameval_buf;
Joel Becker73857ee2009-08-18 20:26:41 -07002186 struct ocfs2_xattr_value_buf vb;
Joel Becker69a3e532009-08-17 12:24:39 -07002187
Joel Becker69a3e532009-08-17 12:24:39 -07002188 nameval_buf = ocfs2_xa_offset_pointer(loc, nameval_offset);
Joel Becker73857ee2009-08-18 20:26:41 -07002189 if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE) {
2190 ocfs2_xa_fill_value_buf(loc, &vb);
2191 rc = __ocfs2_xattr_set_value_outside(loc->xl_inode,
2192 ctxt->handle, &vb,
2193 xi->xi_value,
2194 xi->xi_value_len);
2195 } else
2196 memcpy(nameval_buf + name_size, xi->xi_value, xi->xi_value_len);
2197
Joel Becker73857ee2009-08-18 20:26:41 -07002198 return rc;
Joel Becker69a3e532009-08-17 12:24:39 -07002199}
2200
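/*
 * Top-level helper for modifying an xattr in place: journal the
 * storage, then remove, prepare and store the new value as needed.
 * The storage is always dirtied, even on error, because the helpers
 * below keep the header consistent at every step.
 */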
Joel Beckerbca5e9b2009-08-18 20:40:14 -07002201static int ocfs2_xa_set(struct ocfs2_xa_loc *loc,
2202 struct ocfs2_xattr_info *xi,
2203 struct ocfs2_xattr_set_ctxt *ctxt)
2204{
2205 int ret;
2206 u32 name_hash = ocfs2_xattr_name_hash(loc->xl_inode, xi->xi_name,
2207 xi->xi_name_len);
2208
2209 ret = ocfs2_xa_journal_access(ctxt->handle, loc,
2210 OCFS2_JOURNAL_ACCESS_WRITE);
2211 if (ret) {
2212 mlog_errno(ret);
2213 goto out;
2214 }
2215
Joel Becker399ff3a2009-09-01 18:38:27 -07002216 /*
2217 * From here on out, everything is going to modify the buffer a
2218 * little. Errors are going to leave the xattr header in a
2219 * sane state. Thus, even with errors we dirty the sucker.
2220 */
2221
Joel Beckerbca5e9b2009-08-18 20:40:14 -07002222 /* Don't worry, we are never called with !xi_value and !xl_entry */
2223 if (!xi->xi_value) {
2224 ret = ocfs2_xa_remove(loc, ctxt);
Joel Becker399ff3a2009-09-01 18:38:27 -07002225 goto out_dirty;
Joel Beckerbca5e9b2009-08-18 20:40:14 -07002226 }
2227
2228 ret = ocfs2_xa_prepare_entry(loc, xi, name_hash, ctxt);
2229 if (ret) {
2230 if (ret != -ENOSPC)
2231 mlog_errno(ret);
Joel Becker399ff3a2009-09-01 18:38:27 -07002232 goto out_dirty;
Joel Beckerbca5e9b2009-08-18 20:40:14 -07002233 }
2234
2235 ret = ocfs2_xa_store_value(loc, xi, ctxt);
Joel Becker399ff3a2009-09-01 18:38:27 -07002236 if (ret)
Joel Beckerbca5e9b2009-08-18 20:40:14 -07002237 mlog_errno(ret);
Joel Beckerbca5e9b2009-08-18 20:40:14 -07002238
Joel Becker399ff3a2009-09-01 18:38:27 -07002239out_dirty:
Joel Beckerbca5e9b2009-08-18 20:40:14 -07002240 ocfs2_xa_journal_dirty(ctxt->handle, loc);
2241
2242out:
2243 return ret;
2244}
2245
Joel Becker11179f22009-08-14 16:07:44 -07002246static void ocfs2_init_dinode_xa_loc(struct ocfs2_xa_loc *loc,
2247 struct inode *inode,
2248 struct buffer_head *bh,
2249 struct ocfs2_xattr_entry *entry)
2250{
2251 struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data;
2252
Joel Becker139ffac2009-08-19 11:09:17 -07002253 BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_XATTR_FL));
2254
Joel Beckercf2bc802009-08-18 13:52:38 -07002255 loc->xl_inode = inode;
Joel Becker11179f22009-08-14 16:07:44 -07002256 loc->xl_ops = &ocfs2_xa_block_loc_ops;
2257 loc->xl_storage = bh;
2258 loc->xl_entry = entry;
Joel Becker139ffac2009-08-19 11:09:17 -07002259 loc->xl_size = le16_to_cpu(di->i_xattr_inline_size);
Joel Becker11179f22009-08-14 16:07:44 -07002260 loc->xl_header =
2261 (struct ocfs2_xattr_header *)(bh->b_data + bh->b_size -
2262 loc->xl_size);
2263}
2264
2265static void ocfs2_init_xattr_block_xa_loc(struct ocfs2_xa_loc *loc,
Joel Beckercf2bc802009-08-18 13:52:38 -07002266 struct inode *inode,
Joel Becker11179f22009-08-14 16:07:44 -07002267 struct buffer_head *bh,
2268 struct ocfs2_xattr_entry *entry)
2269{
2270 struct ocfs2_xattr_block *xb =
2271 (struct ocfs2_xattr_block *)bh->b_data;
2272
2273 BUG_ON(le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED);
2274
Joel Beckercf2bc802009-08-18 13:52:38 -07002275 loc->xl_inode = inode;
Joel Becker11179f22009-08-14 16:07:44 -07002276 loc->xl_ops = &ocfs2_xa_block_loc_ops;
2277 loc->xl_storage = bh;
2278 loc->xl_header = &(xb->xb_attrs.xb_header);
2279 loc->xl_entry = entry;
2280 loc->xl_size = bh->b_size - offsetof(struct ocfs2_xattr_block,
2281 xb_attrs.xb_header);
2282}
2283
2284static void ocfs2_init_xattr_bucket_xa_loc(struct ocfs2_xa_loc *loc,
2285 struct ocfs2_xattr_bucket *bucket,
2286 struct ocfs2_xattr_entry *entry)
2287{
Joel Beckercf2bc802009-08-18 13:52:38 -07002288 loc->xl_inode = bucket->bu_inode;
Joel Becker11179f22009-08-14 16:07:44 -07002289 loc->xl_ops = &ocfs2_xa_bucket_loc_ops;
2290 loc->xl_storage = bucket;
2291 loc->xl_header = bucket_xh(bucket);
2292 loc->xl_entry = entry;
2293 loc->xl_size = OCFS2_XATTR_BUCKET_SIZE;
2294}
2295
Tao Mace9c5a52009-08-18 11:43:59 +08002296/*
2297	 * When removing an xattr whose value is stored outside and refcounted,
2298	 * we may have to split the refcount tree, so we need the allocators.
2299 */
2300static int ocfs2_lock_xattr_remove_allocators(struct inode *inode,
2301 struct ocfs2_xattr_value_root *xv,
2302 struct ocfs2_caching_info *ref_ci,
2303 struct buffer_head *ref_root_bh,
2304 struct ocfs2_alloc_context **meta_ac,
2305 int *ref_credits)
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002306{
Tao Mace9c5a52009-08-18 11:43:59 +08002307 int ret, meta_add = 0;
2308 u32 p_cluster, num_clusters;
2309 unsigned int ext_flags;
Tao Ma78f30c32008-11-12 08:27:00 +08002310
Tao Mace9c5a52009-08-18 11:43:59 +08002311 *ref_credits = 0;
2312 ret = ocfs2_xattr_get_clusters(inode, 0, &p_cluster,
2313 &num_clusters,
2314 &xv->xr_list,
2315 &ext_flags);
2316 if (ret) {
Tao Ma85db90e2008-11-12 08:27:01 +08002317 mlog_errno(ret);
2318 goto out;
2319 }
2320
Tao Mace9c5a52009-08-18 11:43:59 +08002321 if (!(ext_flags & OCFS2_EXT_REFCOUNTED))
2322 goto out;
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002323
Tao Mace9c5a52009-08-18 11:43:59 +08002324 ret = ocfs2_refcounted_xattr_delete_need(inode, ref_ci,
2325 ref_root_bh, xv,
2326 &meta_add, ref_credits);
2327 if (ret) {
2328 mlog_errno(ret);
2329 goto out;
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002330 }
2331
Tao Mace9c5a52009-08-18 11:43:59 +08002332 ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(inode->i_sb),
2333 meta_add, meta_ac);
2334 if (ret)
2335 mlog_errno(ret);
2336
Tao Ma85db90e2008-11-12 08:27:01 +08002337out:
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002338 return ret;
2339}
2340
Tao Mace9c5a52009-08-18 11:43:59 +08002341static int ocfs2_remove_value_outside(struct inode *inode,
2342 struct ocfs2_xattr_value_buf *vb,
2343 struct ocfs2_xattr_header *header,
2344 struct ocfs2_caching_info *ref_ci,
2345 struct buffer_head *ref_root_bh)
2346{
2347 int ret = 0, i, ref_credits;
2348 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2349 struct ocfs2_xattr_set_ctxt ctxt = { NULL, NULL, };
2350 void *val;
2351
2352 ocfs2_init_dealloc_ctxt(&ctxt.dealloc);
2353
2354 for (i = 0; i < le16_to_cpu(header->xh_count); i++) {
2355 struct ocfs2_xattr_entry *entry = &header->xh_entries[i];
2356
2357 if (ocfs2_xattr_is_local(entry))
2358 continue;
2359
2360 val = (void *)header +
2361 le16_to_cpu(entry->xe_name_offset);
2362 vb->vb_xv = (struct ocfs2_xattr_value_root *)
2363 (val + OCFS2_XATTR_SIZE(entry->xe_name_len));
2364
2365 ret = ocfs2_lock_xattr_remove_allocators(inode, vb->vb_xv,
2366 ref_ci, ref_root_bh,
2367 &ctxt.meta_ac,
2368						 &ref_credits);
		/* Don't start a transaction if we couldn't get the allocators */
		if (ret) {
			mlog_errno(ret);
			break;
		}

2370 ctxt.handle = ocfs2_start_trans(osb, ref_credits +
2371 ocfs2_remove_extent_credits(osb->sb));
2372 if (IS_ERR(ctxt.handle)) {
2373 ret = PTR_ERR(ctxt.handle);
2374 mlog_errno(ret);
2375 break;
2376 }
2377
2378 ret = ocfs2_xattr_value_truncate(inode, vb, 0, &ctxt);
Tao Mace9c5a52009-08-18 11:43:59 +08002379
2380 ocfs2_commit_trans(osb, ctxt.handle);
2381 if (ctxt.meta_ac) {
2382 ocfs2_free_alloc_context(ctxt.meta_ac);
2383 ctxt.meta_ac = NULL;
2384 }
Wengang Wangb8a0ae52011-10-12 15:22:15 +08002385
2386 if (ret < 0) {
2387 mlog_errno(ret);
2388 break;
2389 }
2390
Tao Mace9c5a52009-08-18 11:43:59 +08002391 }
2392
2393 if (ctxt.meta_ac)
2394 ocfs2_free_alloc_context(ctxt.meta_ac);
2395 ocfs2_schedule_truncate_log_flush(osb, 1);
2396 ocfs2_run_deallocs(osb, &ctxt.dealloc);
2397 return ret;
2398}
2399
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002400static int ocfs2_xattr_ibody_remove(struct inode *inode,
Tao Mace9c5a52009-08-18 11:43:59 +08002401 struct buffer_head *di_bh,
2402 struct ocfs2_caching_info *ref_ci,
2403 struct buffer_head *ref_root_bh)
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002404{
2405
2406 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
2407 struct ocfs2_xattr_header *header;
2408 int ret;
Joel Becker43119012008-12-09 16:24:43 -08002409 struct ocfs2_xattr_value_buf vb = {
2410 .vb_bh = di_bh,
2411 .vb_access = ocfs2_journal_access_di,
2412 };
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002413
2414 header = (struct ocfs2_xattr_header *)
2415 ((void *)di + inode->i_sb->s_blocksize -
2416 le16_to_cpu(di->i_xattr_inline_size));
2417
Tao Mace9c5a52009-08-18 11:43:59 +08002418 ret = ocfs2_remove_value_outside(inode, &vb, header,
2419 ref_ci, ref_root_bh);
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002420
2421 return ret;
2422}
2423
Tao Mace9c5a52009-08-18 11:43:59 +08002424struct ocfs2_rm_xattr_bucket_para {
2425 struct ocfs2_caching_info *ref_ci;
2426 struct buffer_head *ref_root_bh;
2427};
2428
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002429static int ocfs2_xattr_block_remove(struct inode *inode,
Tao Mace9c5a52009-08-18 11:43:59 +08002430 struct buffer_head *blk_bh,
2431 struct ocfs2_caching_info *ref_ci,
2432 struct buffer_head *ref_root_bh)
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002433{
2434 struct ocfs2_xattr_block *xb;
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002435 int ret = 0;
Joel Becker43119012008-12-09 16:24:43 -08002436 struct ocfs2_xattr_value_buf vb = {
2437 .vb_bh = blk_bh,
2438 .vb_access = ocfs2_journal_access_xb,
2439 };
Tao Mace9c5a52009-08-18 11:43:59 +08002440 struct ocfs2_rm_xattr_bucket_para args = {
2441 .ref_ci = ref_ci,
2442 .ref_root_bh = ref_root_bh,
2443 };
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002444
2445 xb = (struct ocfs2_xattr_block *)blk_bh->b_data;
Tao Maa3944252008-08-18 17:38:54 +08002446 if (!(le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED)) {
2447 struct ocfs2_xattr_header *header = &(xb->xb_attrs.xb_header);
Tao Mace9c5a52009-08-18 11:43:59 +08002448 ret = ocfs2_remove_value_outside(inode, &vb, header,
2449 ref_ci, ref_root_bh);
Tao Maa3944252008-08-18 17:38:54 +08002450 } else
Tao Ma47bca492009-08-18 11:43:42 +08002451 ret = ocfs2_iterate_xattr_index_block(inode,
2452 blk_bh,
2453 ocfs2_rm_xattr_cluster,
Tao Mace9c5a52009-08-18 11:43:59 +08002454 &args);
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002455
2456 return ret;
2457}
2458
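/*
 * Free an external xattr block: tear down any outside values it still
 * references, then return its bit to the suballocator it came from.
 */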
Tao Ma08413892008-08-29 09:00:19 +08002459static int ocfs2_xattr_free_block(struct inode *inode,
Tao Mace9c5a52009-08-18 11:43:59 +08002460 u64 block,
2461 struct ocfs2_caching_info *ref_ci,
2462 struct buffer_head *ref_root_bh)
Tao Ma08413892008-08-29 09:00:19 +08002463{
2464 struct inode *xb_alloc_inode;
2465 struct buffer_head *xb_alloc_bh = NULL;
2466 struct buffer_head *blk_bh = NULL;
2467 struct ocfs2_xattr_block *xb;
2468 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2469 handle_t *handle;
2470 int ret = 0;
2471 u64 blk, bg_blkno;
2472 u16 bit;
2473
Joel Becker4ae1d692008-11-13 14:49:18 -08002474 ret = ocfs2_read_xattr_block(inode, block, &blk_bh);
Tao Ma08413892008-08-29 09:00:19 +08002475 if (ret < 0) {
2476 mlog_errno(ret);
2477 goto out;
2478 }
2479
Tao Mace9c5a52009-08-18 11:43:59 +08002480 ret = ocfs2_xattr_block_remove(inode, blk_bh, ref_ci, ref_root_bh);
Tao Ma08413892008-08-29 09:00:19 +08002481 if (ret < 0) {
2482 mlog_errno(ret);
2483 goto out;
2484 }
2485
Joel Becker4ae1d692008-11-13 14:49:18 -08002486 xb = (struct ocfs2_xattr_block *)blk_bh->b_data;
Tao Ma08413892008-08-29 09:00:19 +08002487 blk = le64_to_cpu(xb->xb_blkno);
2488 bit = le16_to_cpu(xb->xb_suballoc_bit);
Tao Ma74380c42010-03-22 14:20:18 +08002489 if (xb->xb_suballoc_loc)
2490 bg_blkno = le64_to_cpu(xb->xb_suballoc_loc);
2491 else
2492 bg_blkno = ocfs2_which_suballoc_group(blk, bit);
Tao Ma08413892008-08-29 09:00:19 +08002493
2494 xb_alloc_inode = ocfs2_get_system_file_inode(osb,
2495 EXTENT_ALLOC_SYSTEM_INODE,
2496 le16_to_cpu(xb->xb_suballoc_slot));
2497 if (!xb_alloc_inode) {
2498 ret = -ENOMEM;
2499 mlog_errno(ret);
2500 goto out;
2501 }
2502 mutex_lock(&xb_alloc_inode->i_mutex);
2503
2504 ret = ocfs2_inode_lock(xb_alloc_inode, &xb_alloc_bh, 1);
2505 if (ret < 0) {
2506 mlog_errno(ret);
2507 goto out_mutex;
2508 }
2509
2510 handle = ocfs2_start_trans(osb, OCFS2_SUBALLOC_FREE);
2511 if (IS_ERR(handle)) {
2512 ret = PTR_ERR(handle);
2513 mlog_errno(ret);
2514 goto out_unlock;
2515 }
2516
2517 ret = ocfs2_free_suballoc_bits(handle, xb_alloc_inode, xb_alloc_bh,
2518 bit, bg_blkno, 1);
2519 if (ret < 0)
2520 mlog_errno(ret);
2521
2522 ocfs2_commit_trans(osb, handle);
2523out_unlock:
2524 ocfs2_inode_unlock(xb_alloc_inode, 1);
2525 brelse(xb_alloc_bh);
2526out_mutex:
2527 mutex_unlock(&xb_alloc_inode->i_mutex);
2528 iput(xb_alloc_inode);
2529out:
2530 brelse(blk_bh);
2531 return ret;
2532}
2533
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002534/*
2535 * ocfs2_xattr_remove()
2536 *
2537 * Free extended attribute resources associated with this inode.
2538 */
2539int ocfs2_xattr_remove(struct inode *inode, struct buffer_head *di_bh)
2540{
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002541 struct ocfs2_inode_info *oi = OCFS2_I(inode);
2542 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
Tao Mace9c5a52009-08-18 11:43:59 +08002543 struct ocfs2_refcount_tree *ref_tree = NULL;
2544 struct buffer_head *ref_root_bh = NULL;
2545 struct ocfs2_caching_info *ref_ci = NULL;
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002546 handle_t *handle;
2547 int ret;
2548
Tiger Yang8154da32008-08-18 17:11:46 +08002549 if (!ocfs2_supports_xattr(OCFS2_SB(inode->i_sb)))
2550 return 0;
2551
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002552 if (!(oi->ip_dyn_features & OCFS2_HAS_XATTR_FL))
2553 return 0;
2554
Tao Mace9c5a52009-08-18 11:43:59 +08002555 if (OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL) {
2556 ret = ocfs2_lock_refcount_tree(OCFS2_SB(inode->i_sb),
2557 le64_to_cpu(di->i_refcount_loc),
2558 1, &ref_tree, &ref_root_bh);
2559 if (ret) {
2560 mlog_errno(ret);
2561 goto out;
2562 }
2563 ref_ci = &ref_tree->rf_ci;
2564
2565 }
2566
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002567 if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL) {
Tao Mace9c5a52009-08-18 11:43:59 +08002568 ret = ocfs2_xattr_ibody_remove(inode, di_bh,
2569 ref_ci, ref_root_bh);
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002570 if (ret < 0) {
2571 mlog_errno(ret);
2572 goto out;
2573 }
2574 }
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002575
Tao Ma08413892008-08-29 09:00:19 +08002576 if (di->i_xattr_loc) {
2577 ret = ocfs2_xattr_free_block(inode,
Tao Mace9c5a52009-08-18 11:43:59 +08002578 le64_to_cpu(di->i_xattr_loc),
2579 ref_ci, ref_root_bh);
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002580 if (ret < 0) {
2581 mlog_errno(ret);
2582 goto out;
2583 }
2584 }
2585
2586 handle = ocfs2_start_trans((OCFS2_SB(inode->i_sb)),
2587 OCFS2_INODE_UPDATE_CREDITS);
2588 if (IS_ERR(handle)) {
2589 ret = PTR_ERR(handle);
2590 mlog_errno(ret);
2591 goto out;
2592 }
Joel Becker0cf2f762009-02-12 16:41:25 -08002593 ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
Joel Becker84008972008-12-09 16:11:49 -08002594 OCFS2_JOURNAL_ACCESS_WRITE);
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002595 if (ret) {
2596 mlog_errno(ret);
2597 goto out_commit;
2598 }
2599
Tao Ma08413892008-08-29 09:00:19 +08002600 di->i_xattr_loc = 0;
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002601
2602 spin_lock(&oi->ip_lock);
2603 oi->ip_dyn_features &= ~(OCFS2_INLINE_XATTR_FL | OCFS2_HAS_XATTR_FL);
2604 di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
2605 spin_unlock(&oi->ip_lock);
2606
Joel Beckerec20cec2010-03-19 14:13:52 -07002607 ocfs2_journal_dirty(handle, di_bh);
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002608out_commit:
2609 ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
2610out:
Tao Mace9c5a52009-08-18 11:43:59 +08002611 if (ref_tree)
2612 ocfs2_unlock_refcount_tree(OCFS2_SB(inode->i_sb), ref_tree, 1);
2613 brelse(ref_root_bh);
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002614 return ret;
2615}
2616
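/*
 * Can this inode carve s_xattr_inline_size bytes out of its body for
 * inline xattrs?  The answer depends on whether the body currently
 * holds inline data, a fast symlink target, or an extent list.
 */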
2617static int ocfs2_xattr_has_space_inline(struct inode *inode,
2618 struct ocfs2_dinode *di)
2619{
2620 struct ocfs2_inode_info *oi = OCFS2_I(inode);
2621 unsigned int xattrsize = OCFS2_SB(inode->i_sb)->s_xattr_inline_size;
2622 int free;
2623
2624 if (xattrsize < OCFS2_MIN_XATTR_INLINE_SIZE)
2625 return 0;
2626
2627 if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
2628 struct ocfs2_inline_data *idata = &di->id2.i_data;
2629 free = le16_to_cpu(idata->id_count) - le64_to_cpu(di->i_size);
2630 } else if (ocfs2_inode_is_fast_symlink(inode)) {
2631 free = ocfs2_fast_symlink_chars(inode->i_sb) -
2632 le64_to_cpu(di->i_size);
2633 } else {
2634 struct ocfs2_extent_list *el = &di->id2.i_list;
2635 free = (le16_to_cpu(el->l_count) -
2636 le16_to_cpu(el->l_next_free_rec)) *
2637 sizeof(struct ocfs2_extent_rec);
2638 }
2639 if (free >= xattrsize)
2640 return 1;
2641
2642 return 0;
2643}
2644
2645/*
2646 * ocfs2_xattr_ibody_find()
2647 *
2648	 * Find the named extended attribute in the inode block and fill the
2649	 * search info into struct ocfs2_xattr_search.
2650 */
2651static int ocfs2_xattr_ibody_find(struct inode *inode,
2652 int name_index,
2653 const char *name,
2654 struct ocfs2_xattr_search *xs)
2655{
2656 struct ocfs2_inode_info *oi = OCFS2_I(inode);
2657 struct ocfs2_dinode *di = (struct ocfs2_dinode *)xs->inode_bh->b_data;
2658 int ret;
2659 int has_space = 0;
2660
2661 if (inode->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE)
2662 return 0;
2663
2664 if (!(oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL)) {
2665 down_read(&oi->ip_alloc_sem);
2666 has_space = ocfs2_xattr_has_space_inline(inode, di);
2667 up_read(&oi->ip_alloc_sem);
2668 if (!has_space)
2669 return 0;
2670 }
2671
2672 xs->xattr_bh = xs->inode_bh;
2673 xs->end = (void *)di + inode->i_sb->s_blocksize;
2674 if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL)
2675 xs->header = (struct ocfs2_xattr_header *)
2676 (xs->end - le16_to_cpu(di->i_xattr_inline_size));
2677 else
2678 xs->header = (struct ocfs2_xattr_header *)
2679 (xs->end - OCFS2_SB(inode->i_sb)->s_xattr_inline_size);
2680 xs->base = (void *)xs->header;
2681 xs->here = xs->header->xh_entries;
2682
2683 /* Find the named attribute. */
2684 if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL) {
2685 ret = ocfs2_xattr_find_entry(name_index, name, xs);
2686 if (ret && ret != -ENODATA)
2687 return ret;
2688 xs->not_found = ret;
2689 }
2690
2691 return 0;
2692}
2693
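/*
 * Reserve the inline xattr region inside the inode body by shrinking
 * the inline data area or the extent list, then flag the inode as
 * having inline xattrs.
 */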
Joel Becker139ffac2009-08-19 11:09:17 -07002694static int ocfs2_xattr_ibody_init(struct inode *inode,
2695 struct buffer_head *di_bh,
2696 struct ocfs2_xattr_set_ctxt *ctxt)
2697{
2698 int ret;
2699 struct ocfs2_inode_info *oi = OCFS2_I(inode);
2700 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
2701 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2702 unsigned int xattrsize = osb->s_xattr_inline_size;
2703
2704 if (!ocfs2_xattr_has_space_inline(inode, di)) {
2705 ret = -ENOSPC;
2706 goto out;
2707 }
2708
2709 ret = ocfs2_journal_access_di(ctxt->handle, INODE_CACHE(inode), di_bh,
2710 OCFS2_JOURNAL_ACCESS_WRITE);
2711 if (ret) {
2712 mlog_errno(ret);
2713 goto out;
2714 }
2715
2716 /*
2717 * Adjust extent record count or inline data size
2718 * to reserve space for extended attribute.
2719 */
2720 if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
2721 struct ocfs2_inline_data *idata = &di->id2.i_data;
2722 le16_add_cpu(&idata->id_count, -xattrsize);
2723 } else if (!(ocfs2_inode_is_fast_symlink(inode))) {
2724 struct ocfs2_extent_list *el = &di->id2.i_list;
2725 le16_add_cpu(&el->l_count, -(xattrsize /
2726 sizeof(struct ocfs2_extent_rec)));
2727 }
2728 di->i_xattr_inline_size = cpu_to_le16(xattrsize);
2729
2730 spin_lock(&oi->ip_lock);
2731 oi->ip_dyn_features |= OCFS2_INLINE_XATTR_FL|OCFS2_HAS_XATTR_FL;
2732 di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
2733 spin_unlock(&oi->ip_lock);
2734
Joel Beckerec20cec2010-03-19 14:13:52 -07002735 ocfs2_journal_dirty(ctxt->handle, di_bh);
Joel Becker139ffac2009-08-19 11:09:17 -07002736
2737out:
2738 return ret;
2739}
2740
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002741/*
2742 * ocfs2_xattr_ibody_set()
2743 *
2744	 * Set, replace or remove an extended attribute in the inode block.
2745 *
2746 */
2747static int ocfs2_xattr_ibody_set(struct inode *inode,
2748 struct ocfs2_xattr_info *xi,
Tao Ma78f30c32008-11-12 08:27:00 +08002749 struct ocfs2_xattr_search *xs,
2750 struct ocfs2_xattr_set_ctxt *ctxt)
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002751{
Joel Becker139ffac2009-08-19 11:09:17 -07002752 int ret;
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002753 struct ocfs2_inode_info *oi = OCFS2_I(inode);
Joel Becker139ffac2009-08-19 11:09:17 -07002754 struct ocfs2_xa_loc loc;
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002755
2756 if (inode->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE)
2757 return -ENOSPC;
2758
2759 down_write(&oi->ip_alloc_sem);
2760 if (!(oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL)) {
Joel Becker139ffac2009-08-19 11:09:17 -07002761 ret = ocfs2_xattr_ibody_init(inode, xs->inode_bh, ctxt);
2762 if (ret) {
2763 if (ret != -ENOSPC)
2764 mlog_errno(ret);
2765 goto out;
2766 }
2767 }
2768
2769 ocfs2_init_dinode_xa_loc(&loc, inode, xs->inode_bh,
2770 xs->not_found ? NULL : xs->here);
2771 ret = ocfs2_xa_set(&loc, xi, ctxt);
2772 if (ret) {
2773 if (ret != -ENOSPC)
2774 mlog_errno(ret);
2775 goto out;
2776 }
2777 xs->here = loc.xl_entry;
2778
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002779out:
2780 up_write(&oi->ip_alloc_sem);
2781
2782 return ret;
2783}
2784
2785/*
2786 * ocfs2_xattr_block_find()
2787 *
2788	 * Find the named extended attribute in the external xattr block and
2789	 * fill the search info into struct ocfs2_xattr_search.
2790 */
2791static int ocfs2_xattr_block_find(struct inode *inode,
2792 int name_index,
2793 const char *name,
2794 struct ocfs2_xattr_search *xs)
2795{
2796 struct ocfs2_dinode *di = (struct ocfs2_dinode *)xs->inode_bh->b_data;
2797 struct buffer_head *blk_bh = NULL;
Tao Ma589dc262008-08-18 17:38:51 +08002798 struct ocfs2_xattr_block *xb;
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002799 int ret = 0;
2800
2801 if (!di->i_xattr_loc)
2802 return ret;
2803
Joel Becker4ae1d692008-11-13 14:49:18 -08002804 ret = ocfs2_read_xattr_block(inode, le64_to_cpu(di->i_xattr_loc),
2805 &blk_bh);
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002806 if (ret < 0) {
2807 mlog_errno(ret);
2808 return ret;
2809 }
Joel Beckerf6087fb2008-10-20 18:20:43 -07002810
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002811 xs->xattr_bh = blk_bh;
Joel Becker4ae1d692008-11-13 14:49:18 -08002812 xb = (struct ocfs2_xattr_block *)blk_bh->b_data;
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002813
Tao Ma589dc262008-08-18 17:38:51 +08002814 if (!(le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED)) {
2815 xs->header = &xb->xb_attrs.xb_header;
2816 xs->base = (void *)xs->header;
2817 xs->end = (void *)(blk_bh->b_data) + blk_bh->b_size;
2818 xs->here = xs->header->xh_entries;
2819
2820 ret = ocfs2_xattr_find_entry(name_index, name, xs);
2821 } else
2822 ret = ocfs2_xattr_index_block_find(inode, blk_bh,
2823 name_index,
2824 name, xs);
2825
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002826 if (ret && ret != -ENODATA) {
2827 xs->xattr_bh = NULL;
2828 goto cleanup;
2829 }
2830 xs->not_found = ret;
2831 return 0;
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002832cleanup:
2833 brelse(blk_bh);
2834
2835 return ret;
2836}
2837
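/*
 * Allocate and initialize a new external xattr block and point
 * di->i_xattr_loc at it.  With 'indexed' set, the block is created as
 * an indexed (bucketed) tree root instead of a flat header.
 */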
Joel Beckerd3981542009-08-19 02:13:50 -07002838static int ocfs2_create_xattr_block(struct inode *inode,
Tao Ma5aea1f02009-08-18 11:43:24 +08002839 struct buffer_head *inode_bh,
Joel Beckerd3981542009-08-19 02:13:50 -07002840 struct ocfs2_xattr_set_ctxt *ctxt,
2841 int indexed,
2842 struct buffer_head **ret_bh)
Tao Ma5aea1f02009-08-18 11:43:24 +08002843{
2844 int ret;
2845 u16 suballoc_bit_start;
2846 u32 num_got;
Joel Becker2b6cb572010-03-26 10:09:15 +08002847 u64 suballoc_loc, first_blkno;
Tao Ma5aea1f02009-08-18 11:43:24 +08002848 struct ocfs2_dinode *di = (struct ocfs2_dinode *)inode_bh->b_data;
Tao Ma5aea1f02009-08-18 11:43:24 +08002849 struct buffer_head *new_bh = NULL;
2850 struct ocfs2_xattr_block *xblk;
2851
Joel Beckerd3981542009-08-19 02:13:50 -07002852 ret = ocfs2_journal_access_di(ctxt->handle, INODE_CACHE(inode),
2853 inode_bh, OCFS2_JOURNAL_ACCESS_CREATE);
Tao Ma5aea1f02009-08-18 11:43:24 +08002854 if (ret < 0) {
2855 mlog_errno(ret);
2856 goto end;
2857 }
2858
Joel Becker1ed9b772010-05-06 13:59:06 +08002859 ret = ocfs2_claim_metadata(ctxt->handle, ctxt->meta_ac, 1,
Joel Becker2b6cb572010-03-26 10:09:15 +08002860 &suballoc_loc, &suballoc_bit_start,
2861 &num_got, &first_blkno);
Tao Ma5aea1f02009-08-18 11:43:24 +08002862 if (ret < 0) {
2863 mlog_errno(ret);
2864 goto end;
2865 }
2866
2867	new_bh = sb_getblk(inode->i_sb, first_blkno);
	if (!new_bh) {
		/* sb_getblk() can fail under memory pressure */
		ret = -ENOMEM;
		mlog_errno(ret);
		goto end;
	}
2868	ocfs2_set_new_buffer_uptodate(INODE_CACHE(inode), new_bh);
2869
Joel Beckerd3981542009-08-19 02:13:50 -07002870 ret = ocfs2_journal_access_xb(ctxt->handle, INODE_CACHE(inode),
Tao Ma5aea1f02009-08-18 11:43:24 +08002871 new_bh,
2872 OCFS2_JOURNAL_ACCESS_CREATE);
2873 if (ret < 0) {
2874 mlog_errno(ret);
2875 goto end;
2876 }
2877
2878 /* Initialize ocfs2_xattr_block */
2879 xblk = (struct ocfs2_xattr_block *)new_bh->b_data;
2880 memset(xblk, 0, inode->i_sb->s_blocksize);
2881 strcpy((void *)xblk, OCFS2_XATTR_BLOCK_SIGNATURE);
Joel Beckerd3981542009-08-19 02:13:50 -07002882 xblk->xb_suballoc_slot = cpu_to_le16(ctxt->meta_ac->ac_alloc_slot);
Joel Becker2b6cb572010-03-26 10:09:15 +08002883 xblk->xb_suballoc_loc = cpu_to_le64(suballoc_loc);
Tao Ma5aea1f02009-08-18 11:43:24 +08002884 xblk->xb_suballoc_bit = cpu_to_le16(suballoc_bit_start);
Joel Becker1ed9b772010-05-06 13:59:06 +08002885 xblk->xb_fs_generation =
2886 cpu_to_le32(OCFS2_SB(inode->i_sb)->fs_generation);
Tao Ma5aea1f02009-08-18 11:43:24 +08002887 xblk->xb_blkno = cpu_to_le64(first_blkno);
Tao Maa7fe7a32009-08-18 11:43:52 +08002888 if (indexed) {
2889 struct ocfs2_xattr_tree_root *xr = &xblk->xb_attrs.xb_root;
2890 xr->xt_clusters = cpu_to_le32(1);
2891 xr->xt_last_eb_blk = 0;
2892 xr->xt_list.l_tree_depth = 0;
2893 xr->xt_list.l_count = cpu_to_le16(
2894 ocfs2_xattr_recs_per_xb(inode->i_sb));
2895 xr->xt_list.l_next_free_rec = cpu_to_le16(1);
2896 xblk->xb_flags = cpu_to_le16(OCFS2_XATTR_INDEXED);
2897 }
Joel Beckerd3981542009-08-19 02:13:50 -07002898 ocfs2_journal_dirty(ctxt->handle, new_bh);
Tao Maa7fe7a32009-08-18 11:43:52 +08002899
Joel Beckerd3981542009-08-19 02:13:50 -07002900 /* Add it to the inode */
Tao Ma5aea1f02009-08-18 11:43:24 +08002901 di->i_xattr_loc = cpu_to_le64(first_blkno);
Joel Beckerd3981542009-08-19 02:13:50 -07002902
2903 spin_lock(&OCFS2_I(inode)->ip_lock);
2904 OCFS2_I(inode)->ip_dyn_features |= OCFS2_HAS_XATTR_FL;
2905 di->i_dyn_features = cpu_to_le16(OCFS2_I(inode)->ip_dyn_features);
2906 spin_unlock(&OCFS2_I(inode)->ip_lock);
2907
2908 ocfs2_journal_dirty(ctxt->handle, inode_bh);
Tao Ma5aea1f02009-08-18 11:43:24 +08002909
2910 *ret_bh = new_bh;
2911 new_bh = NULL;
2912
2913end:
2914 brelse(new_bh);
2915 return ret;
2916}
2917
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002918/*
2919 * ocfs2_xattr_block_set()
2920 *
2921	 * Set, replace or remove an extended attribute in the external xattr block.
2922 *
2923 */
2924static int ocfs2_xattr_block_set(struct inode *inode,
2925 struct ocfs2_xattr_info *xi,
Tao Ma78f30c32008-11-12 08:27:00 +08002926 struct ocfs2_xattr_search *xs,
2927 struct ocfs2_xattr_set_ctxt *ctxt)
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002928{
2929 struct buffer_head *new_bh = NULL;
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002930 struct ocfs2_xattr_block *xblk = NULL;
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002931 int ret;
Joel Beckerd3981542009-08-19 02:13:50 -07002932 struct ocfs2_xa_loc loc;
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002933
2934 if (!xs->xattr_bh) {
Joel Beckerd3981542009-08-19 02:13:50 -07002935 ret = ocfs2_create_xattr_block(inode, xs->inode_bh, ctxt,
2936 0, &new_bh);
Tao Ma5aea1f02009-08-18 11:43:24 +08002937 if (ret) {
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002938 mlog_errno(ret);
Tao Ma85db90e2008-11-12 08:27:01 +08002939 goto end;
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002940 }
2941
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002942 xs->xattr_bh = new_bh;
Tao Ma5aea1f02009-08-18 11:43:24 +08002943 xblk = (struct ocfs2_xattr_block *)xs->xattr_bh->b_data;
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002944 xs->header = &xblk->xb_attrs.xb_header;
2945 xs->base = (void *)xs->header;
2946 xs->end = (void *)xblk + inode->i_sb->s_blocksize;
2947 xs->here = xs->header->xh_entries;
Tao Ma01225592008-08-18 17:38:53 +08002948 } else
2949 xblk = (struct ocfs2_xattr_block *)xs->xattr_bh->b_data;
2950
2951 if (!(le16_to_cpu(xblk->xb_flags) & OCFS2_XATTR_INDEXED)) {
Joel Beckerd3981542009-08-19 02:13:50 -07002952 ocfs2_init_xattr_block_xa_loc(&loc, inode, xs->xattr_bh,
2953 xs->not_found ? NULL : xs->here);
Tao Ma01225592008-08-18 17:38:53 +08002954
Joel Beckerd3981542009-08-19 02:13:50 -07002955 ret = ocfs2_xa_set(&loc, xi, ctxt);
2956 if (!ret)
2957 xs->here = loc.xl_entry;
Tao Ma5f5261a2010-05-13 22:49:05 +08002958 else if ((ret != -ENOSPC) || ctxt->set_abort)
Tao Ma01225592008-08-18 17:38:53 +08002959 goto end;
Joel Beckerd3981542009-08-19 02:13:50 -07002960 else {
2961 ret = ocfs2_xattr_create_index_block(inode, xs, ctxt);
2962 if (ret)
2963 goto end;
2964 }
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002965 }
2966
Joel Beckerd3981542009-08-19 02:13:50 -07002967 if (le16_to_cpu(xblk->xb_flags) & OCFS2_XATTR_INDEXED)
2968 ret = ocfs2_xattr_set_entry_index_block(inode, xi, xs, ctxt);
Tao Ma01225592008-08-18 17:38:53 +08002969
2970end:
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002971 return ret;
2972}
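
/*
 * A minimal sketch of the placement strategy used by
 * ocfs2_xattr_block_set() above: try the flat (unindexed) block first,
 * and only switch to the indexed-tree format when the flat format runs
 * out of room.  The toy_* type and helpers below are hypothetical
 * stand-ins, not ocfs2 code.
 */
#include <errno.h>

struct toy_xblk { int indexed; int used; int capacity; };

static int toy_store_flat(struct toy_xblk *b, int size)
{
	if (b->used + size > b->capacity)
		return -ENOSPC;			/* like ocfs2_xa_set() failing */
	b->used += size;
	return 0;
}

static int toy_block_set(struct toy_xblk *b, int size)
{
	int ret = 0;

	if (!b->indexed) {
		ret = toy_store_flat(b, size);
		if (ret == -ENOSPC) {
			/* like ocfs2_xattr_create_index_block() */
			b->indexed = 1;
			b->capacity *= 16;	/* buckets give far more room */
			ret = 0;
		}
		if (ret)
			return ret;
		if (!b->indexed)
			return 0;		/* it fit in the flat block */
	}

	/* like ocfs2_xattr_set_entry_index_block() */
	b->used += size;
	return 0;
}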
2973
Tao Ma78f30c32008-11-12 08:27:00 +08002974/* Check whether the new xattr can be inserted into the inode. */
2975static int ocfs2_xattr_can_be_in_inode(struct inode *inode,
2976 struct ocfs2_xattr_info *xi,
2977 struct ocfs2_xattr_search *xs)
2978{
Tao Ma78f30c32008-11-12 08:27:00 +08002979 struct ocfs2_xattr_entry *last;
2980 int free, i;
2981 size_t min_offs = xs->end - xs->base;
2982
2983 if (!xs->header)
2984 return 0;
2985
2986 last = xs->header->xh_entries;
2987
2988 for (i = 0; i < le16_to_cpu(xs->header->xh_count); i++) {
2989 size_t offs = le16_to_cpu(last->xe_name_offset);
2990 if (offs < min_offs)
2991 min_offs = offs;
2992 last += 1;
2993 }
2994
Tiger Yang4442f512009-02-20 11:11:50 +08002995 free = min_offs - ((void *)last - xs->base) - OCFS2_XATTR_HEADER_GAP;
Tao Ma78f30c32008-11-12 08:27:00 +08002996 if (free < 0)
2997 return 0;
2998
2999 BUG_ON(!xs->not_found);
3000
Joel Becker199799a2009-08-14 19:04:15 -07003001 if (free >= (sizeof(struct ocfs2_xattr_entry) + namevalue_size_xi(xi)))
Tao Ma78f30c32008-11-12 08:27:00 +08003002 return 1;
3003
3004 return 0;
3005}
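
/*
 * A worked sketch of the free-space test in ocfs2_xattr_can_be_in_inode()
 * above, using plain integers.  It assumes name and value lengths are
 * rounded up to 4 bytes, that an on-disk entry is 16 bytes, and that a
 * small gap is kept between the entry array and the name/value region;
 * those constants are stand-ins for OCFS2_XATTR_SIZE(),
 * sizeof(struct ocfs2_xattr_entry) and OCFS2_XATTR_HEADER_GAP.
 */
#include <stddef.h>

#define TOY_ROUND4(len)		(((len) + 3) & ~(size_t)3)
#define TOY_ENTRY_SIZE		16
#define TOY_HEADER_GAP		4

/* Would one more inline xattr fit between the entries and the values? */
static int toy_can_be_in_inode(size_t min_offs, size_t entries_end,
			       size_t name_len, size_t value_len)
{
	long free = (long)min_offs - (long)entries_end - TOY_HEADER_GAP;
	size_t need = TOY_ENTRY_SIZE + TOY_ROUND4(name_len) +
		      TOY_ROUND4(value_len);

	/* large values store a fixed-size tree root instead of the data */
	return free >= 0 && (size_t)free >= need;
}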
3006
3007static int ocfs2_calc_xattr_set_need(struct inode *inode,
3008 struct ocfs2_dinode *di,
3009 struct ocfs2_xattr_info *xi,
3010 struct ocfs2_xattr_search *xis,
3011 struct ocfs2_xattr_search *xbs,
3012 int *clusters_need,
Tao Ma85db90e2008-11-12 08:27:01 +08003013 int *meta_need,
3014 int *credits_need)
Tao Ma78f30c32008-11-12 08:27:00 +08003015{
3016 int ret = 0, old_in_xb = 0;
Tao Ma85db90e2008-11-12 08:27:01 +08003017 int clusters_add = 0, meta_add = 0, credits = 0;
Tao Ma78f30c32008-11-12 08:27:00 +08003018 struct buffer_head *bh = NULL;
3019 struct ocfs2_xattr_block *xb = NULL;
3020 struct ocfs2_xattr_entry *xe = NULL;
3021 struct ocfs2_xattr_value_root *xv = NULL;
3022 char *base = NULL;
3023 int name_offset, name_len = 0;
3024 u32 new_clusters = ocfs2_clusters_for_bytes(inode->i_sb,
Joel Becker6b240ff2009-08-14 18:02:52 -07003025 xi->xi_value_len);
Tao Ma78f30c32008-11-12 08:27:00 +08003026 u64 value_size;
3027
Tao Ma71d548a2008-12-05 06:20:54 +08003028 /*
3029 * Calculate the clusters we need to write.
3030 * No matter whether we replace an old one or add a new one,
3031 * we need this for writing.
3032 */
Joel Becker6b240ff2009-08-14 18:02:52 -07003033 if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE)
Tao Ma71d548a2008-12-05 06:20:54 +08003034 credits += new_clusters *
3035 ocfs2_clusters_to_blocks(inode->i_sb, 1);
3036
Tao Ma78f30c32008-11-12 08:27:00 +08003037 if (xis->not_found && xbs->not_found) {
Tao Ma85db90e2008-11-12 08:27:01 +08003038 credits += ocfs2_blocks_per_xattr_bucket(inode->i_sb);
3039
Joel Becker6b240ff2009-08-14 18:02:52 -07003040 if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE) {
Tao Ma78f30c32008-11-12 08:27:00 +08003041 clusters_add += new_clusters;
Tao Ma85db90e2008-11-12 08:27:01 +08003042 credits += ocfs2_calc_extend_credits(inode->i_sb,
3043 &def_xv.xv.xr_list,
3044 new_clusters);
3045 }
Tao Ma78f30c32008-11-12 08:27:00 +08003046
3047 goto meta_guess;
3048 }
3049
3050 if (!xis->not_found) {
3051 xe = xis->here;
3052 name_offset = le16_to_cpu(xe->xe_name_offset);
3053 name_len = OCFS2_XATTR_SIZE(xe->xe_name_len);
3054 base = xis->base;
Tao Ma85db90e2008-11-12 08:27:01 +08003055 credits += OCFS2_INODE_UPDATE_CREDITS;
Tao Ma78f30c32008-11-12 08:27:00 +08003056 } else {
Joel Becker970e4932008-11-13 14:49:19 -08003057 int i, block_off = 0;
Tao Ma78f30c32008-11-12 08:27:00 +08003058 xb = (struct ocfs2_xattr_block *)xbs->xattr_bh->b_data;
3059 xe = xbs->here;
3060 name_offset = le16_to_cpu(xe->xe_name_offset);
3061 name_len = OCFS2_XATTR_SIZE(xe->xe_name_len);
3062 i = xbs->here - xbs->header->xh_entries;
3063 old_in_xb = 1;
3064
3065 if (le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED) {
Tao Mafd68a892009-08-18 11:43:21 +08003066 ret = ocfs2_xattr_bucket_get_name_value(inode->i_sb,
Tao Ma78f30c32008-11-12 08:27:00 +08003067 bucket_xh(xbs->bucket),
3068 i, &block_off,
3069 &name_offset);
3070 base = bucket_block(xbs->bucket, block_off);
Tao Ma85db90e2008-11-12 08:27:01 +08003071 credits += ocfs2_blocks_per_xattr_bucket(inode->i_sb);
3072 } else {
Tao Ma78f30c32008-11-12 08:27:00 +08003073 base = xbs->base;
Tao Ma85db90e2008-11-12 08:27:01 +08003074 credits += OCFS2_XATTR_BLOCK_UPDATE_CREDITS;
3075 }
3076 }
3077
3078 /*
 3079	 * Deleting an xattr doesn't need metadata or cluster allocation,
 3080	 * so just calculate the credits and return.
3081 *
3082 * The credits for removing the value tree will be extended
3083 * by ocfs2_remove_extent itself.
3084 */
Joel Becker6b240ff2009-08-14 18:02:52 -07003085 if (!xi->xi_value) {
Tao Ma85db90e2008-11-12 08:27:01 +08003086 if (!ocfs2_xattr_is_local(xe))
Jan Karaa90714c2008-10-09 19:38:40 +02003087 credits += ocfs2_remove_extent_credits(inode->i_sb);
Tao Ma85db90e2008-11-12 08:27:01 +08003088
3089 goto out;
Tao Ma78f30c32008-11-12 08:27:00 +08003090 }
3091
3092 /* do cluster allocation guess first. */
3093 value_size = le64_to_cpu(xe->xe_value_size);
3094
3095 if (old_in_xb) {
3096 /*
 3097	 * In xattr set, we always try to set the xe in the inode first,
 3098	 * so if it can be inserted into the inode successfully, the old
 3099	 * one will be removed from the xattr block, and this xattr
 3100	 * will be inserted into the inode as a new in-inode xattr.
3101 */
3102 if (ocfs2_xattr_can_be_in_inode(inode, xi, xis)) {
3103 clusters_add += new_clusters;
Jan Karaa90714c2008-10-09 19:38:40 +02003104 credits += ocfs2_remove_extent_credits(inode->i_sb) +
Tao Ma85db90e2008-11-12 08:27:01 +08003105 OCFS2_INODE_UPDATE_CREDITS;
3106 if (!ocfs2_xattr_is_local(xe))
3107 credits += ocfs2_calc_extend_credits(
3108 inode->i_sb,
3109 &def_xv.xv.xr_list,
3110 new_clusters);
Tao Ma78f30c32008-11-12 08:27:00 +08003111 goto out;
3112 }
3113 }
3114
Joel Becker6b240ff2009-08-14 18:02:52 -07003115 if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE) {
Tao Ma78f30c32008-11-12 08:27:00 +08003116 /* the new values will be stored outside. */
3117 u32 old_clusters = 0;
3118
3119 if (!ocfs2_xattr_is_local(xe)) {
3120 old_clusters = ocfs2_clusters_for_bytes(inode->i_sb,
3121 value_size);
3122 xv = (struct ocfs2_xattr_value_root *)
3123 (base + name_offset + name_len);
Tao Ma97aff522008-11-19 16:48:41 +08003124 value_size = OCFS2_XATTR_ROOT_SIZE;
Tao Ma78f30c32008-11-12 08:27:00 +08003125 } else
3126 xv = &def_xv.xv;
3127
Tao Ma85db90e2008-11-12 08:27:01 +08003128 if (old_clusters >= new_clusters) {
Jan Karaa90714c2008-10-09 19:38:40 +02003129 credits += ocfs2_remove_extent_credits(inode->i_sb);
Tao Ma78f30c32008-11-12 08:27:00 +08003130 goto out;
Tao Ma85db90e2008-11-12 08:27:01 +08003131 } else {
Tao Ma78f30c32008-11-12 08:27:00 +08003132 meta_add += ocfs2_extend_meta_needed(&xv->xr_list);
3133 clusters_add += new_clusters - old_clusters;
Tao Ma85db90e2008-11-12 08:27:01 +08003134 credits += ocfs2_calc_extend_credits(inode->i_sb,
3135 &xv->xr_list,
3136 new_clusters -
3137 old_clusters);
Tao Ma97aff522008-11-19 16:48:41 +08003138 if (value_size >= OCFS2_XATTR_ROOT_SIZE)
3139 goto out;
Tao Ma78f30c32008-11-12 08:27:00 +08003140 }
3141 } else {
 3142	 * Now the new value will be stored inline. So if the new
 3143	 * value is no larger than the size of the value root or the old
 3144	 * value, we don't need any allocation; otherwise we have
 3145	 * to guess the metadata allocation.
3146 * to guess metadata allocation.
3147 */
Joel Becker6b240ff2009-08-14 18:02:52 -07003148 if ((ocfs2_xattr_is_local(xe) &&
3149 (value_size >= xi->xi_value_len)) ||
Tao Ma78f30c32008-11-12 08:27:00 +08003150 (!ocfs2_xattr_is_local(xe) &&
Joel Becker6b240ff2009-08-14 18:02:52 -07003151 OCFS2_XATTR_ROOT_SIZE >= xi->xi_value_len))
Tao Ma78f30c32008-11-12 08:27:00 +08003152 goto out;
3153 }
3154
3155meta_guess:
3156 /* calculate metadata allocation. */
3157 if (di->i_xattr_loc) {
3158 if (!xbs->xattr_bh) {
Joel Becker4ae1d692008-11-13 14:49:18 -08003159 ret = ocfs2_read_xattr_block(inode,
3160 le64_to_cpu(di->i_xattr_loc),
3161 &bh);
Tao Ma78f30c32008-11-12 08:27:00 +08003162 if (ret) {
3163 mlog_errno(ret);
3164 goto out;
3165 }
3166
3167 xb = (struct ocfs2_xattr_block *)bh->b_data;
3168 } else
3169 xb = (struct ocfs2_xattr_block *)xbs->xattr_bh->b_data;
3170
Tao Ma90cb5462008-12-05 06:20:56 +08003171 /*
3172 * If there is already an xattr tree, good, we can calculate
 3172	 * like other b-trees. Otherwise we may have to create a tree;
 3173	 * the credit calculation is borrowed from
 3174	 * ocfs2_calc_extend_credits with root_el = NULL, and the
 3175	 * new tree will be cluster based, so no metadata is needed.
3177 */
Tao Ma78f30c32008-11-12 08:27:00 +08003178 if (le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED) {
3179 struct ocfs2_extent_list *el =
3180 &xb->xb_attrs.xb_root.xt_list;
3181 meta_add += ocfs2_extend_meta_needed(el);
Tao Ma85db90e2008-11-12 08:27:01 +08003182 credits += ocfs2_calc_extend_credits(inode->i_sb,
3183 el, 1);
Tao Ma90cb5462008-12-05 06:20:56 +08003184 } else
3185 credits += OCFS2_SUBALLOC_ALLOC + 1;
Tao Ma78f30c32008-11-12 08:27:00 +08003186
3187 /*
 3188	 * This cluster will be used either for a new bucket or for
 3189	 * a new xattr block.
 3190	 * If the cluster size is the same as the bucket size, one
 3191	 * more cluster is needed since we may also need to extend
 3192	 * the bucket.
3193 */
3194 clusters_add += 1;
Tao Ma85db90e2008-11-12 08:27:01 +08003195 credits += ocfs2_blocks_per_xattr_bucket(inode->i_sb);
Tao Ma78f30c32008-11-12 08:27:00 +08003196 if (OCFS2_XATTR_BUCKET_SIZE ==
Tao Ma85db90e2008-11-12 08:27:01 +08003197 OCFS2_SB(inode->i_sb)->s_clustersize) {
3198 credits += ocfs2_blocks_per_xattr_bucket(inode->i_sb);
Tao Ma78f30c32008-11-12 08:27:00 +08003199 clusters_add += 1;
Tao Ma85db90e2008-11-12 08:27:01 +08003200 }
3201 } else {
Tao Ma78f30c32008-11-12 08:27:00 +08003202 meta_add += 1;
Tao Ma85db90e2008-11-12 08:27:01 +08003203 credits += OCFS2_XATTR_BLOCK_CREATE_CREDITS;
3204 }
Tao Ma78f30c32008-11-12 08:27:00 +08003205out:
3206 if (clusters_need)
3207 *clusters_need = clusters_add;
3208 if (meta_need)
3209 *meta_need = meta_add;
Tao Ma85db90e2008-11-12 08:27:01 +08003210 if (credits_need)
3211 *credits_need = credits;
Tao Ma78f30c32008-11-12 08:27:00 +08003212 brelse(bh);
3213 return ret;
3214}
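
/*
 * A small sketch of the cluster arithmetic behind the estimate above:
 * ocfs2_clusters_for_bytes() is modelled as a round-up division, and
 * growing an out-of-place value only needs the difference between the
 * new and old cluster counts.  The 4KB cluster size in the comment is
 * just an example; ocfs2 cluster sizes are per-filesystem.
 */
#include <stdint.h>

static uint32_t toy_clusters_for_bytes(uint64_t bytes, uint32_t csize)
{
	return (uint32_t)((bytes + csize - 1) / csize);
}

/* e.g. csize = 4096: growing a value from 5000 to 9000 bytes
 * goes from 2 clusters to 3, so one extra cluster is reserved. */
static uint32_t toy_value_cluster_delta(uint64_t old_len, uint64_t new_len,
					uint32_t csize)
{
	uint32_t old_c = toy_clusters_for_bytes(old_len, csize);
	uint32_t new_c = toy_clusters_for_bytes(new_len, csize);

	return new_c > old_c ? new_c - old_c : 0;
}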
3215
3216static int ocfs2_init_xattr_set_ctxt(struct inode *inode,
3217 struct ocfs2_dinode *di,
3218 struct ocfs2_xattr_info *xi,
3219 struct ocfs2_xattr_search *xis,
3220 struct ocfs2_xattr_search *xbs,
Tao Ma85db90e2008-11-12 08:27:01 +08003221 struct ocfs2_xattr_set_ctxt *ctxt,
Tao Ma492a8a32009-08-18 11:43:17 +08003222 int extra_meta,
Tao Ma85db90e2008-11-12 08:27:01 +08003223 int *credits)
Tao Ma78f30c32008-11-12 08:27:00 +08003224{
3225 int clusters_add, meta_add, ret;
3226 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
3227
3228 memset(ctxt, 0, sizeof(struct ocfs2_xattr_set_ctxt));
3229
3230 ocfs2_init_dealloc_ctxt(&ctxt->dealloc);
3231
3232 ret = ocfs2_calc_xattr_set_need(inode, di, xi, xis, xbs,
Tao Ma85db90e2008-11-12 08:27:01 +08003233 &clusters_add, &meta_add, credits);
Tao Ma78f30c32008-11-12 08:27:00 +08003234 if (ret) {
3235 mlog_errno(ret);
3236 return ret;
3237 }
3238
Tao Ma492a8a32009-08-18 11:43:17 +08003239 meta_add += extra_meta;
Tao Ma402b4182011-02-23 22:01:17 +08003240 trace_ocfs2_init_xattr_set_ctxt(xi->xi_name, meta_add,
3241 clusters_add, *credits);
Tao Ma78f30c32008-11-12 08:27:00 +08003242
3243 if (meta_add) {
3244 ret = ocfs2_reserve_new_metadata_blocks(osb, meta_add,
3245 &ctxt->meta_ac);
3246 if (ret) {
3247 mlog_errno(ret);
3248 goto out;
3249 }
3250 }
3251
3252 if (clusters_add) {
3253 ret = ocfs2_reserve_clusters(osb, clusters_add, &ctxt->data_ac);
3254 if (ret)
3255 mlog_errno(ret);
3256 }
3257out:
3258 if (ret) {
3259 if (ctxt->meta_ac) {
3260 ocfs2_free_alloc_context(ctxt->meta_ac);
3261 ctxt->meta_ac = NULL;
3262 }
3263
3264 /*
 3265		 * We cannot have an error and a non-NULL ctxt->data_ac.
3266 */
3267 }
3268
3269 return ret;
3270}
3271
Tao Ma85db90e2008-11-12 08:27:01 +08003272static int __ocfs2_xattr_set_handle(struct inode *inode,
3273 struct ocfs2_dinode *di,
3274 struct ocfs2_xattr_info *xi,
3275 struct ocfs2_xattr_search *xis,
3276 struct ocfs2_xattr_search *xbs,
3277 struct ocfs2_xattr_set_ctxt *ctxt)
3278{
Tao Ma9f868f12008-11-19 16:48:42 +08003279 int ret = 0, credits, old_found;
Tao Ma85db90e2008-11-12 08:27:01 +08003280
Joel Becker6b240ff2009-08-14 18:02:52 -07003281 if (!xi->xi_value) {
Tao Ma85db90e2008-11-12 08:27:01 +08003282 /* Remove existing extended attribute */
3283 if (!xis->not_found)
3284 ret = ocfs2_xattr_ibody_set(inode, xi, xis, ctxt);
3285 else if (!xbs->not_found)
3286 ret = ocfs2_xattr_block_set(inode, xi, xbs, ctxt);
3287 } else {
 3288		/* We always try to set the extended attribute in the inode first. */
3289 ret = ocfs2_xattr_ibody_set(inode, xi, xis, ctxt);
3290 if (!ret && !xbs->not_found) {
3291 /*
 3292			 * If that succeeds and the extended attribute exists in
 3293			 * an external block, then we will remove it.
3294 */
Joel Becker6b240ff2009-08-14 18:02:52 -07003295 xi->xi_value = NULL;
3296 xi->xi_value_len = 0;
Tao Ma85db90e2008-11-12 08:27:01 +08003297
Tao Ma9f868f12008-11-19 16:48:42 +08003298 old_found = xis->not_found;
Tao Ma85db90e2008-11-12 08:27:01 +08003299 xis->not_found = -ENODATA;
3300 ret = ocfs2_calc_xattr_set_need(inode,
3301 di,
3302 xi,
3303 xis,
3304 xbs,
3305 NULL,
3306 NULL,
3307 &credits);
Tao Ma9f868f12008-11-19 16:48:42 +08003308 xis->not_found = old_found;
Tao Ma85db90e2008-11-12 08:27:01 +08003309 if (ret) {
3310 mlog_errno(ret);
3311 goto out;
3312 }
3313
Tao Mac901fb02010-04-26 14:34:57 +08003314 ret = ocfs2_extend_trans(ctxt->handle, credits);
Tao Ma85db90e2008-11-12 08:27:01 +08003315 if (ret) {
3316 mlog_errno(ret);
3317 goto out;
3318 }
3319 ret = ocfs2_xattr_block_set(inode, xi, xbs, ctxt);
Tao Ma5f5261a2010-05-13 22:49:05 +08003320 } else if ((ret == -ENOSPC) && !ctxt->set_abort) {
Tao Ma85db90e2008-11-12 08:27:01 +08003321 if (di->i_xattr_loc && !xbs->xattr_bh) {
3322 ret = ocfs2_xattr_block_find(inode,
Joel Becker6b240ff2009-08-14 18:02:52 -07003323 xi->xi_name_index,
3324 xi->xi_name, xbs);
Tao Ma85db90e2008-11-12 08:27:01 +08003325 if (ret)
3326 goto out;
3327
Tao Ma9f868f12008-11-19 16:48:42 +08003328 old_found = xis->not_found;
Tao Ma85db90e2008-11-12 08:27:01 +08003329 xis->not_found = -ENODATA;
3330 ret = ocfs2_calc_xattr_set_need(inode,
3331 di,
3332 xi,
3333 xis,
3334 xbs,
3335 NULL,
3336 NULL,
3337 &credits);
Tao Ma9f868f12008-11-19 16:48:42 +08003338 xis->not_found = old_found;
Tao Ma85db90e2008-11-12 08:27:01 +08003339 if (ret) {
3340 mlog_errno(ret);
3341 goto out;
3342 }
3343
Tao Mac901fb02010-04-26 14:34:57 +08003344 ret = ocfs2_extend_trans(ctxt->handle, credits);
Tao Ma85db90e2008-11-12 08:27:01 +08003345 if (ret) {
3346 mlog_errno(ret);
3347 goto out;
3348 }
3349 }
3350 /*
 3351			 * If there is no space in the inode, we will set the
 3352			 * extended attribute in an external block.
3353 */
3354 ret = ocfs2_xattr_block_set(inode, xi, xbs, ctxt);
3355 if (ret)
3356 goto out;
3357 if (!xis->not_found) {
3358 /*
 3359				 * If that succeeds and the extended attribute
 3360				 * exists in the inode, we will remove it.
3361 */
Joel Becker6b240ff2009-08-14 18:02:52 -07003362 xi->xi_value = NULL;
3363 xi->xi_value_len = 0;
Tao Ma85db90e2008-11-12 08:27:01 +08003364 xbs->not_found = -ENODATA;
3365 ret = ocfs2_calc_xattr_set_need(inode,
3366 di,
3367 xi,
3368 xis,
3369 xbs,
3370 NULL,
3371 NULL,
3372 &credits);
3373 if (ret) {
3374 mlog_errno(ret);
3375 goto out;
3376 }
3377
Tao Mac901fb02010-04-26 14:34:57 +08003378 ret = ocfs2_extend_trans(ctxt->handle, credits);
Tao Ma85db90e2008-11-12 08:27:01 +08003379 if (ret) {
3380 mlog_errno(ret);
3381 goto out;
3382 }
3383 ret = ocfs2_xattr_ibody_set(inode, xi,
3384 xis, ctxt);
3385 }
3386 }
3387 }
3388
Tao Ma4b3f6202008-12-05 06:20:55 +08003389 if (!ret) {
3390 /* Update inode ctime. */
Joel Becker0cf2f762009-02-12 16:41:25 -08003391 ret = ocfs2_journal_access_di(ctxt->handle, INODE_CACHE(inode),
Tao Ma89a907a2009-02-17 04:39:28 +08003392 xis->inode_bh,
3393 OCFS2_JOURNAL_ACCESS_WRITE);
Tao Ma4b3f6202008-12-05 06:20:55 +08003394 if (ret) {
3395 mlog_errno(ret);
3396 goto out;
3397 }
3398
3399 inode->i_ctime = CURRENT_TIME;
3400 di->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
3401 di->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
3402 ocfs2_journal_dirty(ctxt->handle, xis->inode_bh);
3403 }
Tao Ma85db90e2008-11-12 08:27:01 +08003404out:
3405 return ret;
3406}
3407
Tiger Yangcf1d6c72008-08-18 17:11:00 +08003408/*
Tiger Yang6c3faba2008-11-14 11:16:03 +08003409 * This function is only called during inode creation
 3410 * to initialize the security/acl xattrs of the new inode.
Tiger Yang008aafa2008-12-09 16:43:08 +08003411 * All transaction credits have been reserved in mknod.
Tiger Yang6c3faba2008-11-14 11:16:03 +08003412 */
3413int ocfs2_xattr_set_handle(handle_t *handle,
3414 struct inode *inode,
3415 struct buffer_head *di_bh,
3416 int name_index,
3417 const char *name,
3418 const void *value,
3419 size_t value_len,
3420 int flags,
3421 struct ocfs2_alloc_context *meta_ac,
3422 struct ocfs2_alloc_context *data_ac)
3423{
3424 struct ocfs2_dinode *di;
3425 int ret;
3426
3427 struct ocfs2_xattr_info xi = {
Joel Becker6b240ff2009-08-14 18:02:52 -07003428 .xi_name_index = name_index,
3429 .xi_name = name,
Joel Becker18853b92009-08-14 18:17:07 -07003430 .xi_name_len = strlen(name),
Joel Becker6b240ff2009-08-14 18:02:52 -07003431 .xi_value = value,
3432 .xi_value_len = value_len,
Tiger Yang6c3faba2008-11-14 11:16:03 +08003433 };
3434
3435 struct ocfs2_xattr_search xis = {
3436 .not_found = -ENODATA,
3437 };
3438
3439 struct ocfs2_xattr_search xbs = {
3440 .not_found = -ENODATA,
3441 };
3442
3443 struct ocfs2_xattr_set_ctxt ctxt = {
3444 .handle = handle,
3445 .meta_ac = meta_ac,
3446 .data_ac = data_ac,
3447 };
3448
3449 if (!ocfs2_supports_xattr(OCFS2_SB(inode->i_sb)))
3450 return -EOPNOTSUPP;
3451
Tiger Yang008aafa2008-12-09 16:43:08 +08003452 /*
 3453	 * In an extreme situation, we may need an xattr bucket when
 3454	 * the block size is too small. The credits for the bucket
 3455	 * have already been reserved in mknod.
3456 */
3457 if (inode->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE) {
3458 xbs.bucket = ocfs2_xattr_bucket_new(inode);
3459 if (!xbs.bucket) {
3460 mlog_errno(-ENOMEM);
3461 return -ENOMEM;
3462 }
3463 }
3464
Tiger Yang6c3faba2008-11-14 11:16:03 +08003465 xis.inode_bh = xbs.inode_bh = di_bh;
3466 di = (struct ocfs2_dinode *)di_bh->b_data;
3467
3468 down_write(&OCFS2_I(inode)->ip_xattr_sem);
3469
3470 ret = ocfs2_xattr_ibody_find(inode, name_index, name, &xis);
3471 if (ret)
3472 goto cleanup;
3473 if (xis.not_found) {
3474 ret = ocfs2_xattr_block_find(inode, name_index, name, &xbs);
3475 if (ret)
3476 goto cleanup;
3477 }
3478
3479 ret = __ocfs2_xattr_set_handle(inode, di, &xi, &xis, &xbs, &ctxt);
3480
3481cleanup:
3482 up_write(&OCFS2_I(inode)->ip_xattr_sem);
3483 brelse(xbs.xattr_bh);
Tiger Yang008aafa2008-12-09 16:43:08 +08003484 ocfs2_xattr_bucket_free(xbs.bucket);
Tiger Yang6c3faba2008-11-14 11:16:03 +08003485
3486 return ret;
3487}
3488
3489/*
Tiger Yangcf1d6c72008-08-18 17:11:00 +08003490 * ocfs2_xattr_set()
3491 *
3492 * Set, replace or remove an extended attribute for this inode.
 3493 * A NULL value removes an existing extended attribute; otherwise the
 3494 * attribute is created or replaced.
3495 */
3496int ocfs2_xattr_set(struct inode *inode,
3497 int name_index,
3498 const char *name,
3499 const void *value,
3500 size_t value_len,
3501 int flags)
3502{
3503 struct buffer_head *di_bh = NULL;
3504 struct ocfs2_dinode *di;
Tao Ma492a8a32009-08-18 11:43:17 +08003505 int ret, credits, ref_meta = 0, ref_credits = 0;
Tao Ma78f30c32008-11-12 08:27:00 +08003506 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
Tao Ma85db90e2008-11-12 08:27:01 +08003507 struct inode *tl_inode = osb->osb_tl_inode;
Tao Ma78f30c32008-11-12 08:27:00 +08003508 struct ocfs2_xattr_set_ctxt ctxt = { NULL, NULL, };
Tao Ma492a8a32009-08-18 11:43:17 +08003509 struct ocfs2_refcount_tree *ref_tree = NULL;
Tiger Yangcf1d6c72008-08-18 17:11:00 +08003510
3511 struct ocfs2_xattr_info xi = {
Joel Becker6b240ff2009-08-14 18:02:52 -07003512 .xi_name_index = name_index,
3513 .xi_name = name,
Joel Becker18853b92009-08-14 18:17:07 -07003514 .xi_name_len = strlen(name),
Joel Becker6b240ff2009-08-14 18:02:52 -07003515 .xi_value = value,
3516 .xi_value_len = value_len,
Tiger Yangcf1d6c72008-08-18 17:11:00 +08003517 };
3518
3519 struct ocfs2_xattr_search xis = {
3520 .not_found = -ENODATA,
3521 };
3522
3523 struct ocfs2_xattr_search xbs = {
3524 .not_found = -ENODATA,
3525 };
3526
Tiger Yang8154da32008-08-18 17:11:46 +08003527 if (!ocfs2_supports_xattr(OCFS2_SB(inode->i_sb)))
3528 return -EOPNOTSUPP;
3529
Joel Beckerba937122008-10-24 19:13:20 -07003530 /*
3531 * Only xbs will be used on indexed trees. xis doesn't need a
3532 * bucket.
3533 */
3534 xbs.bucket = ocfs2_xattr_bucket_new(inode);
3535 if (!xbs.bucket) {
3536 mlog_errno(-ENOMEM);
3537 return -ENOMEM;
3538 }
3539
Tiger Yangcf1d6c72008-08-18 17:11:00 +08003540 ret = ocfs2_inode_lock(inode, &di_bh, 1);
3541 if (ret < 0) {
3542 mlog_errno(ret);
Joel Beckerba937122008-10-24 19:13:20 -07003543 goto cleanup_nolock;
Tiger Yangcf1d6c72008-08-18 17:11:00 +08003544 }
3545 xis.inode_bh = xbs.inode_bh = di_bh;
3546 di = (struct ocfs2_dinode *)di_bh->b_data;
3547
3548 down_write(&OCFS2_I(inode)->ip_xattr_sem);
3549 /*
 3550	 * Scan the inode and the external block for an extended
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003551	 * attribute with the same name and collect search information.
Tiger Yangcf1d6c72008-08-18 17:11:00 +08003552 */
3553 ret = ocfs2_xattr_ibody_find(inode, name_index, name, &xis);
3554 if (ret)
3555 goto cleanup;
3556 if (xis.not_found) {
3557 ret = ocfs2_xattr_block_find(inode, name_index, name, &xbs);
3558 if (ret)
3559 goto cleanup;
3560 }
3561
3562 if (xis.not_found && xbs.not_found) {
3563 ret = -ENODATA;
3564 if (flags & XATTR_REPLACE)
3565 goto cleanup;
3566 ret = 0;
3567 if (!value)
3568 goto cleanup;
3569 } else {
3570 ret = -EEXIST;
3571 if (flags & XATTR_CREATE)
3572 goto cleanup;
3573 }
3574
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003575 /* Check whether the value is refcounted and do some preparation. */
Tao Ma492a8a32009-08-18 11:43:17 +08003576 if (OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL &&
3577 (!xis.not_found || !xbs.not_found)) {
3578 ret = ocfs2_prepare_refcount_xattr(inode, di, &xi,
3579 &xis, &xbs, &ref_tree,
3580 &ref_meta, &ref_credits);
3581 if (ret) {
3582 mlog_errno(ret);
3583 goto cleanup;
3584 }
3585 }
Tao Ma85db90e2008-11-12 08:27:01 +08003586
3587 mutex_lock(&tl_inode->i_mutex);
3588
3589 if (ocfs2_truncate_log_needs_flush(osb)) {
3590 ret = __ocfs2_flush_truncate_log(osb);
3591 if (ret < 0) {
3592 mutex_unlock(&tl_inode->i_mutex);
3593 mlog_errno(ret);
3594 goto cleanup;
3595 }
3596 }
3597 mutex_unlock(&tl_inode->i_mutex);
3598
3599 ret = ocfs2_init_xattr_set_ctxt(inode, di, &xi, &xis,
Tao Ma492a8a32009-08-18 11:43:17 +08003600 &xbs, &ctxt, ref_meta, &credits);
Tao Ma78f30c32008-11-12 08:27:00 +08003601 if (ret) {
3602 mlog_errno(ret);
3603 goto cleanup;
3604 }
3605
Tao Ma4b3f6202008-12-05 06:20:55 +08003606 /* we need to update inode's ctime field, so add credit for it. */
3607 credits += OCFS2_INODE_UPDATE_CREDITS;
Tao Ma492a8a32009-08-18 11:43:17 +08003608 ctxt.handle = ocfs2_start_trans(osb, credits + ref_credits);
Tao Ma85db90e2008-11-12 08:27:01 +08003609 if (IS_ERR(ctxt.handle)) {
3610 ret = PTR_ERR(ctxt.handle);
3611 mlog_errno(ret);
3612 goto cleanup;
Tiger Yangcf1d6c72008-08-18 17:11:00 +08003613 }
Tao Ma85db90e2008-11-12 08:27:01 +08003614
3615 ret = __ocfs2_xattr_set_handle(inode, di, &xi, &xis, &xbs, &ctxt);
3616
3617 ocfs2_commit_trans(osb, ctxt.handle);
3618
Tao Ma78f30c32008-11-12 08:27:00 +08003619 if (ctxt.data_ac)
3620 ocfs2_free_alloc_context(ctxt.data_ac);
3621 if (ctxt.meta_ac)
3622 ocfs2_free_alloc_context(ctxt.meta_ac);
3623 if (ocfs2_dealloc_has_cluster(&ctxt.dealloc))
3624 ocfs2_schedule_truncate_log_flush(osb, 1);
3625 ocfs2_run_deallocs(osb, &ctxt.dealloc);
Tao Ma8b2c0db2009-08-18 11:43:49 +08003626
Tiger Yangcf1d6c72008-08-18 17:11:00 +08003627cleanup:
Tao Ma492a8a32009-08-18 11:43:17 +08003628 if (ref_tree)
3629 ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
Tiger Yangcf1d6c72008-08-18 17:11:00 +08003630 up_write(&OCFS2_I(inode)->ip_xattr_sem);
Tao Ma8b2c0db2009-08-18 11:43:49 +08003631 if (!value && !ret) {
3632 ret = ocfs2_try_remove_refcount_tree(inode, di_bh);
3633 if (ret)
3634 mlog_errno(ret);
3635 }
Tiger Yangcf1d6c72008-08-18 17:11:00 +08003636 ocfs2_inode_unlock(inode, 1);
Joel Beckerba937122008-10-24 19:13:20 -07003637cleanup_nolock:
Tiger Yangcf1d6c72008-08-18 17:11:00 +08003638 brelse(di_bh);
3639 brelse(xbs.xattr_bh);
Joel Beckerba937122008-10-24 19:13:20 -07003640 ocfs2_xattr_bucket_free(xbs.bucket);
Tiger Yangcf1d6c72008-08-18 17:11:00 +08003641
3642 return ret;
3643}
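
/*
 * A caller sketch: this is roughly how the per-namespace xattr handlers
 * elsewhere in this file end up using ocfs2_xattr_set().  The wrapper
 * name and its exact checks are illustrative only; the handler
 * signatures differ between kernel versions.
 */
static int example_set_user_xattr(struct inode *inode, const char *name,
				  const void *value, size_t size, int flags)
{
	if (strlen(name) > OCFS2_MAX_XATTR_NAME_LEN)
		return -ERANGE;

	/* A NULL value removes the attribute, as described above. */
	return ocfs2_xattr_set(inode, OCFS2_XATTR_INDEX_USER,
			       name, value, size, flags);
}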
3644
Tao Ma0c044f02008-08-18 17:38:50 +08003645/*
 3646 * Find the xattr extent rec which may contain name_hash.
3647 * e_cpos will be the first name hash of the xattr rec.
3648 * el must be the ocfs2_xattr_header.xb_attrs.xb_root.xt_list.
3649 */
3650static int ocfs2_xattr_get_rec(struct inode *inode,
3651 u32 name_hash,
3652 u64 *p_blkno,
3653 u32 *e_cpos,
3654 u32 *num_clusters,
3655 struct ocfs2_extent_list *el)
3656{
3657 int ret = 0, i;
3658 struct buffer_head *eb_bh = NULL;
3659 struct ocfs2_extent_block *eb;
3660 struct ocfs2_extent_rec *rec = NULL;
3661 u64 e_blkno = 0;
3662
3663 if (el->l_tree_depth) {
Joel Beckerfacdb772009-02-12 18:08:48 -08003664 ret = ocfs2_find_leaf(INODE_CACHE(inode), el, name_hash,
3665 &eb_bh);
Tao Ma0c044f02008-08-18 17:38:50 +08003666 if (ret) {
3667 mlog_errno(ret);
3668 goto out;
3669 }
3670
3671 eb = (struct ocfs2_extent_block *) eb_bh->b_data;
3672 el = &eb->h_list;
3673
3674 if (el->l_tree_depth) {
3675 ocfs2_error(inode->i_sb,
3676 "Inode %lu has non zero tree depth in "
3677 "xattr tree block %llu\n", inode->i_ino,
3678 (unsigned long long)eb_bh->b_blocknr);
3679 ret = -EROFS;
3680 goto out;
3681 }
3682 }
3683
3684 for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) {
3685 rec = &el->l_recs[i];
3686
3687 if (le32_to_cpu(rec->e_cpos) <= name_hash) {
3688 e_blkno = le64_to_cpu(rec->e_blkno);
3689 break;
3690 }
3691 }
3692
3693 if (!e_blkno) {
3694 ocfs2_error(inode->i_sb, "Inode %lu has bad extent "
3695 "record (%u, %u, 0) in xattr", inode->i_ino,
3696 le32_to_cpu(rec->e_cpos),
3697 ocfs2_rec_clusters(el, rec));
3698 ret = -EROFS;
3699 goto out;
3700 }
3701
3702 *p_blkno = le64_to_cpu(rec->e_blkno);
3703 *num_clusters = le16_to_cpu(rec->e_leaf_clusters);
3704 if (e_cpos)
3705 *e_cpos = le32_to_cpu(rec->e_cpos);
3706out:
3707 brelse(eb_bh);
3708 return ret;
3709}
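
/*
 * A minimal model of the record lookup above: extent records are sorted
 * by e_cpos (the lowest name hash each extent covers), so walking from
 * the last record down, the first one whose cpos is <= the wanted hash
 * is the extent that covers it.  The toy_rec type is a stand-in for
 * struct ocfs2_extent_rec.
 */
#include <stdint.h>

struct toy_rec { uint32_t cpos; uint64_t blkno; uint32_t clusters; };

static const struct toy_rec *toy_get_rec(const struct toy_rec *recs,
					 int nr_recs, uint32_t name_hash)
{
	int i;

	for (i = nr_recs - 1; i >= 0; i--)
		if (recs[i].cpos <= name_hash)
			return &recs[i];

	return NULL;	/* nothing covers this hash: a corrupt tree */
}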
3710
3711typedef int (xattr_bucket_func)(struct inode *inode,
3712 struct ocfs2_xattr_bucket *bucket,
3713 void *para);
3714
Tao Ma589dc262008-08-18 17:38:51 +08003715static int ocfs2_find_xe_in_bucket(struct inode *inode,
Joel Beckere2356a32008-10-27 15:01:54 -07003716 struct ocfs2_xattr_bucket *bucket,
Tao Ma589dc262008-08-18 17:38:51 +08003717 int name_index,
3718 const char *name,
3719 u32 name_hash,
3720 u16 *xe_index,
3721 int *found)
3722{
3723 int i, ret = 0, cmp = 1, block_off, new_offset;
Joel Beckere2356a32008-10-27 15:01:54 -07003724 struct ocfs2_xattr_header *xh = bucket_xh(bucket);
Tao Ma589dc262008-08-18 17:38:51 +08003725 size_t name_len = strlen(name);
3726 struct ocfs2_xattr_entry *xe = NULL;
Tao Ma589dc262008-08-18 17:38:51 +08003727 char *xe_name;
3728
3729 /*
3730 * We don't use binary search in the bucket because there
3731 * may be multiple entries with the same name hash.
3732 */
3733 for (i = 0; i < le16_to_cpu(xh->xh_count); i++) {
3734 xe = &xh->xh_entries[i];
3735
3736 if (name_hash > le32_to_cpu(xe->xe_name_hash))
3737 continue;
3738 else if (name_hash < le32_to_cpu(xe->xe_name_hash))
3739 break;
3740
3741 cmp = name_index - ocfs2_xattr_get_type(xe);
3742 if (!cmp)
3743 cmp = name_len - xe->xe_name_len;
3744 if (cmp)
3745 continue;
3746
Tao Mafd68a892009-08-18 11:43:21 +08003747 ret = ocfs2_xattr_bucket_get_name_value(inode->i_sb,
Tao Ma589dc262008-08-18 17:38:51 +08003748 xh,
3749 i,
3750 &block_off,
3751 &new_offset);
3752 if (ret) {
3753 mlog_errno(ret);
3754 break;
3755 }
3756
Joel Becker970e4932008-11-13 14:49:19 -08003757
Joel Beckere2356a32008-10-27 15:01:54 -07003758 xe_name = bucket_block(bucket, block_off) + new_offset;
3759 if (!memcmp(name, xe_name, name_len)) {
Tao Ma589dc262008-08-18 17:38:51 +08003760 *xe_index = i;
3761 *found = 1;
3762 ret = 0;
3763 break;
3764 }
3765 }
3766
3767 return ret;
3768}
3769
3770/*
3771 * Find the specified xattr entry in a series of buckets.
 3772 * The series starts at p_blkno and spans num_clusters clusters.
 3773 * The ocfs2_xattr_header.xh_num_buckets of the first bucket holds
 3774 * the number of valid buckets.
 3775 *
 3776 * Return the bucket this xattr should reside in. If the xattr's
 3777 * hash falls in the gap between two buckets, return the lower bucket.
3778 */
3779static int ocfs2_xattr_bucket_find(struct inode *inode,
3780 int name_index,
3781 const char *name,
3782 u32 name_hash,
3783 u64 p_blkno,
3784 u32 first_hash,
3785 u32 num_clusters,
3786 struct ocfs2_xattr_search *xs)
3787{
3788 int ret, found = 0;
Tao Ma589dc262008-08-18 17:38:51 +08003789 struct ocfs2_xattr_header *xh = NULL;
3790 struct ocfs2_xattr_entry *xe = NULL;
3791 u16 index = 0;
3792 u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb);
3793 int low_bucket = 0, bucket, high_bucket;
Joel Beckere2356a32008-10-27 15:01:54 -07003794 struct ocfs2_xattr_bucket *search;
Tao Ma589dc262008-08-18 17:38:51 +08003795 u32 last_hash;
Joel Beckere2356a32008-10-27 15:01:54 -07003796 u64 blkno, lower_blkno = 0;
Tao Ma589dc262008-08-18 17:38:51 +08003797
Joel Beckere2356a32008-10-27 15:01:54 -07003798 search = ocfs2_xattr_bucket_new(inode);
3799 if (!search) {
3800 ret = -ENOMEM;
3801 mlog_errno(ret);
3802 goto out;
3803 }
3804
3805 ret = ocfs2_read_xattr_bucket(search, p_blkno);
Tao Ma589dc262008-08-18 17:38:51 +08003806 if (ret) {
3807 mlog_errno(ret);
3808 goto out;
3809 }
3810
Joel Beckere2356a32008-10-27 15:01:54 -07003811 xh = bucket_xh(search);
Tao Ma589dc262008-08-18 17:38:51 +08003812 high_bucket = le16_to_cpu(xh->xh_num_buckets) - 1;
Tao Ma589dc262008-08-18 17:38:51 +08003813 while (low_bucket <= high_bucket) {
Joel Beckere2356a32008-10-27 15:01:54 -07003814 ocfs2_xattr_bucket_relse(search);
3815
Tao Ma589dc262008-08-18 17:38:51 +08003816 bucket = (low_bucket + high_bucket) / 2;
Tao Ma589dc262008-08-18 17:38:51 +08003817 blkno = p_blkno + bucket * blk_per_bucket;
Joel Beckere2356a32008-10-27 15:01:54 -07003818 ret = ocfs2_read_xattr_bucket(search, blkno);
Tao Ma589dc262008-08-18 17:38:51 +08003819 if (ret) {
3820 mlog_errno(ret);
3821 goto out;
3822 }
3823
Joel Beckere2356a32008-10-27 15:01:54 -07003824 xh = bucket_xh(search);
Tao Ma589dc262008-08-18 17:38:51 +08003825 xe = &xh->xh_entries[0];
3826 if (name_hash < le32_to_cpu(xe->xe_name_hash)) {
3827 high_bucket = bucket - 1;
3828 continue;
3829 }
3830
3831 /*
3832 * Check whether the hash of the last entry in our
Tao Ma5a095612008-09-19 22:17:41 +08003833		 * bucket is larger than the hash we are searching for. For an
 3834		 * empty bucket, the last entry is also the first one.
Tao Ma589dc262008-08-18 17:38:51 +08003835 */
Tao Ma5a095612008-09-19 22:17:41 +08003836 if (xh->xh_count)
3837 xe = &xh->xh_entries[le16_to_cpu(xh->xh_count) - 1];
3838
Tao Ma589dc262008-08-18 17:38:51 +08003839 last_hash = le32_to_cpu(xe->xe_name_hash);
3840
Joel Beckere2356a32008-10-27 15:01:54 -07003841 /* record lower_blkno which may be the insert place. */
3842 lower_blkno = blkno;
Tao Ma589dc262008-08-18 17:38:51 +08003843
3844 if (name_hash > le32_to_cpu(xe->xe_name_hash)) {
3845 low_bucket = bucket + 1;
3846 continue;
3847 }
3848
3849 /* the searched xattr should reside in this bucket if exists. */
Joel Beckere2356a32008-10-27 15:01:54 -07003850 ret = ocfs2_find_xe_in_bucket(inode, search,
Tao Ma589dc262008-08-18 17:38:51 +08003851 name_index, name, name_hash,
3852 &index, &found);
3853 if (ret) {
3854 mlog_errno(ret);
3855 goto out;
3856 }
3857 break;
3858 }
3859
3860 /*
3861 * Record the bucket we have found.
 3862	 * When the xattr's hash value falls in the gap between two buckets, we will
3863 * always set it to the previous bucket.
3864 */
Joel Beckere2356a32008-10-27 15:01:54 -07003865 if (!lower_blkno)
3866 lower_blkno = p_blkno;
3867
3868 /* This should be in cache - we just read it during the search */
3869 ret = ocfs2_read_xattr_bucket(xs->bucket, lower_blkno);
3870 if (ret) {
3871 mlog_errno(ret);
3872 goto out;
Tao Ma589dc262008-08-18 17:38:51 +08003873 }
Tao Ma589dc262008-08-18 17:38:51 +08003874
Joel Beckerba937122008-10-24 19:13:20 -07003875 xs->header = bucket_xh(xs->bucket);
3876 xs->base = bucket_block(xs->bucket, 0);
Tao Ma589dc262008-08-18 17:38:51 +08003877 xs->end = xs->base + inode->i_sb->s_blocksize;
3878
3879 if (found) {
Tao Ma589dc262008-08-18 17:38:51 +08003880 xs->here = &xs->header->xh_entries[index];
Tao Ma402b4182011-02-23 22:01:17 +08003881 trace_ocfs2_xattr_bucket_find(OCFS2_I(inode)->ip_blkno,
3882 name, name_index, name_hash,
3883 (unsigned long long)bucket_blkno(xs->bucket),
3884 index);
Tao Ma589dc262008-08-18 17:38:51 +08003885 } else
3886 ret = -ENODATA;
3887
3888out:
Joel Beckere2356a32008-10-27 15:01:54 -07003889 ocfs2_xattr_bucket_free(search);
Tao Ma589dc262008-08-18 17:38:51 +08003890 return ret;
3891}
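
/*
 * A compact model of the bucket binary search above.  Each toy bucket
 * carries the first and last name hash it holds; when the wanted hash
 * falls in the gap between two buckets, the lower one is returned, just
 * as lower_blkno is used above.  Types and names are illustrative only.
 */
#include <stdint.h>

struct toy_bucket { uint32_t first_hash; uint32_t last_hash; };

static int toy_bucket_find(const struct toy_bucket *b, int nr, uint32_t hash)
{
	int low = 0, high = nr - 1, mid, lower = 0;

	while (low <= high) {
		mid = (low + high) / 2;
		if (hash < b[mid].first_hash) {
			high = mid - 1;
			continue;
		}
		lower = mid;			/* candidate insert place */
		if (hash > b[mid].last_hash) {
			low = mid + 1;
			continue;
		}
		return mid;			/* hash lies in this bucket */
	}

	return lower;	/* hash fell in a gap: use the lower bucket */
}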
3892
3893static int ocfs2_xattr_index_block_find(struct inode *inode,
3894 struct buffer_head *root_bh,
3895 int name_index,
3896 const char *name,
3897 struct ocfs2_xattr_search *xs)
3898{
3899 int ret;
3900 struct ocfs2_xattr_block *xb =
3901 (struct ocfs2_xattr_block *)root_bh->b_data;
3902 struct ocfs2_xattr_tree_root *xb_root = &xb->xb_attrs.xb_root;
3903 struct ocfs2_extent_list *el = &xb_root->xt_list;
3904 u64 p_blkno = 0;
3905 u32 first_hash, num_clusters = 0;
Tao Ma2057e5c2008-10-09 23:06:13 +08003906 u32 name_hash = ocfs2_xattr_name_hash(inode, name, strlen(name));
Tao Ma589dc262008-08-18 17:38:51 +08003907
3908 if (le16_to_cpu(el->l_next_free_rec) == 0)
3909 return -ENODATA;
3910
Tao Ma402b4182011-02-23 22:01:17 +08003911 trace_ocfs2_xattr_index_block_find(OCFS2_I(inode)->ip_blkno,
3912 name, name_index, name_hash,
3913 (unsigned long long)root_bh->b_blocknr,
3914 -1);
Tao Ma589dc262008-08-18 17:38:51 +08003915
3916 ret = ocfs2_xattr_get_rec(inode, name_hash, &p_blkno, &first_hash,
3917 &num_clusters, el);
3918 if (ret) {
3919 mlog_errno(ret);
3920 goto out;
3921 }
3922
3923 BUG_ON(p_blkno == 0 || num_clusters == 0 || first_hash > name_hash);
3924
Tao Ma402b4182011-02-23 22:01:17 +08003925 trace_ocfs2_xattr_index_block_find_rec(OCFS2_I(inode)->ip_blkno,
3926 name, name_index, first_hash,
3927 (unsigned long long)p_blkno,
3928 num_clusters);
Tao Ma589dc262008-08-18 17:38:51 +08003929
3930 ret = ocfs2_xattr_bucket_find(inode, name_index, name, name_hash,
3931 p_blkno, first_hash, num_clusters, xs);
3932
3933out:
3934 return ret;
3935}
3936
Tao Ma0c044f02008-08-18 17:38:50 +08003937static int ocfs2_iterate_xattr_buckets(struct inode *inode,
3938 u64 blkno,
3939 u32 clusters,
3940 xattr_bucket_func *func,
3941 void *para)
3942{
Joel Becker6dde41d2008-10-24 17:16:48 -07003943 int i, ret = 0;
Tao Ma0c044f02008-08-18 17:38:50 +08003944 u32 bpc = ocfs2_xattr_buckets_per_cluster(OCFS2_SB(inode->i_sb));
3945 u32 num_buckets = clusters * bpc;
Joel Beckerba937122008-10-24 19:13:20 -07003946 struct ocfs2_xattr_bucket *bucket;
Tao Ma0c044f02008-08-18 17:38:50 +08003947
Joel Beckerba937122008-10-24 19:13:20 -07003948 bucket = ocfs2_xattr_bucket_new(inode);
3949 if (!bucket) {
3950 mlog_errno(-ENOMEM);
3951 return -ENOMEM;
3952 }
Tao Ma0c044f02008-08-18 17:38:50 +08003953
Tao Ma402b4182011-02-23 22:01:17 +08003954 trace_ocfs2_iterate_xattr_buckets(
3955 (unsigned long long)OCFS2_I(inode)->ip_blkno,
3956 (unsigned long long)blkno, clusters);
Tao Ma0c044f02008-08-18 17:38:50 +08003957
Joel Beckerba937122008-10-24 19:13:20 -07003958 for (i = 0; i < num_buckets; i++, blkno += bucket->bu_blocks) {
3959 ret = ocfs2_read_xattr_bucket(bucket, blkno);
Tao Ma0c044f02008-08-18 17:38:50 +08003960 if (ret) {
3961 mlog_errno(ret);
Joel Beckerba937122008-10-24 19:13:20 -07003962 break;
Tao Ma0c044f02008-08-18 17:38:50 +08003963 }
3964
Tao Ma0c044f02008-08-18 17:38:50 +08003965 /*
 3966		 * The real number of buckets in this series of blocks is
 3967		 * stored in the 1st bucket.
3968 */
3969 if (i == 0)
Joel Beckerba937122008-10-24 19:13:20 -07003970 num_buckets = le16_to_cpu(bucket_xh(bucket)->xh_num_buckets);
Tao Ma0c044f02008-08-18 17:38:50 +08003971
Tao Ma402b4182011-02-23 22:01:17 +08003972 trace_ocfs2_iterate_xattr_bucket((unsigned long long)blkno,
Joel Beckerba937122008-10-24 19:13:20 -07003973 le32_to_cpu(bucket_xh(bucket)->xh_entries[0].xe_name_hash));
Tao Ma0c044f02008-08-18 17:38:50 +08003974 if (func) {
Joel Beckerba937122008-10-24 19:13:20 -07003975 ret = func(inode, bucket, para);
Tao Maa46fa682009-05-04 05:18:09 +08003976 if (ret && ret != -ERANGE)
Tao Ma0c044f02008-08-18 17:38:50 +08003977 mlog_errno(ret);
Joel Beckerba937122008-10-24 19:13:20 -07003978 /* Fall through to bucket_relse() */
Tao Ma0c044f02008-08-18 17:38:50 +08003979 }
3980
Joel Beckerba937122008-10-24 19:13:20 -07003981 ocfs2_xattr_bucket_relse(bucket);
3982 if (ret)
3983 break;
Tao Ma0c044f02008-08-18 17:38:50 +08003984 }
3985
Joel Beckerba937122008-10-24 19:13:20 -07003986 ocfs2_xattr_bucket_free(bucket);
Tao Ma0c044f02008-08-18 17:38:50 +08003987 return ret;
3988}
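
/*
 * A sketch of how the iterator above is meant to be used: any function
 * matching the xattr_bucket_func typedef can be passed in, and a
 * non-zero return value stops the walk.  The counting callback below is
 * hypothetical, not an existing ocfs2 helper.
 */
static int example_count_bucket_entries(struct inode *inode,
					struct ocfs2_xattr_bucket *bucket,
					void *para)
{
	unsigned int *count = para;

	*count += le16_to_cpu(bucket_xh(bucket)->xh_count);
	return 0;
}

/*
 * Possible call, assuming blkno/clusters describe one xattr extent:
 *
 *	unsigned int nr = 0;
 *	ret = ocfs2_iterate_xattr_buckets(inode, blkno, clusters,
 *					  example_count_bucket_entries, &nr);
 */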
3989
3990struct ocfs2_xattr_tree_list {
3991 char *buffer;
3992 size_t buffer_size;
Tao Ma936b8832008-10-09 23:06:14 +08003993 size_t result;
Tao Ma0c044f02008-08-18 17:38:50 +08003994};
3995
Tao Mafd68a892009-08-18 11:43:21 +08003996static int ocfs2_xattr_bucket_get_name_value(struct super_block *sb,
Tao Ma0c044f02008-08-18 17:38:50 +08003997 struct ocfs2_xattr_header *xh,
3998 int index,
3999 int *block_off,
4000 int *new_offset)
4001{
4002 u16 name_offset;
4003
4004 if (index < 0 || index >= le16_to_cpu(xh->xh_count))
4005 return -EINVAL;
4006
4007 name_offset = le16_to_cpu(xh->xh_entries[index].xe_name_offset);
4008
Tao Mafd68a892009-08-18 11:43:21 +08004009 *block_off = name_offset >> sb->s_blocksize_bits;
4010 *new_offset = name_offset % sb->s_blocksize;
Tao Ma0c044f02008-08-18 17:38:50 +08004011
4012 return 0;
4013}
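
/*
 * A worked sketch of the split above: xe_name_offset is measured from
 * the start of the bucket, and a bucket is a run of blocks, so the block
 * index is the quotient and the in-block offset is the remainder.
 * With 4KB blocks (an example only), name_offset 9000 lands in block 2
 * at offset 808.
 */
#include <stdint.h>

static void toy_split_name_offset(uint32_t name_offset, uint32_t blocksize,
				  int *block_off, int *new_offset)
{
	*block_off = name_offset / blocksize;	/* the code above shifts */
	*new_offset = name_offset % blocksize;
}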
4014
4015static int ocfs2_list_xattr_bucket(struct inode *inode,
4016 struct ocfs2_xattr_bucket *bucket,
4017 void *para)
4018{
Tao Ma936b8832008-10-09 23:06:14 +08004019 int ret = 0, type;
Tao Ma0c044f02008-08-18 17:38:50 +08004020 struct ocfs2_xattr_tree_list *xl = (struct ocfs2_xattr_tree_list *)para;
Tao Ma0c044f02008-08-18 17:38:50 +08004021 int i, block_off, new_offset;
Tao Ma936b8832008-10-09 23:06:14 +08004022 const char *prefix, *name;
Tao Ma0c044f02008-08-18 17:38:50 +08004023
Joel Becker3e632942008-10-24 17:04:49 -07004024 for (i = 0 ; i < le16_to_cpu(bucket_xh(bucket)->xh_count); i++) {
4025 struct ocfs2_xattr_entry *entry = &bucket_xh(bucket)->xh_entries[i];
Tao Ma936b8832008-10-09 23:06:14 +08004026 type = ocfs2_xattr_get_type(entry);
4027 prefix = ocfs2_xattr_prefix(type);
Tao Ma0c044f02008-08-18 17:38:50 +08004028
Tao Ma936b8832008-10-09 23:06:14 +08004029 if (prefix) {
Tao Mafd68a892009-08-18 11:43:21 +08004030 ret = ocfs2_xattr_bucket_get_name_value(inode->i_sb,
Joel Becker3e632942008-10-24 17:04:49 -07004031 bucket_xh(bucket),
Tao Ma0c044f02008-08-18 17:38:50 +08004032 i,
4033 &block_off,
4034 &new_offset);
4035 if (ret)
4036 break;
Tao Ma936b8832008-10-09 23:06:14 +08004037
Joel Becker51def392008-10-24 16:57:21 -07004038 name = (const char *)bucket_block(bucket, block_off) +
Tao Ma936b8832008-10-09 23:06:14 +08004039 new_offset;
4040 ret = ocfs2_xattr_list_entry(xl->buffer,
4041 xl->buffer_size,
4042 &xl->result,
4043 prefix, name,
4044 entry->xe_name_len);
4045 if (ret)
4046 break;
Tao Ma0c044f02008-08-18 17:38:50 +08004047 }
4048 }
4049
4050 return ret;
4051}
4052
Tao Ma47bca492009-08-18 11:43:42 +08004053static int ocfs2_iterate_xattr_index_block(struct inode *inode,
4054 struct buffer_head *blk_bh,
4055 xattr_tree_rec_func *rec_func,
4056 void *para)
Tao Ma0c044f02008-08-18 17:38:50 +08004057{
Tao Ma47bca492009-08-18 11:43:42 +08004058 struct ocfs2_xattr_block *xb =
4059 (struct ocfs2_xattr_block *)blk_bh->b_data;
4060 struct ocfs2_extent_list *el = &xb->xb_attrs.xb_root.xt_list;
Tao Ma0c044f02008-08-18 17:38:50 +08004061 int ret = 0;
4062 u32 name_hash = UINT_MAX, e_cpos = 0, num_clusters = 0;
4063 u64 p_blkno = 0;
Tao Ma0c044f02008-08-18 17:38:50 +08004064
Tao Ma47bca492009-08-18 11:43:42 +08004065 if (!el->l_next_free_rec || !rec_func)
Tao Ma0c044f02008-08-18 17:38:50 +08004066 return 0;
4067
4068 while (name_hash > 0) {
4069 ret = ocfs2_xattr_get_rec(inode, name_hash, &p_blkno,
4070 &e_cpos, &num_clusters, el);
4071 if (ret) {
4072 mlog_errno(ret);
Tao Ma47bca492009-08-18 11:43:42 +08004073 break;
Tao Ma0c044f02008-08-18 17:38:50 +08004074 }
4075
Tao Ma47bca492009-08-18 11:43:42 +08004076 ret = rec_func(inode, blk_bh, p_blkno, e_cpos,
4077 num_clusters, para);
Tao Ma0c044f02008-08-18 17:38:50 +08004078 if (ret) {
Tao Maa46fa682009-05-04 05:18:09 +08004079 if (ret != -ERANGE)
4080 mlog_errno(ret);
Tao Ma47bca492009-08-18 11:43:42 +08004081 break;
Tao Ma0c044f02008-08-18 17:38:50 +08004082 }
4083
4084 if (e_cpos == 0)
4085 break;
4086
4087 name_hash = e_cpos - 1;
4088 }
4089
Tao Ma47bca492009-08-18 11:43:42 +08004090 return ret;
4091
4092}
4093
4094static int ocfs2_list_xattr_tree_rec(struct inode *inode,
4095 struct buffer_head *root_bh,
4096 u64 blkno, u32 cpos, u32 len, void *para)
4097{
4098 return ocfs2_iterate_xattr_buckets(inode, blkno, len,
4099 ocfs2_list_xattr_bucket, para);
4100}
4101
4102static int ocfs2_xattr_tree_list_index_block(struct inode *inode,
4103 struct buffer_head *blk_bh,
4104 char *buffer,
4105 size_t buffer_size)
4106{
4107 int ret;
4108 struct ocfs2_xattr_tree_list xl = {
4109 .buffer = buffer,
4110 .buffer_size = buffer_size,
4111 .result = 0,
4112 };
4113
4114 ret = ocfs2_iterate_xattr_index_block(inode, blk_bh,
4115 ocfs2_list_xattr_tree_rec, &xl);
4116 if (ret) {
4117 mlog_errno(ret);
4118 goto out;
4119 }
4120
Tao Ma936b8832008-10-09 23:06:14 +08004121 ret = xl.result;
Tao Ma0c044f02008-08-18 17:38:50 +08004122out:
4123 return ret;
4124}
Tao Ma01225592008-08-18 17:38:53 +08004125
4126static int cmp_xe(const void *a, const void *b)
4127{
4128 const struct ocfs2_xattr_entry *l = a, *r = b;
4129 u32 l_hash = le32_to_cpu(l->xe_name_hash);
4130 u32 r_hash = le32_to_cpu(r->xe_name_hash);
4131
4132 if (l_hash > r_hash)
4133 return 1;
4134 if (l_hash < r_hash)
4135 return -1;
4136 return 0;
4137}
4138
4139static void swap_xe(void *a, void *b, int size)
4140{
4141 struct ocfs2_xattr_entry *l = a, *r = b, tmp;
4142
4143 tmp = *l;
4144 memcpy(l, r, sizeof(struct ocfs2_xattr_entry));
4145 memcpy(r, &tmp, sizeof(struct ocfs2_xattr_entry));
4146}
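
/*
 * A usage sketch for the pair above: sort() is the kernel's lib/sort.c
 * helper, and cmp_xe/swap_xe order an ocfs2_xattr_entry array by name
 * hash, exactly as ocfs2_cp_xattr_block_to_bucket() does below.  The
 * wrapper itself is illustrative, not an existing function.
 */
static void example_sort_entries_by_hash(struct ocfs2_xattr_entry *entries,
					 u16 count)
{
	sort(entries, count, sizeof(struct ocfs2_xattr_entry),
	     cmp_xe, swap_xe);
}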
4147
4148/*
 4149 * When the ocfs2_xattr_block is filled up, a new bucket will be created
4150 * and all the xattr entries will be moved to the new bucket.
Joel Becker178eeac2008-10-27 15:18:29 -07004151 * The header goes at the start of the bucket, and the names+values are
4152 * filled from the end. This is why *target starts as the last buffer.
Tao Ma01225592008-08-18 17:38:53 +08004153 * Note: we need to sort the entries since they are not saved in order
4154 * in the ocfs2_xattr_block.
4155 */
4156static void ocfs2_cp_xattr_block_to_bucket(struct inode *inode,
4157 struct buffer_head *xb_bh,
Joel Becker178eeac2008-10-27 15:18:29 -07004158 struct ocfs2_xattr_bucket *bucket)
Tao Ma01225592008-08-18 17:38:53 +08004159{
4160 int i, blocksize = inode->i_sb->s_blocksize;
Joel Becker178eeac2008-10-27 15:18:29 -07004161 int blks = ocfs2_blocks_per_xattr_bucket(inode->i_sb);
Tao Ma01225592008-08-18 17:38:53 +08004162 u16 offset, size, off_change;
4163 struct ocfs2_xattr_entry *xe;
4164 struct ocfs2_xattr_block *xb =
4165 (struct ocfs2_xattr_block *)xb_bh->b_data;
4166 struct ocfs2_xattr_header *xb_xh = &xb->xb_attrs.xb_header;
Joel Becker178eeac2008-10-27 15:18:29 -07004167 struct ocfs2_xattr_header *xh = bucket_xh(bucket);
Tao Ma01225592008-08-18 17:38:53 +08004168 u16 count = le16_to_cpu(xb_xh->xh_count);
Joel Becker178eeac2008-10-27 15:18:29 -07004169 char *src = xb_bh->b_data;
4170 char *target = bucket_block(bucket, blks - 1);
Tao Ma01225592008-08-18 17:38:53 +08004171
Tao Ma402b4182011-02-23 22:01:17 +08004172 trace_ocfs2_cp_xattr_block_to_bucket_begin(
4173 (unsigned long long)xb_bh->b_blocknr,
4174 (unsigned long long)bucket_blkno(bucket));
Tao Ma01225592008-08-18 17:38:53 +08004175
Joel Becker178eeac2008-10-27 15:18:29 -07004176 for (i = 0; i < blks; i++)
4177 memset(bucket_block(bucket, i), 0, blocksize);
4178
Tao Ma01225592008-08-18 17:38:53 +08004179 /*
4180 * Since the xe_name_offset is based on ocfs2_xattr_header,
 4181	 * there is an offset change corresponding to the change of
4182 * ocfs2_xattr_header's position.
4183 */
4184 off_change = offsetof(struct ocfs2_xattr_block, xb_attrs.xb_header);
4185 xe = &xb_xh->xh_entries[count - 1];
4186 offset = le16_to_cpu(xe->xe_name_offset) + off_change;
4187 size = blocksize - offset;
4188
4189 /* copy all the names and values. */
Tao Ma01225592008-08-18 17:38:53 +08004190 memcpy(target + offset, src + offset, size);
4191
4192 /* Init new header now. */
4193 xh->xh_count = xb_xh->xh_count;
4194 xh->xh_num_buckets = cpu_to_le16(1);
4195 xh->xh_name_value_len = cpu_to_le16(size);
4196 xh->xh_free_start = cpu_to_le16(OCFS2_XATTR_BUCKET_SIZE - size);
4197
4198 /* copy all the entries. */
Joel Becker178eeac2008-10-27 15:18:29 -07004199 target = bucket_block(bucket, 0);
Tao Ma01225592008-08-18 17:38:53 +08004200 offset = offsetof(struct ocfs2_xattr_header, xh_entries);
4201 size = count * sizeof(struct ocfs2_xattr_entry);
4202 memcpy(target + offset, (char *)xb_xh + offset, size);
4203
4204 /* Change the xe offset for all the xe because of the move. */
4205 off_change = OCFS2_XATTR_BUCKET_SIZE - blocksize +
4206 offsetof(struct ocfs2_xattr_block, xb_attrs.xb_header);
4207 for (i = 0; i < count; i++)
4208 le16_add_cpu(&xh->xh_entries[i].xe_name_offset, off_change);
4209
Tao Ma402b4182011-02-23 22:01:17 +08004210 trace_ocfs2_cp_xattr_block_to_bucket_end(offset, size, off_change);
Tao Ma01225592008-08-18 17:38:53 +08004211
4212 sort(target + offset, count, sizeof(struct ocfs2_xattr_entry),
4213 cmp_xe, swap_xe);
4214}
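
/*
 * A sketch of the offset rewrite above.  In the old xattr block the
 * name offsets are relative to the ocfs2_xattr_header embedded in that
 * block; in the bucket they are relative to the start of the bucket,
 * and the name/value area now lives in the bucket's last block.  The
 * parameters stand in for OCFS2_XATTR_BUCKET_SIZE, the block size and
 * offsetof(struct ocfs2_xattr_block, xb_attrs.xb_header).
 */
#include <stdint.h>

static uint16_t toy_rewrite_name_offset(uint16_t old_offset,
					uint32_t bucket_size,
					uint32_t blocksize,
					uint32_t header_off_in_block)
{
	uint32_t off_change = bucket_size - blocksize + header_off_in_block;

	return (uint16_t)(old_offset + off_change);
}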
4215
4216/*
 4217 * After we move the xattrs from the block to the index b-tree, we have to
 4218 * update ocfs2_xattr_search to point at the new xe and base.
 4219 *
 4220 * When the entry is in an xattr block, xattr_bh indicates the storage place.
 4221 * When the entry is in the index b-tree, "bucket" indicates the
 4222 * real location of the xattr.
4223 */
Joel Becker178eeac2008-10-27 15:18:29 -07004224static void ocfs2_xattr_update_xattr_search(struct inode *inode,
4225 struct ocfs2_xattr_search *xs,
4226 struct buffer_head *old_bh)
Tao Ma01225592008-08-18 17:38:53 +08004227{
Tao Ma01225592008-08-18 17:38:53 +08004228 char *buf = old_bh->b_data;
4229 struct ocfs2_xattr_block *old_xb = (struct ocfs2_xattr_block *)buf;
4230 struct ocfs2_xattr_header *old_xh = &old_xb->xb_attrs.xb_header;
Joel Becker178eeac2008-10-27 15:18:29 -07004231 int i;
Tao Ma01225592008-08-18 17:38:53 +08004232
Joel Beckerba937122008-10-24 19:13:20 -07004233 xs->header = bucket_xh(xs->bucket);
Joel Becker178eeac2008-10-27 15:18:29 -07004234 xs->base = bucket_block(xs->bucket, 0);
Tao Ma01225592008-08-18 17:38:53 +08004235 xs->end = xs->base + inode->i_sb->s_blocksize;
4236
Joel Becker178eeac2008-10-27 15:18:29 -07004237 if (xs->not_found)
4238 return;
Tao Ma01225592008-08-18 17:38:53 +08004239
Joel Becker178eeac2008-10-27 15:18:29 -07004240 i = xs->here - old_xh->xh_entries;
4241 xs->here = &xs->header->xh_entries[i];
Tao Ma01225592008-08-18 17:38:53 +08004242}
4243
4244static int ocfs2_xattr_create_index_block(struct inode *inode,
Tao Ma78f30c32008-11-12 08:27:00 +08004245 struct ocfs2_xattr_search *xs,
4246 struct ocfs2_xattr_set_ctxt *ctxt)
Tao Ma01225592008-08-18 17:38:53 +08004247{
Tao Ma85db90e2008-11-12 08:27:01 +08004248 int ret;
Tao Ma01225592008-08-18 17:38:53 +08004249 u32 bit_off, len;
4250 u64 blkno;
Tao Ma85db90e2008-11-12 08:27:01 +08004251 handle_t *handle = ctxt->handle;
Tao Ma01225592008-08-18 17:38:53 +08004252 struct ocfs2_inode_info *oi = OCFS2_I(inode);
Tao Ma01225592008-08-18 17:38:53 +08004253 struct buffer_head *xb_bh = xs->xattr_bh;
4254 struct ocfs2_xattr_block *xb =
4255 (struct ocfs2_xattr_block *)xb_bh->b_data;
4256 struct ocfs2_xattr_tree_root *xr;
4257 u16 xb_flags = le16_to_cpu(xb->xb_flags);
Tao Ma01225592008-08-18 17:38:53 +08004258
Tao Ma402b4182011-02-23 22:01:17 +08004259 trace_ocfs2_xattr_create_index_block_begin(
4260 (unsigned long long)xb_bh->b_blocknr);
Tao Ma01225592008-08-18 17:38:53 +08004261
4262 BUG_ON(xb_flags & OCFS2_XATTR_INDEXED);
Joel Becker178eeac2008-10-27 15:18:29 -07004263 BUG_ON(!xs->bucket);
Tao Ma01225592008-08-18 17:38:53 +08004264
Tao Ma01225592008-08-18 17:38:53 +08004265 /*
4266 * XXX:
4267 * We can use this lock for now, and maybe move to a dedicated mutex
4268 * if performance becomes a problem later.
4269 */
4270 down_write(&oi->ip_alloc_sem);
4271
Joel Becker0cf2f762009-02-12 16:41:25 -08004272 ret = ocfs2_journal_access_xb(handle, INODE_CACHE(inode), xb_bh,
Joel Becker84008972008-12-09 16:11:49 -08004273 OCFS2_JOURNAL_ACCESS_WRITE);
Tao Ma01225592008-08-18 17:38:53 +08004274 if (ret) {
4275 mlog_errno(ret);
Tao Ma85db90e2008-11-12 08:27:01 +08004276 goto out;
Tao Ma01225592008-08-18 17:38:53 +08004277 }
4278
Joel Becker1ed9b772010-05-06 13:59:06 +08004279 ret = __ocfs2_claim_clusters(handle, ctxt->data_ac,
Tao Ma78f30c32008-11-12 08:27:00 +08004280 1, 1, &bit_off, &len);
Tao Ma01225592008-08-18 17:38:53 +08004281 if (ret) {
4282 mlog_errno(ret);
Tao Ma85db90e2008-11-12 08:27:01 +08004283 goto out;
Tao Ma01225592008-08-18 17:38:53 +08004284 }
4285
4286 /*
 4287	 * The bucket may span many blocks, and
 4288	 * we will only touch the 1st block and the last block
 4289	 * in the whole bucket (one for the entries and one for the data).
4290 */
4291 blkno = ocfs2_clusters_to_blocks(inode->i_sb, bit_off);
4292
Tao Ma402b4182011-02-23 22:01:17 +08004293 trace_ocfs2_xattr_create_index_block((unsigned long long)blkno);
Tao Ma01225592008-08-18 17:38:53 +08004294
Joel Becker178eeac2008-10-27 15:18:29 -07004295 ret = ocfs2_init_xattr_bucket(xs->bucket, blkno);
Tao Ma01225592008-08-18 17:38:53 +08004296 if (ret) {
4297 mlog_errno(ret);
Tao Ma85db90e2008-11-12 08:27:01 +08004298 goto out;
Tao Ma01225592008-08-18 17:38:53 +08004299 }
4300
Joel Becker178eeac2008-10-27 15:18:29 -07004301 ret = ocfs2_xattr_bucket_journal_access(handle, xs->bucket,
4302 OCFS2_JOURNAL_ACCESS_CREATE);
Joel Beckerbd60bd32008-10-20 18:25:56 -07004303 if (ret) {
4304 mlog_errno(ret);
Tao Ma85db90e2008-11-12 08:27:01 +08004305 goto out;
Joel Beckerbd60bd32008-10-20 18:25:56 -07004306 }
Tao Ma01225592008-08-18 17:38:53 +08004307
Joel Becker178eeac2008-10-27 15:18:29 -07004308 ocfs2_cp_xattr_block_to_bucket(inode, xb_bh, xs->bucket);
4309 ocfs2_xattr_bucket_journal_dirty(handle, xs->bucket);
4310
4311 ocfs2_xattr_update_xattr_search(inode, xs, xb_bh);
4312
Tao Ma01225592008-08-18 17:38:53 +08004313 /* Change from ocfs2_xattr_header to ocfs2_xattr_tree_root */
4314 memset(&xb->xb_attrs, 0, inode->i_sb->s_blocksize -
4315 offsetof(struct ocfs2_xattr_block, xb_attrs));
4316
4317 xr = &xb->xb_attrs.xb_root;
4318 xr->xt_clusters = cpu_to_le32(1);
4319 xr->xt_last_eb_blk = 0;
4320 xr->xt_list.l_tree_depth = 0;
4321 xr->xt_list.l_count = cpu_to_le16(ocfs2_xattr_recs_per_xb(inode->i_sb));
4322 xr->xt_list.l_next_free_rec = cpu_to_le16(1);
4323
4324 xr->xt_list.l_recs[0].e_cpos = 0;
4325 xr->xt_list.l_recs[0].e_blkno = cpu_to_le64(blkno);
4326 xr->xt_list.l_recs[0].e_leaf_clusters = cpu_to_le16(1);
4327
4328 xb->xb_flags = cpu_to_le16(xb_flags | OCFS2_XATTR_INDEXED);
4329
Tao Ma85db90e2008-11-12 08:27:01 +08004330 ocfs2_journal_dirty(handle, xb_bh);
Tao Ma01225592008-08-18 17:38:53 +08004331
Tao Ma85db90e2008-11-12 08:27:01 +08004332out:
Tao Ma01225592008-08-18 17:38:53 +08004333 up_write(&oi->ip_alloc_sem);
4334
Tao Ma01225592008-08-18 17:38:53 +08004335 return ret;
4336}
4337
4338static int cmp_xe_offset(const void *a, const void *b)
4339{
4340 const struct ocfs2_xattr_entry *l = a, *r = b;
4341 u32 l_name_offset = le16_to_cpu(l->xe_name_offset);
4342 u32 r_name_offset = le16_to_cpu(r->xe_name_offset);
4343
4344 if (l_name_offset < r_name_offset)
4345 return 1;
4346 if (l_name_offset > r_name_offset)
4347 return -1;
4348 return 0;
4349}
4350
4351/*
 4352 * Defragment an xattr bucket if we find that the bucket has some
 4353 * holes between name/value pairs.
 4354 * We will move all the name/value pairs to the end of the bucket
 4355 * so that we can spare some space for insertion.
4356 */
4357static int ocfs2_defrag_xattr_bucket(struct inode *inode,
Tao Ma85db90e2008-11-12 08:27:01 +08004358 handle_t *handle,
Tao Ma01225592008-08-18 17:38:53 +08004359 struct ocfs2_xattr_bucket *bucket)
4360{
4361 int ret, i;
Joel Becker199799a2009-08-14 19:04:15 -07004362 size_t end, offset, len;
Tao Ma01225592008-08-18 17:38:53 +08004363 struct ocfs2_xattr_header *xh;
4364 char *entries, *buf, *bucket_buf = NULL;
Joel Becker9c7759a2008-10-24 16:21:03 -07004365 u64 blkno = bucket_blkno(bucket);
Tao Ma01225592008-08-18 17:38:53 +08004366 u16 xh_free_start;
Tao Ma01225592008-08-18 17:38:53 +08004367 size_t blocksize = inode->i_sb->s_blocksize;
Tao Ma01225592008-08-18 17:38:53 +08004368 struct ocfs2_xattr_entry *xe;
Tao Ma01225592008-08-18 17:38:53 +08004369
4370 /*
4371 * In order to make the operation more efficient and generic,
 4372	 * we copy all the blocks into contiguous memory and do the
 4373	 * defragmentation there, so if anything goes wrong, we will not
 4374	 * touch the real blocks.
4375 */
4376 bucket_buf = kmalloc(OCFS2_XATTR_BUCKET_SIZE, GFP_NOFS);
4377 if (!bucket_buf) {
4378 ret = -EIO;
4379 goto out;
4380 }
4381
Joel Becker161d6f32008-10-27 15:25:18 -07004382 buf = bucket_buf;
Tao Ma1c32a2f2008-11-06 08:10:47 +08004383 for (i = 0; i < bucket->bu_blocks; i++, buf += blocksize)
4384 memcpy(buf, bucket_block(bucket, i), blocksize);
Joel Becker161d6f32008-10-27 15:25:18 -07004385
Tao Ma1c32a2f2008-11-06 08:10:47 +08004386 ret = ocfs2_xattr_bucket_journal_access(handle, bucket,
Joel Becker161d6f32008-10-27 15:25:18 -07004387 OCFS2_JOURNAL_ACCESS_WRITE);
4388 if (ret < 0) {
4389 mlog_errno(ret);
Tao Ma85db90e2008-11-12 08:27:01 +08004390 goto out;
Tao Ma01225592008-08-18 17:38:53 +08004391 }
4392
4393 xh = (struct ocfs2_xattr_header *)bucket_buf;
4394 entries = (char *)xh->xh_entries;
4395 xh_free_start = le16_to_cpu(xh->xh_free_start);
4396
Tao Ma402b4182011-02-23 22:01:17 +08004397 trace_ocfs2_defrag_xattr_bucket(
Mark Fashehde29c082008-10-29 14:45:30 -07004398 (unsigned long long)blkno, le16_to_cpu(xh->xh_count),
4399 xh_free_start, le16_to_cpu(xh->xh_name_value_len));
Tao Ma01225592008-08-18 17:38:53 +08004400
4401 /*
 4402	 * Sort all the entries by their offset.
 4403	 * The largest offset comes first, so that we can
 4404	 * move them to the end one by one.
4405 */
4406 sort(entries, le16_to_cpu(xh->xh_count),
4407 sizeof(struct ocfs2_xattr_entry),
4408 cmp_xe_offset, swap_xe);
4409
4410 /* Move all name/values to the end of the bucket. */
4411 xe = xh->xh_entries;
4412 end = OCFS2_XATTR_BUCKET_SIZE;
4413 for (i = 0; i < le16_to_cpu(xh->xh_count); i++, xe++) {
4414 offset = le16_to_cpu(xe->xe_name_offset);
Joel Becker199799a2009-08-14 19:04:15 -07004415 len = namevalue_size_xe(xe);
Tao Ma01225592008-08-18 17:38:53 +08004416
4417 /*
 4418		 * stays within a single block. So adjust end to
4419 * exist in the same block. So adjust end to
4420 * the previous block end if needed.
4421 */
4422 if (((end - len) / blocksize !=
4423 (end - 1) / blocksize))
4424 end = end - end % blocksize;
4425
4426 if (end > offset + len) {
4427 memmove(bucket_buf + end - len,
4428 bucket_buf + offset, len);
4429 xe->xe_name_offset = cpu_to_le16(end - len);
4430 }
4431
4432 mlog_bug_on_msg(end < offset + len, "Defrag check failed for "
4433 "bucket %llu\n", (unsigned long long)blkno);
4434
4435 end -= len;
4436 }
4437
4438 mlog_bug_on_msg(xh_free_start > end, "Defrag check failed for "
4439 "bucket %llu\n", (unsigned long long)blkno);
4440
4441 if (xh_free_start == end)
Tao Ma85db90e2008-11-12 08:27:01 +08004442 goto out;
Tao Ma01225592008-08-18 17:38:53 +08004443
4444 memset(bucket_buf + xh_free_start, 0, end - xh_free_start);
4445 xh->xh_free_start = cpu_to_le16(end);
4446
4447 /* sort the entries by their name_hash. */
4448 sort(entries, le16_to_cpu(xh->xh_count),
4449 sizeof(struct ocfs2_xattr_entry),
4450 cmp_xe, swap_xe);
4451
4452 buf = bucket_buf;
Tao Ma1c32a2f2008-11-06 08:10:47 +08004453 for (i = 0; i < bucket->bu_blocks; i++, buf += blocksize)
4454 memcpy(bucket_block(bucket, i), buf, blocksize);
4455 ocfs2_xattr_bucket_journal_dirty(handle, bucket);
Tao Ma01225592008-08-18 17:38:53 +08004456
Tao Ma01225592008-08-18 17:38:53 +08004457out:
Tao Ma01225592008-08-18 17:38:53 +08004458 kfree(bucket_buf);
4459 return ret;
4460}
4461
4462/*
Joel Beckerb5c03e72008-11-25 19:58:16 -08004463 * prev_blkno points to the start of an existing extent. new_blkno
4464 * points to a newly allocated extent. Because we know each of our
4465 * clusters contains more than one bucket, we can easily split one cluster
4466 * at a bucket boundary. So we take the last cluster of the existing
4467 * extent and split it down the middle. We move the last half of the
4468 * buckets in the last cluster of the existing extent over to the new
4469 * extent.
Tao Ma01225592008-08-18 17:38:53 +08004470 *
Joel Beckerb5c03e72008-11-25 19:58:16 -08004471 * first_bh is the buffer at prev_blkno so we can update the existing
4472 * extent's bucket count. header_bh is the bucket where we were hoping
4473 * to insert our xattr. If the bucket move places the target in the new
4474 * extent, we'll update first_bh and header_bh after modifying the old
4475 * extent.
4476 *
4477 * first_hash will be set as the 1st xe's name_hash in the new extent.
Tao Ma01225592008-08-18 17:38:53 +08004478 */
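/*
 * A small example of the split, with illustrative sizes only (1K
 * blocks, 4K buckets and 16K clusters, i.e. 4 buckets per cluster):
 * with num_clusters == 3, last_cluster_blkno is bucket_blkno(first)
 * plus 2 clusters' worth of blocks (32).  to_move == 4 / 2 == 2, so
 * the last two buckets of that cluster are copied to new_blkno, and
 * src_blkno (the first bucket that moved) is last_cluster_blkno + 8.
 * If the target bucket sat at or past src_blkno, it is re-read from
 * its new home in the fresh extent.
 */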
4479static int ocfs2_mv_xattr_bucket_cross_cluster(struct inode *inode,
4480 handle_t *handle,
Joel Becker41cb8142008-11-26 14:25:21 -08004481 struct ocfs2_xattr_bucket *first,
4482 struct ocfs2_xattr_bucket *target,
Tao Ma01225592008-08-18 17:38:53 +08004483 u64 new_blkno,
Tao Ma01225592008-08-18 17:38:53 +08004484 u32 num_clusters,
4485 u32 *first_hash)
4486{
Joel Beckerc58b6032008-11-26 13:36:24 -08004487 int ret;
Joel Becker41cb8142008-11-26 14:25:21 -08004488 struct super_block *sb = inode->i_sb;
4489 int blks_per_bucket = ocfs2_blocks_per_xattr_bucket(sb);
4490 int num_buckets = ocfs2_xattr_buckets_per_cluster(OCFS2_SB(sb));
Joel Beckerb5c03e72008-11-25 19:58:16 -08004491 int to_move = num_buckets / 2;
Joel Beckerc58b6032008-11-26 13:36:24 -08004492 u64 src_blkno;
Joel Becker41cb8142008-11-26 14:25:21 -08004493 u64 last_cluster_blkno = bucket_blkno(first) +
4494 ((num_clusters - 1) * ocfs2_clusters_to_blocks(sb, 1));
Tao Ma01225592008-08-18 17:38:53 +08004495
Joel Becker41cb8142008-11-26 14:25:21 -08004496 BUG_ON(le16_to_cpu(bucket_xh(first)->xh_num_buckets) < num_buckets);
4497 BUG_ON(OCFS2_XATTR_BUCKET_SIZE == OCFS2_SB(sb)->s_clustersize);
Tao Ma01225592008-08-18 17:38:53 +08004498
Tao Ma402b4182011-02-23 22:01:17 +08004499 trace_ocfs2_mv_xattr_bucket_cross_cluster(
4500 (unsigned long long)last_cluster_blkno,
4501 (unsigned long long)new_blkno);
Tao Ma01225592008-08-18 17:38:53 +08004502
Joel Becker41cb8142008-11-26 14:25:21 -08004503 ret = ocfs2_mv_xattr_buckets(inode, handle, bucket_blkno(first),
Joel Beckerc58b6032008-11-26 13:36:24 -08004504 last_cluster_blkno, new_blkno,
4505 to_move, first_hash);
Joel Beckerb5c03e72008-11-25 19:58:16 -08004506 if (ret) {
4507 mlog_errno(ret);
4508 goto out;
4509 }
4510
Joel Beckerc58b6032008-11-26 13:36:24 -08004511 /* This is the first bucket that got moved */
4512 src_blkno = last_cluster_blkno + (to_move * blks_per_bucket);
4513
Tao Ma01225592008-08-18 17:38:53 +08004514 /*
Joel Beckerc58b6032008-11-26 13:36:24 -08004515 * If the target bucket was part of the moved buckets, we need to
Joel Becker41cb8142008-11-26 14:25:21 -08004516 * update first and target.
Joel Beckerb5c03e72008-11-25 19:58:16 -08004517 */
Joel Becker41cb8142008-11-26 14:25:21 -08004518 if (bucket_blkno(target) >= src_blkno) {
Joel Beckerb5c03e72008-11-25 19:58:16 -08004519 /* Find the block for the new target bucket */
4520 src_blkno = new_blkno +
Joel Becker41cb8142008-11-26 14:25:21 -08004521 (bucket_blkno(target) - src_blkno);
4522
4523 ocfs2_xattr_bucket_relse(first);
4524 ocfs2_xattr_bucket_relse(target);
Joel Beckerb5c03e72008-11-25 19:58:16 -08004525
4526 /*
Joel Beckerc58b6032008-11-26 13:36:24 -08004527 * These shouldn't fail - the buffers are in the
Joel Beckerb5c03e72008-11-25 19:58:16 -08004528 * journal from ocfs2_cp_xattr_bucket().
4529 */
Joel Becker41cb8142008-11-26 14:25:21 -08004530 ret = ocfs2_read_xattr_bucket(first, new_blkno);
Joel Beckerc58b6032008-11-26 13:36:24 -08004531 if (ret) {
4532 mlog_errno(ret);
4533 goto out;
4534 }
Joel Becker41cb8142008-11-26 14:25:21 -08004535 ret = ocfs2_read_xattr_bucket(target, src_blkno);
4536 if (ret)
Joel Beckerb5c03e72008-11-25 19:58:16 -08004537 mlog_errno(ret);
Joel Beckerb5c03e72008-11-25 19:58:16 -08004538
Joel Beckerb5c03e72008-11-25 19:58:16 -08004539 }
4540
Tao Ma01225592008-08-18 17:38:53 +08004541out:
Tao Ma01225592008-08-18 17:38:53 +08004542 return ret;
4543}
4544
Tao Ma01225592008-08-18 17:38:53 +08004545/*
Tao Ma80bcaf32008-10-27 06:06:24 +08004546 * Find a suitable position at which to divide a bucket in two.
4547 * We have to make sure the xattrs with the same hash value exist
4548 * in the same bucket.
4549 *
4550 * If this ocfs2_xattr_header covers more than one hash value, find a
4551 * place where the hash value changes. Try to find the most even split.
4552 * The most common case is that all entries have different hash values,
4553 * and the first check we make will find a place to split.
Tao Ma01225592008-08-18 17:38:53 +08004554 */
Tao Ma80bcaf32008-10-27 06:06:24 +08004555static int ocfs2_xattr_find_divide_pos(struct ocfs2_xattr_header *xh)
4556{
4557 struct ocfs2_xattr_entry *entries = xh->xh_entries;
4558 int count = le16_to_cpu(xh->xh_count);
4559 int delta, middle = count / 2;
4560
4561 /*
4562 * We start at the middle. Each step gets farther away in both
4563 * directions. We therefore hit the change in hash value
4564 * nearest to the middle. Note that this loop does not execute for
4565 * count < 2.
4566 */
4567 for (delta = 0; delta < middle; delta++) {
4568 /* Let's check delta earlier than middle */
4569 if (cmp_xe(&entries[middle - delta - 1],
4570 &entries[middle - delta]))
4571 return middle - delta;
4572
4573 /* For even counts, don't walk off the end */
4574 if ((middle + delta + 1) == count)
4575 continue;
4576
4577 /* Now try delta past middle */
4578 if (cmp_xe(&entries[middle + delta],
4579 &entries[middle + delta + 1]))
4580 return middle + delta + 1;
4581 }
4582
4583 /* Every entry had the same hash */
4584 return count;
4585}
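/*
 * For instance (treating cmp_xe() as a pure hash comparison for the
 * sake of the example), with hashes {10, 20, 20, 20, 30, 40} the loop
 * in ocfs2_xattr_find_divide_pos() sees count == 6 and middle == 3.
 * At delta == 0 the pair (entries[2], entries[3]) is 20/20, so no
 * split there, but (entries[3], entries[4]) is 20/30, so it returns
 * 4: the first four entries stay and the last two go to the new
 * bucket.  If every hash were equal, the loop would fall through and
 * return count.
 */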
4586
4587/*
4588 * Move some xattrs in old bucket(blk) to new bucket(new_blk).
4589 * first_hash will record the 1st hash of the new bucket.
4590 *
4591 * Normally half of the xattrs will be moved. But we have to make
4592 * sure that the xattrs with the same hash value are stored in the
4593 * same bucket. If all the xattrs in this bucket have the same hash
4594 * value, the new bucket will be initialized as an empty one and the
4595 * first_hash will be initialized as (hash_value+1).
4596 */
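/*
 * For example, if all of a bucket's entries hash to 0x1234, start
 * will equal count: the new bucket is left empty and *first_hash is
 * set to 0x1235, one past the old bucket's (single) hash value.
 */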
4597static int ocfs2_divide_xattr_bucket(struct inode *inode,
4598 handle_t *handle,
4599 u64 blk,
4600 u64 new_blk,
4601 u32 *first_hash,
4602 int new_bucket_head)
Tao Ma01225592008-08-18 17:38:53 +08004603{
4604 int ret, i;
Joel Becker199799a2009-08-14 19:04:15 -07004605 int count, start, len, name_value_len = 0, name_offset = 0;
Joel Beckerba937122008-10-24 19:13:20 -07004606 struct ocfs2_xattr_bucket *s_bucket = NULL, *t_bucket = NULL;
Tao Ma01225592008-08-18 17:38:53 +08004607 struct ocfs2_xattr_header *xh;
4608 struct ocfs2_xattr_entry *xe;
4609 int blocksize = inode->i_sb->s_blocksize;
4610
Tao Ma402b4182011-02-23 22:01:17 +08004611 trace_ocfs2_divide_xattr_bucket_begin((unsigned long long)blk,
4612 (unsigned long long)new_blk);
Tao Ma01225592008-08-18 17:38:53 +08004613
Joel Beckerba937122008-10-24 19:13:20 -07004614 s_bucket = ocfs2_xattr_bucket_new(inode);
4615 t_bucket = ocfs2_xattr_bucket_new(inode);
4616 if (!s_bucket || !t_bucket) {
4617 ret = -ENOMEM;
4618 mlog_errno(ret);
4619 goto out;
4620 }
Tao Ma01225592008-08-18 17:38:53 +08004621
Joel Beckerba937122008-10-24 19:13:20 -07004622 ret = ocfs2_read_xattr_bucket(s_bucket, blk);
Tao Ma01225592008-08-18 17:38:53 +08004623 if (ret) {
4624 mlog_errno(ret);
4625 goto out;
4626 }
4627
Joel Beckerba937122008-10-24 19:13:20 -07004628 ret = ocfs2_xattr_bucket_journal_access(handle, s_bucket,
Joel Becker1224be02008-10-24 18:47:33 -07004629 OCFS2_JOURNAL_ACCESS_WRITE);
Tao Ma01225592008-08-18 17:38:53 +08004630 if (ret) {
4631 mlog_errno(ret);
4632 goto out;
4633 }
4634
Joel Becker784b8162008-10-24 17:33:40 -07004635 /*
4636 * Even if !new_bucket_head, we're overwriting t_bucket. Thus,
4637 * there's no need to read it.
4638 */
Joel Beckerba937122008-10-24 19:13:20 -07004639 ret = ocfs2_init_xattr_bucket(t_bucket, new_blk);
Tao Ma01225592008-08-18 17:38:53 +08004640 if (ret) {
4641 mlog_errno(ret);
4642 goto out;
4643 }
4644
Joel Becker2b656c12008-11-25 19:00:15 -08004645 /*
4646 * Hey, if we're overwriting t_bucket, what difference does
4647 * ACCESS_CREATE vs ACCESS_WRITE make? See the comment in the
4648 * same part of ocfs2_cp_xattr_bucket().
4649 */
Joel Beckerba937122008-10-24 19:13:20 -07004650 ret = ocfs2_xattr_bucket_journal_access(handle, t_bucket,
Joel Becker1224be02008-10-24 18:47:33 -07004651 new_bucket_head ?
4652 OCFS2_JOURNAL_ACCESS_CREATE :
4653 OCFS2_JOURNAL_ACCESS_WRITE);
4654 if (ret) {
4655 mlog_errno(ret);
4656 goto out;
Tao Ma01225592008-08-18 17:38:53 +08004657 }
4658
Joel Beckerba937122008-10-24 19:13:20 -07004659 xh = bucket_xh(s_bucket);
Tao Ma80bcaf32008-10-27 06:06:24 +08004660 count = le16_to_cpu(xh->xh_count);
4661 start = ocfs2_xattr_find_divide_pos(xh);
4662
4663 if (start == count) {
4664 xe = &xh->xh_entries[start-1];
4665
4666 /*
4667 * Initialize a new empty bucket here.
4668 * The hash value is set as one larger than
4669 * that of the last entry in the previous bucket.
4670 */
Joel Beckerba937122008-10-24 19:13:20 -07004671 for (i = 0; i < t_bucket->bu_blocks; i++)
4672 memset(bucket_block(t_bucket, i), 0, blocksize);
Tao Ma80bcaf32008-10-27 06:06:24 +08004673
Joel Beckerba937122008-10-24 19:13:20 -07004674 xh = bucket_xh(t_bucket);
Tao Ma80bcaf32008-10-27 06:06:24 +08004675 xh->xh_free_start = cpu_to_le16(blocksize);
4676 xh->xh_entries[0].xe_name_hash = xe->xe_name_hash;
4677 le32_add_cpu(&xh->xh_entries[0].xe_name_hash, 1);
4678
4679 goto set_num_buckets;
4680 }
4681
Tao Ma01225592008-08-18 17:38:53 +08004682 /* copy the whole bucket to the new first. */
Joel Beckerba937122008-10-24 19:13:20 -07004683 ocfs2_xattr_bucket_copy_data(t_bucket, s_bucket);
Tao Ma01225592008-08-18 17:38:53 +08004684
4685 /* update the new bucket. */
Joel Beckerba937122008-10-24 19:13:20 -07004686 xh = bucket_xh(t_bucket);
Tao Ma01225592008-08-18 17:38:53 +08004687
4688 /*
4689 * Calculate the total name/value len and xh_free_start for
4690 * the old bucket first.
4691 */
4692 name_offset = OCFS2_XATTR_BUCKET_SIZE;
4693 name_value_len = 0;
4694 for (i = 0; i < start; i++) {
4695 xe = &xh->xh_entries[i];
Joel Becker199799a2009-08-14 19:04:15 -07004696 name_value_len += namevalue_size_xe(xe);
Tao Ma01225592008-08-18 17:38:53 +08004697 if (le16_to_cpu(xe->xe_name_offset) < name_offset)
4698 name_offset = le16_to_cpu(xe->xe_name_offset);
4699 }
4700
4701 /*
4702 * Now begin the modification to the new bucket.
4703 *
4704 * In the new bucket, we just move the xattr entries to the beginning
4705 * and don't touch the name/value data. So there will be some holes in the
4706 * bucket, and they will be removed when ocfs2_defrag_xattr_bucket is
4707 * called.
4708 */
4709 xe = &xh->xh_entries[start];
4710 len = sizeof(struct ocfs2_xattr_entry) * (count - start);
Tao Ma402b4182011-02-23 22:01:17 +08004711 trace_ocfs2_divide_xattr_bucket_move(len,
4712 (int)((char *)xe - (char *)xh),
4713 (int)((char *)xh->xh_entries - (char *)xh));
Tao Ma01225592008-08-18 17:38:53 +08004714 memmove((char *)xh->xh_entries, (char *)xe, len);
4715 xe = &xh->xh_entries[count - start];
4716 len = sizeof(struct ocfs2_xattr_entry) * start;
4717 memset((char *)xe, 0, len);
4718
4719 le16_add_cpu(&xh->xh_count, -start);
4720 le16_add_cpu(&xh->xh_name_value_len, -name_value_len);
4721
4722 /* Calculate xh_free_start for the new bucket. */
4723 xh->xh_free_start = cpu_to_le16(OCFS2_XATTR_BUCKET_SIZE);
4724 for (i = 0; i < le16_to_cpu(xh->xh_count); i++) {
4725 xe = &xh->xh_entries[i];
Tao Ma01225592008-08-18 17:38:53 +08004726 if (le16_to_cpu(xe->xe_name_offset) <
4727 le16_to_cpu(xh->xh_free_start))
4728 xh->xh_free_start = xe->xe_name_offset;
4729 }
4730
Tao Ma80bcaf32008-10-27 06:06:24 +08004731set_num_buckets:
Tao Ma01225592008-08-18 17:38:53 +08004732 /* set xh->xh_num_buckets for the new xh. */
4733 if (new_bucket_head)
4734 xh->xh_num_buckets = cpu_to_le16(1);
4735 else
4736 xh->xh_num_buckets = 0;
4737
Joel Beckerba937122008-10-24 19:13:20 -07004738 ocfs2_xattr_bucket_journal_dirty(handle, t_bucket);
Tao Ma01225592008-08-18 17:38:53 +08004739
4740 /* store the first_hash of the new bucket. */
4741 if (first_hash)
4742 *first_hash = le32_to_cpu(xh->xh_entries[0].xe_name_hash);
4743
4744 /*
Tao Ma80bcaf32008-10-27 06:06:24 +08004745 * Now only update the 1st block of the old bucket. If we
4746 * just added a new empty bucket, there is no need to modify
4747 * it.
Tao Ma01225592008-08-18 17:38:53 +08004748 */
Tao Ma80bcaf32008-10-27 06:06:24 +08004749 if (start == count)
4750 goto out;
4751
Joel Beckerba937122008-10-24 19:13:20 -07004752 xh = bucket_xh(s_bucket);
Tao Ma01225592008-08-18 17:38:53 +08004753 memset(&xh->xh_entries[start], 0,
4754 sizeof(struct ocfs2_xattr_entry) * (count - start));
4755 xh->xh_count = cpu_to_le16(start);
4756 xh->xh_free_start = cpu_to_le16(name_offset);
4757 xh->xh_name_value_len = cpu_to_le16(name_value_len);
4758
Joel Beckerba937122008-10-24 19:13:20 -07004759 ocfs2_xattr_bucket_journal_dirty(handle, s_bucket);
Tao Ma01225592008-08-18 17:38:53 +08004760
4761out:
Joel Beckerba937122008-10-24 19:13:20 -07004762 ocfs2_xattr_bucket_free(s_bucket);
4763 ocfs2_xattr_bucket_free(t_bucket);
Tao Ma01225592008-08-18 17:38:53 +08004764
4765 return ret;
4766}
4767
4768/*
4769 * Copy xattr from one bucket to another bucket.
4770 *
4771 * The caller must make sure that the journal transaction
4772 * has enough space for journaling.
4773 */
4774static int ocfs2_cp_xattr_bucket(struct inode *inode,
4775 handle_t *handle,
4776 u64 s_blkno,
4777 u64 t_blkno,
4778 int t_is_new)
4779{
Joel Becker4980c6d2008-10-24 18:54:43 -07004780 int ret;
Joel Beckerba937122008-10-24 19:13:20 -07004781 struct ocfs2_xattr_bucket *s_bucket = NULL, *t_bucket = NULL;
Tao Ma01225592008-08-18 17:38:53 +08004782
4783 BUG_ON(s_blkno == t_blkno);
4784
Tao Ma402b4182011-02-23 22:01:17 +08004785 trace_ocfs2_cp_xattr_bucket((unsigned long long)s_blkno,
4786 (unsigned long long)t_blkno,
4787 t_is_new);
Tao Ma01225592008-08-18 17:38:53 +08004788
Joel Beckerba937122008-10-24 19:13:20 -07004789 s_bucket = ocfs2_xattr_bucket_new(inode);
4790 t_bucket = ocfs2_xattr_bucket_new(inode);
4791 if (!s_bucket || !t_bucket) {
4792 ret = -ENOMEM;
4793 mlog_errno(ret);
4794 goto out;
4795 }
Joel Becker92de1092008-11-25 17:06:40 -08004796
Joel Beckerba937122008-10-24 19:13:20 -07004797 ret = ocfs2_read_xattr_bucket(s_bucket, s_blkno);
Tao Ma01225592008-08-18 17:38:53 +08004798 if (ret)
4799 goto out;
4800
Joel Becker784b8162008-10-24 17:33:40 -07004801 /*
4802 * Even if !t_is_new, we're overwriting t_bucket. Thus,
4803 * there's no need to read it.
4804 */
Joel Beckerba937122008-10-24 19:13:20 -07004805 ret = ocfs2_init_xattr_bucket(t_bucket, t_blkno);
Tao Ma01225592008-08-18 17:38:53 +08004806 if (ret)
4807 goto out;
4808
Joel Becker2b656c12008-11-25 19:00:15 -08004809 /*
4810 * Hey, if we're overwriting t_bucket, what difference does
4811 * ACCESS_CREATE vs ACCESS_WRITE make? Well, if we allocated a new
Joel Becker874d65a2008-11-26 13:02:18 -08004812 * cluster to fill, we came here from
4813 * ocfs2_mv_xattr_buckets(), and it is really new -
4814 * ACCESS_CREATE is required. But we also might have moved data
4815 * out of t_bucket before extending back into it.
4816 * ocfs2_add_new_xattr_bucket() can do this - its call to
4817 * ocfs2_add_new_xattr_cluster() may have created a new extent
Joel Becker2b656c12008-11-25 19:00:15 -08004818 * and copied out the end of the old extent. Then it re-extends
4819 * the old extent back to create space for new xattrs. That's
4820 * how we get here, and the bucket isn't really new.
4821 */
Joel Beckerba937122008-10-24 19:13:20 -07004822 ret = ocfs2_xattr_bucket_journal_access(handle, t_bucket,
Joel Becker1224be02008-10-24 18:47:33 -07004823 t_is_new ?
4824 OCFS2_JOURNAL_ACCESS_CREATE :
4825 OCFS2_JOURNAL_ACCESS_WRITE);
4826 if (ret)
4827 goto out;
Tao Ma01225592008-08-18 17:38:53 +08004828
Joel Beckerba937122008-10-24 19:13:20 -07004829 ocfs2_xattr_bucket_copy_data(t_bucket, s_bucket);
4830 ocfs2_xattr_bucket_journal_dirty(handle, t_bucket);
Tao Ma01225592008-08-18 17:38:53 +08004831
4832out:
Joel Beckerba937122008-10-24 19:13:20 -07004833 ocfs2_xattr_bucket_free(t_bucket);
4834 ocfs2_xattr_bucket_free(s_bucket);
Tao Ma01225592008-08-18 17:38:53 +08004835
4836 return ret;
4837}
4838
4839/*
Joel Becker874d65a2008-11-26 13:02:18 -08004840 * src_blk points to the start of an existing extent. last_blk points to
4841 * the last cluster in that extent. to_blk points to a newly allocated
Joel Becker54ecb6b2008-11-26 13:18:31 -08004842 * extent. We copy the buckets from the cluster at last_blk to the new
4843 * extent. If start_bucket is non-zero, we skip that many buckets before
4844 * we start copying. The new extent's xh_num_buckets gets set to the
4845 * number of buckets we copied. The old extent's xh_num_buckets shrinks
4846 * by the same amount.
Tao Ma01225592008-08-18 17:38:53 +08004847 */
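/*
 * Illustration (made-up numbers, 4 buckets per cluster): if the last
 * cluster holds buckets B0..B3 and start_bucket == 2, then num_buckets
 * drops to 2, last_blk is advanced past B0 and B1, and only B2 and B3
 * are copied to to_blk.  The old extent's xh_num_buckets shrinks by 2
 * and the new extent's xh_num_buckets becomes 2.
 */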
Joel Becker54ecb6b2008-11-26 13:18:31 -08004848static int ocfs2_mv_xattr_buckets(struct inode *inode, handle_t *handle,
4849 u64 src_blk, u64 last_blk, u64 to_blk,
4850 unsigned int start_bucket,
4851 u32 *first_hash)
Tao Ma01225592008-08-18 17:38:53 +08004852{
4853 int i, ret, credits;
4854 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
Joel Becker15d60922008-11-25 18:36:42 -08004855 int blks_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb);
Tao Ma01225592008-08-18 17:38:53 +08004856 int num_buckets = ocfs2_xattr_buckets_per_cluster(osb);
Joel Becker15d60922008-11-25 18:36:42 -08004857 struct ocfs2_xattr_bucket *old_first, *new_first;
Tao Ma01225592008-08-18 17:38:53 +08004858
Tao Ma402b4182011-02-23 22:01:17 +08004859 trace_ocfs2_mv_xattr_buckets((unsigned long long)last_blk,
4860 (unsigned long long)to_blk);
Tao Ma01225592008-08-18 17:38:53 +08004861
Joel Becker54ecb6b2008-11-26 13:18:31 -08004862 BUG_ON(start_bucket >= num_buckets);
4863 if (start_bucket) {
4864 num_buckets -= start_bucket;
4865 last_blk += (start_bucket * blks_per_bucket);
4866 }
4867
Joel Becker15d60922008-11-25 18:36:42 -08004868 /* The first bucket of the original extent */
4869 old_first = ocfs2_xattr_bucket_new(inode);
4870 /* The first bucket of the new extent */
4871 new_first = ocfs2_xattr_bucket_new(inode);
4872 if (!old_first || !new_first) {
4873 ret = -ENOMEM;
4874 mlog_errno(ret);
4875 goto out;
4876 }
4877
Joel Becker874d65a2008-11-26 13:02:18 -08004878 ret = ocfs2_read_xattr_bucket(old_first, src_blk);
Joel Becker15d60922008-11-25 18:36:42 -08004879 if (ret) {
4880 mlog_errno(ret);
4881 goto out;
4882 }
4883
Tao Ma01225592008-08-18 17:38:53 +08004884 /*
Joel Becker54ecb6b2008-11-26 13:18:31 -08004885 * We need to update the first bucket of the old extent and all
4886 * the buckets going to the new extent.
Tao Ma01225592008-08-18 17:38:53 +08004887 */
Tao Mac901fb02010-04-26 14:34:57 +08004888 credits = ((num_buckets + 1) * blks_per_bucket);
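/*
 * e.g. (assuming a 1K block size, so 4 blocks per 4K bucket): moving
 * 2 buckets means touching 2 + 1 == 3 buckets in total (the old
 * extent's first bucket plus the 2 copies written to the new extent),
 * which is 3 * 4 == 12 block credits.
 */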
Tao Ma01225592008-08-18 17:38:53 +08004889 ret = ocfs2_extend_trans(handle, credits);
4890 if (ret) {
4891 mlog_errno(ret);
4892 goto out;
4893 }
4894
Joel Becker15d60922008-11-25 18:36:42 -08004895 ret = ocfs2_xattr_bucket_journal_access(handle, old_first,
4896 OCFS2_JOURNAL_ACCESS_WRITE);
Tao Ma01225592008-08-18 17:38:53 +08004897 if (ret) {
4898 mlog_errno(ret);
4899 goto out;
4900 }
4901
4902 for (i = 0; i < num_buckets; i++) {
4903 ret = ocfs2_cp_xattr_bucket(inode, handle,
Joel Becker874d65a2008-11-26 13:02:18 -08004904 last_blk + (i * blks_per_bucket),
Joel Becker15d60922008-11-25 18:36:42 -08004905 to_blk + (i * blks_per_bucket),
4906 1);
Tao Ma01225592008-08-18 17:38:53 +08004907 if (ret) {
4908 mlog_errno(ret);
4909 goto out;
4910 }
Tao Ma01225592008-08-18 17:38:53 +08004911 }
4912
Joel Becker15d60922008-11-25 18:36:42 -08004913 /*
4914 * Get the new bucket ready before we dirty anything
4915 * (This actually shouldn't fail, because we already dirtied
4916 * it once in ocfs2_cp_xattr_bucket()).
4917 */
4918 ret = ocfs2_read_xattr_bucket(new_first, to_blk);
4919 if (ret) {
Tao Ma01225592008-08-18 17:38:53 +08004920 mlog_errno(ret);
4921 goto out;
4922 }
Joel Becker15d60922008-11-25 18:36:42 -08004923 ret = ocfs2_xattr_bucket_journal_access(handle, new_first,
4924 OCFS2_JOURNAL_ACCESS_WRITE);
Tao Ma01225592008-08-18 17:38:53 +08004925 if (ret) {
4926 mlog_errno(ret);
4927 goto out;
4928 }
4929
Joel Becker15d60922008-11-25 18:36:42 -08004930 /* Now update the headers */
4931 le16_add_cpu(&bucket_xh(old_first)->xh_num_buckets, -num_buckets);
4932 ocfs2_xattr_bucket_journal_dirty(handle, old_first);
Tao Ma01225592008-08-18 17:38:53 +08004933
Joel Becker15d60922008-11-25 18:36:42 -08004934 bucket_xh(new_first)->xh_num_buckets = cpu_to_le16(num_buckets);
4935 ocfs2_xattr_bucket_journal_dirty(handle, new_first);
Tao Ma01225592008-08-18 17:38:53 +08004936
4937 if (first_hash)
Joel Becker15d60922008-11-25 18:36:42 -08004938 *first_hash = le32_to_cpu(bucket_xh(new_first)->xh_entries[0].xe_name_hash);
4939
Tao Ma01225592008-08-18 17:38:53 +08004940out:
Joel Becker15d60922008-11-25 18:36:42 -08004941 ocfs2_xattr_bucket_free(new_first);
4942 ocfs2_xattr_bucket_free(old_first);
Tao Ma01225592008-08-18 17:38:53 +08004943 return ret;
4944}
4945
4946/*
Tao Ma80bcaf32008-10-27 06:06:24 +08004947 * Move some xattrs in this cluster to the new cluster.
Tao Ma01225592008-08-18 17:38:53 +08004948 * This function should only be called when bucket size == cluster size.
4949 * Otherwise ocfs2_mv_xattr_bucket_cross_cluster should be used instead.
4950 */
Tao Ma80bcaf32008-10-27 06:06:24 +08004951static int ocfs2_divide_xattr_cluster(struct inode *inode,
4952 handle_t *handle,
4953 u64 prev_blk,
4954 u64 new_blk,
4955 u32 *first_hash)
Tao Ma01225592008-08-18 17:38:53 +08004956{
4957 u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb);
Tao Mac901fb02010-04-26 14:34:57 +08004958 int ret, credits = 2 * blk_per_bucket;
Tao Ma01225592008-08-18 17:38:53 +08004959
4960 BUG_ON(OCFS2_XATTR_BUCKET_SIZE < OCFS2_SB(inode->i_sb)->s_clustersize);
4961
4962 ret = ocfs2_extend_trans(handle, credits);
4963 if (ret) {
4964 mlog_errno(ret);
4965 return ret;
4966 }
4967
4968 /* Move half of the xattrs in prev_blk to the next bucket. */
Tao Ma80bcaf32008-10-27 06:06:24 +08004969 return ocfs2_divide_xattr_bucket(inode, handle, prev_blk,
4970 new_blk, first_hash, 1);
Tao Ma01225592008-08-18 17:38:53 +08004971}
4972
4973/*
4974 * Move some xattrs from the old cluster to the new one since they are not
4975 * contiguous in ocfs2 xattr tree.
4976 *
4977 * new_blk starts a new separate cluster, and we will move some xattrs from
4978 * prev_blk to it. v_start will be set as the first name hash value in this
4979 * new cluster so that it can be used as e_cpos during tree insertion and
4980 * don't collide with our original b-tree operations. first_bh and header_bh
4981 * will also be updated since they will be used in ocfs2_extend_xattr_bucket
4982 * to extend the insert bucket.
4983 *
4984 * The problem is how many xattrs should we move to the new one and when should
4985 * we update first_bh and header_bh?
4986 * 1. If cluster size > bucket size, that means the previous cluster has more
4987 * than 1 bucket, so just move half nums of bucket into the new cluster and
4988 * update the first_bh and header_bh if the insert bucket has been moved
4989 * to the new cluster.
4990 * 2. If cluster_size == bucket_size:
4991 * a) If the previous extent rec has more than one cluster and the insert
4992 * place isn't in the last cluster, copy the entire last cluster to the
4993 * new one. This time, we don't need to update the first_bh and header_bh
4994 * since they will not be moved into the new cluster.
4995 * b) Otherwise, move the bottom half of the xattrs in the last cluster into
4996 * the new one. And we set the extend flag to zero if the insert place is
4997 * moved into the newly allocated cluster since no extend is needed.
4998 */
4999static int ocfs2_adjust_xattr_cross_cluster(struct inode *inode,
5000 handle_t *handle,
Joel Becker012ee912008-11-26 14:43:31 -08005001 struct ocfs2_xattr_bucket *first,
5002 struct ocfs2_xattr_bucket *target,
Tao Ma01225592008-08-18 17:38:53 +08005003 u64 new_blk,
Tao Ma01225592008-08-18 17:38:53 +08005004 u32 prev_clusters,
5005 u32 *v_start,
5006 int *extend)
5007{
Joel Becker92cf3ad2008-11-26 14:12:09 -08005008 int ret;
Tao Ma01225592008-08-18 17:38:53 +08005009
Tao Ma402b4182011-02-23 22:01:17 +08005010 trace_ocfs2_adjust_xattr_cross_cluster(
5011 (unsigned long long)bucket_blkno(first),
5012 (unsigned long long)new_blk, prev_clusters);
Tao Ma01225592008-08-18 17:38:53 +08005013
Joel Becker41cb8142008-11-26 14:25:21 -08005014 if (ocfs2_xattr_buckets_per_cluster(OCFS2_SB(inode->i_sb)) > 1) {
Tao Ma01225592008-08-18 17:38:53 +08005015 ret = ocfs2_mv_xattr_bucket_cross_cluster(inode,
5016 handle,
Joel Becker41cb8142008-11-26 14:25:21 -08005017 first, target,
Tao Ma01225592008-08-18 17:38:53 +08005018 new_blk,
Tao Ma01225592008-08-18 17:38:53 +08005019 prev_clusters,
5020 v_start);
Joel Becker012ee912008-11-26 14:43:31 -08005021 if (ret)
Joel Becker41cb8142008-11-26 14:25:21 -08005022 mlog_errno(ret);
Joel Becker41cb8142008-11-26 14:25:21 -08005023 } else {
Joel Becker92cf3ad2008-11-26 14:12:09 -08005024 /* The start of the last cluster in the first extent */
5025 u64 last_blk = bucket_blkno(first) +
5026 ((prev_clusters - 1) *
5027 ocfs2_clusters_to_blocks(inode->i_sb, 1));
Tao Ma01225592008-08-18 17:38:53 +08005028
Joel Becker012ee912008-11-26 14:43:31 -08005029 if (prev_clusters > 1 && bucket_blkno(target) != last_blk) {
Joel Becker874d65a2008-11-26 13:02:18 -08005030 ret = ocfs2_mv_xattr_buckets(inode, handle,
Joel Becker92cf3ad2008-11-26 14:12:09 -08005031 bucket_blkno(first),
Joel Becker54ecb6b2008-11-26 13:18:31 -08005032 last_blk, new_blk, 0,
Tao Ma01225592008-08-18 17:38:53 +08005033 v_start);
Joel Becker012ee912008-11-26 14:43:31 -08005034 if (ret)
5035 mlog_errno(ret);
5036 } else {
Tao Ma80bcaf32008-10-27 06:06:24 +08005037 ret = ocfs2_divide_xattr_cluster(inode, handle,
5038 last_blk, new_blk,
5039 v_start);
Joel Becker012ee912008-11-26 14:43:31 -08005040 if (ret)
5041 mlog_errno(ret);
Tao Ma01225592008-08-18 17:38:53 +08005042
Joel Becker92cf3ad2008-11-26 14:12:09 -08005043 if ((bucket_blkno(target) == last_blk) && extend)
Tao Ma01225592008-08-18 17:38:53 +08005044 *extend = 0;
5045 }
5046 }
5047
5048 return ret;
5049}
5050
5051/*
5052 * Add a new cluster for xattr storage.
5053 *
5054 * If the new cluster is contiguous with the previous one, it will be
5055 * appended to the same extent record, and num_clusters will be updated.
5056 * If not, we will insert a new extent for it and move some xattrs in
5057 * the last cluster into the new allocated one.
5058 * We also need to limit the maximum size of a btree leaf, otherwise we'll
5059 * lose the benefits of hashing because we'll have to search large leaves.
5060 * So now the maximum size is OCFS2_MAX_XATTR_TREE_LEAF_SIZE (or clustersize,
5061 * if it's bigger).
5062 *
5063 * first_bh is the first block of the previous extent rec and header_bh
5064 * indicates the bucket we will insert the new xattrs. They will be updated
5065 * when the header_bh is moved into the new cluster.
5066 */
5067static int ocfs2_add_new_xattr_cluster(struct inode *inode,
5068 struct buffer_head *root_bh,
Joel Beckered29c0c2008-11-26 15:08:44 -08005069 struct ocfs2_xattr_bucket *first,
5070 struct ocfs2_xattr_bucket *target,
Tao Ma01225592008-08-18 17:38:53 +08005071 u32 *num_clusters,
5072 u32 prev_cpos,
Tao Ma78f30c32008-11-12 08:27:00 +08005073 int *extend,
5074 struct ocfs2_xattr_set_ctxt *ctxt)
Tao Ma01225592008-08-18 17:38:53 +08005075{
Tao Ma85db90e2008-11-12 08:27:01 +08005076 int ret;
Tao Ma01225592008-08-18 17:38:53 +08005077 u16 bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1);
5078 u32 prev_clusters = *num_clusters;
5079 u32 clusters_to_add = 1, bit_off, num_bits, v_start = 0;
5080 u64 block;
Tao Ma85db90e2008-11-12 08:27:01 +08005081 handle_t *handle = ctxt->handle;
Tao Ma01225592008-08-18 17:38:53 +08005082 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
Joel Beckerf99b9b72008-08-20 19:36:33 -07005083 struct ocfs2_extent_tree et;
Tao Ma01225592008-08-18 17:38:53 +08005084
Tao Ma402b4182011-02-23 22:01:17 +08005085 trace_ocfs2_add_new_xattr_cluster_begin(
5086 (unsigned long long)OCFS2_I(inode)->ip_blkno,
5087 (unsigned long long)bucket_blkno(first),
5088 prev_cpos, prev_clusters);
Tao Ma01225592008-08-18 17:38:53 +08005089
Joel Becker5e404e92009-02-13 03:54:22 -08005090 ocfs2_init_xattr_tree_extent_tree(&et, INODE_CACHE(inode), root_bh);
Joel Beckerf99b9b72008-08-20 19:36:33 -07005091
Joel Becker0cf2f762009-02-12 16:41:25 -08005092 ret = ocfs2_journal_access_xb(handle, INODE_CACHE(inode), root_bh,
Joel Becker84008972008-12-09 16:11:49 -08005093 OCFS2_JOURNAL_ACCESS_WRITE);
Tao Ma01225592008-08-18 17:38:53 +08005094 if (ret < 0) {
5095 mlog_errno(ret);
5096 goto leave;
5097 }
5098
Joel Becker1ed9b772010-05-06 13:59:06 +08005099 ret = __ocfs2_claim_clusters(handle, ctxt->data_ac, 1,
Tao Ma01225592008-08-18 17:38:53 +08005100 clusters_to_add, &bit_off, &num_bits);
5101 if (ret < 0) {
5102 if (ret != -ENOSPC)
5103 mlog_errno(ret);
5104 goto leave;
5105 }
5106
5107 BUG_ON(num_bits > clusters_to_add);
5108
5109 block = ocfs2_clusters_to_blocks(osb->sb, bit_off);
Tao Ma402b4182011-02-23 22:01:17 +08005110 trace_ocfs2_add_new_xattr_cluster((unsigned long long)block, num_bits);
Tao Ma01225592008-08-18 17:38:53 +08005111
Joel Beckered29c0c2008-11-26 15:08:44 -08005112 if (bucket_blkno(first) + (prev_clusters * bpc) == block &&
Tao Ma01225592008-08-18 17:38:53 +08005113 (prev_clusters + num_bits) << osb->s_clustersize_bits <=
5114 OCFS2_MAX_XATTR_TREE_LEAF_SIZE) {
5115 /*
5116 * If this cluster is contiguous with the old one and, after
5117 * adding this new cluster, we don't surpass the limit of
5118 * OCFS2_MAX_XATTR_TREE_LEAF_SIZE, cool. We will let it be
5119 * initialized and used like the other buckets in the previous
5120 * cluster.
5121 * So add it as a contiguous one. The caller will handle
5122 * its init process.
5123 */
5124 v_start = prev_cpos + prev_clusters;
5125 *num_clusters = prev_clusters + num_bits;
Tao Ma01225592008-08-18 17:38:53 +08005126 } else {
5127 ret = ocfs2_adjust_xattr_cross_cluster(inode,
5128 handle,
Joel Becker012ee912008-11-26 14:43:31 -08005129 first,
5130 target,
Tao Ma01225592008-08-18 17:38:53 +08005131 block,
Tao Ma01225592008-08-18 17:38:53 +08005132 prev_clusters,
5133 &v_start,
5134 extend);
5135 if (ret) {
5136 mlog_errno(ret);
5137 goto leave;
5138 }
5139 }
5140
Tao Ma402b4182011-02-23 22:01:17 +08005141 trace_ocfs2_add_new_xattr_cluster_insert((unsigned long long)block,
5142 v_start, num_bits);
Joel Beckercc79d8c2009-02-13 03:24:43 -08005143 ret = ocfs2_insert_extent(handle, &et, v_start, block,
Tao Ma78f30c32008-11-12 08:27:00 +08005144 num_bits, 0, ctxt->meta_ac);
Tao Ma01225592008-08-18 17:38:53 +08005145 if (ret < 0) {
5146 mlog_errno(ret);
5147 goto leave;
5148 }
5149
Joel Beckerec20cec2010-03-19 14:13:52 -07005150 ocfs2_journal_dirty(handle, root_bh);
Tao Ma01225592008-08-18 17:38:53 +08005151
5152leave:
Tao Ma01225592008-08-18 17:38:53 +08005153 return ret;
5154}
5155
5156/*
Joel Becker92de1092008-11-25 17:06:40 -08005157 * We are given an extent. 'first' is the bucket at the very front of
5158 * the extent. The extent has space for an additional bucket past
5159 * bucket_xh(first)->xh_num_buckets. 'target_blkno' is the block number
5160 * of the target bucket. We wish to shift every bucket past the target
5161 * down one, filling in that additional space. When we get back to the
5162 * target, we split the target between itself and the now-empty bucket
5163 * at target+1 (aka, target_blkno + blks_per_bucket).
Tao Ma01225592008-08-18 17:38:53 +08005164 */
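/*
 * For instance, with buckets A B C D in the extent and room for one
 * more, targeting B means: copy D into the empty slot E, copy C into
 * D's old slot, then split B's entries between B and the slot that
 * used to hold C (now logically empty).  The letters are purely
 * illustrative.
 */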
5165static int ocfs2_extend_xattr_bucket(struct inode *inode,
Tao Ma85db90e2008-11-12 08:27:01 +08005166 handle_t *handle,
Joel Becker92de1092008-11-25 17:06:40 -08005167 struct ocfs2_xattr_bucket *first,
5168 u64 target_blk,
Tao Ma01225592008-08-18 17:38:53 +08005169 u32 num_clusters)
5170{
5171 int ret, credits;
5172 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
5173 u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb);
Joel Becker92de1092008-11-25 17:06:40 -08005174 u64 end_blk;
5175 u16 new_bucket = le16_to_cpu(bucket_xh(first)->xh_num_buckets);
Tao Ma01225592008-08-18 17:38:53 +08005176
Tao Ma402b4182011-02-23 22:01:17 +08005177 trace_ocfs2_extend_xattr_bucket((unsigned long long)target_blk,
5178 (unsigned long long)bucket_blkno(first),
5179 num_clusters, new_bucket);
Tao Ma01225592008-08-18 17:38:53 +08005180
Joel Becker92de1092008-11-25 17:06:40 -08005181 /* The extent must have room for an additional bucket */
5182 BUG_ON(new_bucket >=
5183 (num_clusters * ocfs2_xattr_buckets_per_cluster(osb)));
Tao Ma01225592008-08-18 17:38:53 +08005184
Joel Becker92de1092008-11-25 17:06:40 -08005185 /* end_blk points to the last existing bucket */
5186 end_blk = bucket_blkno(first) + ((new_bucket - 1) * blk_per_bucket);
Tao Ma01225592008-08-18 17:38:53 +08005187
5188 /*
Joel Becker92de1092008-11-25 17:06:40 -08005189 * end_blk is the start of the last existing bucket.
5190 * Thus, (end_blk - target_blk) covers the target bucket and
5191 * every bucket after it up to, but not including, the last
5192 * existing bucket. Then we add the last existing bucket, the
5193 * new bucket, and the first bucket (3 * blk_per_bucket).
Tao Ma01225592008-08-18 17:38:53 +08005194 */
Tao Mac901fb02010-04-26 14:34:57 +08005195 credits = (end_blk - target_blk) + (3 * blk_per_bucket);
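/*
 * Example with made-up numbers (1K blocks, so blk_per_bucket == 4):
 * if the extent starts at block 100 and already has 5 buckets,
 * end_blk == 100 + 4 * 4 == 116.  Targeting the bucket at block 108
 * gives credits == (116 - 108) + 12 == 20: the two buckets at 108 and
 * 112, plus the last existing bucket, the new bucket, and the first
 * bucket of the extent.
 */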
Tao Ma85db90e2008-11-12 08:27:01 +08005196 ret = ocfs2_extend_trans(handle, credits);
5197 if (ret) {
Tao Ma01225592008-08-18 17:38:53 +08005198 mlog_errno(ret);
5199 goto out;
5200 }
5201
Joel Becker92de1092008-11-25 17:06:40 -08005202 ret = ocfs2_xattr_bucket_journal_access(handle, first,
5203 OCFS2_JOURNAL_ACCESS_WRITE);
Tao Ma01225592008-08-18 17:38:53 +08005204 if (ret) {
5205 mlog_errno(ret);
Tao Ma85db90e2008-11-12 08:27:01 +08005206 goto out;
Tao Ma01225592008-08-18 17:38:53 +08005207 }
5208
Joel Becker92de1092008-11-25 17:06:40 -08005209 while (end_blk != target_blk) {
Tao Ma01225592008-08-18 17:38:53 +08005210 ret = ocfs2_cp_xattr_bucket(inode, handle, end_blk,
5211 end_blk + blk_per_bucket, 0);
5212 if (ret)
Tao Ma85db90e2008-11-12 08:27:01 +08005213 goto out;
Tao Ma01225592008-08-18 17:38:53 +08005214 end_blk -= blk_per_bucket;
5215 }
5216
Joel Becker92de1092008-11-25 17:06:40 -08005217 /* Move half of the xattr in target_blkno to the next bucket. */
5218 ret = ocfs2_divide_xattr_bucket(inode, handle, target_blk,
5219 target_blk + blk_per_bucket, NULL, 0);
Tao Ma01225592008-08-18 17:38:53 +08005220
Joel Becker92de1092008-11-25 17:06:40 -08005221 le16_add_cpu(&bucket_xh(first)->xh_num_buckets, 1);
5222 ocfs2_xattr_bucket_journal_dirty(handle, first);
Tao Ma01225592008-08-18 17:38:53 +08005223
Tao Ma01225592008-08-18 17:38:53 +08005224out:
5225 return ret;
5226}
5227
5228/*
Joel Becker91f20332008-11-26 15:25:41 -08005229 * Add new xattr bucket in an extent record and adjust the buckets
5230 * accordingly. xb_bh is the ocfs2_xattr_block, and target is the
5231 * bucket we want to insert into.
Tao Ma01225592008-08-18 17:38:53 +08005232 *
Joel Becker91f20332008-11-26 15:25:41 -08005233 * In the easy case, we will move all the buckets after target down by
5234 * one. Half of target's xattrs will be moved to the next bucket.
5235 *
5236 * If current cluster is full, we'll allocate a new one. This may not
5237 * be contiguous. The underlying calls will make sure that there is
5238 * space for the insert, shifting buckets around if necessary.
5239 * 'target' may be moved by those calls.
Tao Ma01225592008-08-18 17:38:53 +08005240 */
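/*
 * e.g. with 4 buckets per cluster (an illustrative geometry): while
 * the extent's xh_num_buckets is below num_clusters * 4 we only shift
 * buckets within the extent; once it hits that limit, a whole new
 * cluster has to be claimed first via ocfs2_add_new_xattr_cluster().
 */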
5241static int ocfs2_add_new_xattr_bucket(struct inode *inode,
5242 struct buffer_head *xb_bh,
Joel Becker91f20332008-11-26 15:25:41 -08005243 struct ocfs2_xattr_bucket *target,
Tao Ma78f30c32008-11-12 08:27:00 +08005244 struct ocfs2_xattr_set_ctxt *ctxt)
Tao Ma01225592008-08-18 17:38:53 +08005245{
Tao Ma01225592008-08-18 17:38:53 +08005246 struct ocfs2_xattr_block *xb =
5247 (struct ocfs2_xattr_block *)xb_bh->b_data;
5248 struct ocfs2_xattr_tree_root *xb_root = &xb->xb_attrs.xb_root;
5249 struct ocfs2_extent_list *el = &xb_root->xt_list;
Joel Becker91f20332008-11-26 15:25:41 -08005250 u32 name_hash =
5251 le32_to_cpu(bucket_xh(target)->xh_entries[0].xe_name_hash);
Joel Beckered29c0c2008-11-26 15:08:44 -08005252 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
Tao Ma01225592008-08-18 17:38:53 +08005253 int ret, num_buckets, extend = 1;
5254 u64 p_blkno;
5255 u32 e_cpos, num_clusters;
Joel Becker92de1092008-11-25 17:06:40 -08005256 /* The bucket at the front of the extent */
Joel Becker91f20332008-11-26 15:25:41 -08005257 struct ocfs2_xattr_bucket *first;
Tao Ma01225592008-08-18 17:38:53 +08005258
Tao Ma402b4182011-02-23 22:01:17 +08005259 trace_ocfs2_add_new_xattr_bucket(
5260 (unsigned long long)bucket_blkno(target));
Tao Ma01225592008-08-18 17:38:53 +08005261
Joel Beckered29c0c2008-11-26 15:08:44 -08005262 /* The first bucket of the original extent */
Joel Becker92de1092008-11-25 17:06:40 -08005263 first = ocfs2_xattr_bucket_new(inode);
Joel Becker91f20332008-11-26 15:25:41 -08005264 if (!first) {
Joel Becker92de1092008-11-25 17:06:40 -08005265 ret = -ENOMEM;
5266 mlog_errno(ret);
5267 goto out;
5268 }
5269
Tao Ma01225592008-08-18 17:38:53 +08005270 ret = ocfs2_xattr_get_rec(inode, name_hash, &p_blkno, &e_cpos,
5271 &num_clusters, el);
5272 if (ret) {
5273 mlog_errno(ret);
5274 goto out;
5275 }
5276
Joel Beckered29c0c2008-11-26 15:08:44 -08005277 ret = ocfs2_read_xattr_bucket(first, p_blkno);
5278 if (ret) {
5279 mlog_errno(ret);
5280 goto out;
5281 }
5282
Tao Ma01225592008-08-18 17:38:53 +08005283 num_buckets = ocfs2_xattr_buckets_per_cluster(osb) * num_clusters;
Joel Beckered29c0c2008-11-26 15:08:44 -08005284 if (num_buckets == le16_to_cpu(bucket_xh(first)->xh_num_buckets)) {
5285 /*
5286 * This can move first+target if the target bucket moves
5287 * to the new extent.
5288 */
Tao Ma01225592008-08-18 17:38:53 +08005289 ret = ocfs2_add_new_xattr_cluster(inode,
5290 xb_bh,
Joel Beckered29c0c2008-11-26 15:08:44 -08005291 first,
5292 target,
Tao Ma01225592008-08-18 17:38:53 +08005293 &num_clusters,
5294 e_cpos,
Tao Ma78f30c32008-11-12 08:27:00 +08005295 &extend,
5296 ctxt);
Tao Ma01225592008-08-18 17:38:53 +08005297 if (ret) {
5298 mlog_errno(ret);
5299 goto out;
5300 }
5301 }
5302
Joel Becker92de1092008-11-25 17:06:40 -08005303 if (extend) {
Tao Ma01225592008-08-18 17:38:53 +08005304 ret = ocfs2_extend_xattr_bucket(inode,
Tao Ma85db90e2008-11-12 08:27:01 +08005305 ctxt->handle,
Joel Beckered29c0c2008-11-26 15:08:44 -08005306 first,
5307 bucket_blkno(target),
Tao Ma01225592008-08-18 17:38:53 +08005308 num_clusters);
Joel Becker92de1092008-11-25 17:06:40 -08005309 if (ret)
5310 mlog_errno(ret);
5311 }
5312
Tao Ma01225592008-08-18 17:38:53 +08005313out:
Joel Becker92de1092008-11-25 17:06:40 -08005314 ocfs2_xattr_bucket_free(first);
Joel Beckered29c0c2008-11-26 15:08:44 -08005315
Tao Ma01225592008-08-18 17:38:53 +08005316 return ret;
5317}
5318
5319static inline char *ocfs2_xattr_bucket_get_val(struct inode *inode,
5320 struct ocfs2_xattr_bucket *bucket,
5321 int offs)
5322{
5323 int block_off = offs >> inode->i_sb->s_blocksize_bits;
5324
5325 offs = offs % inode->i_sb->s_blocksize;
Joel Becker51def392008-10-24 16:57:21 -07005326 return bucket_block(bucket, block_off) + offs;
Tao Ma01225592008-08-18 17:38:53 +08005327}
5328
5329/*
Tao Ma01225592008-08-18 17:38:53 +08005330 * Truncate the value of the xe_off entry in the given xattr bucket
5331 * to the new length len. Both the ocfs2_xattr_value_root and the
5332 * entry itself will be updated here.
5333 *
5334 * The whole bucket is journaled and marked dirty (see below).
5335 */
5336static int ocfs2_xattr_bucket_value_truncate(struct inode *inode,
Joel Becker548b0f22008-11-24 19:32:13 -08005337 struct ocfs2_xattr_bucket *bucket,
Tao Ma01225592008-08-18 17:38:53 +08005338 int xe_off,
Tao Ma78f30c32008-11-12 08:27:00 +08005339 int len,
5340 struct ocfs2_xattr_set_ctxt *ctxt)
Tao Ma01225592008-08-18 17:38:53 +08005341{
5342 int ret, offset;
5343 u64 value_blk;
Tao Ma01225592008-08-18 17:38:53 +08005344 struct ocfs2_xattr_entry *xe;
Joel Becker548b0f22008-11-24 19:32:13 -08005345 struct ocfs2_xattr_header *xh = bucket_xh(bucket);
Tao Ma01225592008-08-18 17:38:53 +08005346 size_t blocksize = inode->i_sb->s_blocksize;
Joel Beckerb3e5d372008-12-09 15:01:04 -08005347 struct ocfs2_xattr_value_buf vb = {
5348 .vb_access = ocfs2_journal_access,
5349 };
Tao Ma01225592008-08-18 17:38:53 +08005350
5351 xe = &xh->xh_entries[xe_off];
5352
5353 BUG_ON(!xe || ocfs2_xattr_is_local(xe));
5354
5355 offset = le16_to_cpu(xe->xe_name_offset) +
5356 OCFS2_XATTR_SIZE(xe->xe_name_len);
5357
5358 value_blk = offset / blocksize;
5359
5360 /* We don't allow the ocfs2_xattr_value_root to span blocks. */
5361 BUG_ON(value_blk != (offset + OCFS2_XATTR_ROOT_SIZE - 1) / blocksize);
Tao Ma01225592008-08-18 17:38:53 +08005362
Joel Beckerb3e5d372008-12-09 15:01:04 -08005363 vb.vb_bh = bucket->bu_bhs[value_blk];
5364 BUG_ON(!vb.vb_bh);
Tao Ma01225592008-08-18 17:38:53 +08005365
Joel Beckerb3e5d372008-12-09 15:01:04 -08005366 vb.vb_xv = (struct ocfs2_xattr_value_root *)
5367 (vb.vb_bh->b_data + offset % blocksize);
Tao Ma01225592008-08-18 17:38:53 +08005368
Joel Becker548b0f22008-11-24 19:32:13 -08005369 /*
5370 * From here on out we have to dirty the bucket. The generic
5371 * value calls only modify one of the bucket's bhs, but we need
5372 * to journal the bucket as a whole. So if they error, they *could* have
5373 * modified something. We have to assume they did, and dirty
5374 * the whole bucket. This leaves us in a consistent state.
5375 */
Tao Ma402b4182011-02-23 22:01:17 +08005376 trace_ocfs2_xattr_bucket_value_truncate(
5377 (unsigned long long)bucket_blkno(bucket), xe_off, len);
Joel Beckerb3e5d372008-12-09 15:01:04 -08005378 ret = ocfs2_xattr_value_truncate(inode, &vb, len, ctxt);
Tao Ma01225592008-08-18 17:38:53 +08005379 if (ret) {
5380 mlog_errno(ret);
Tao Ma554e7f92009-01-08 08:21:43 +08005381 goto out;
5382 }
5383
5384 ret = ocfs2_xattr_bucket_journal_access(ctxt->handle, bucket,
5385 OCFS2_JOURNAL_ACCESS_WRITE);
5386 if (ret) {
5387 mlog_errno(ret);
5388 goto out;
Tao Ma01225592008-08-18 17:38:53 +08005389 }
5390
Joel Becker548b0f22008-11-24 19:32:13 -08005391 xe->xe_value_size = cpu_to_le64(len);
5392
Joel Becker548b0f22008-11-24 19:32:13 -08005393 ocfs2_xattr_bucket_journal_dirty(ctxt->handle, bucket);
Tao Ma01225592008-08-18 17:38:53 +08005394
5395out:
Tao Ma01225592008-08-18 17:38:53 +08005396 return ret;
5397}
5398
Tao Ma01225592008-08-18 17:38:53 +08005399static int ocfs2_rm_xattr_cluster(struct inode *inode,
5400 struct buffer_head *root_bh,
5401 u64 blkno,
5402 u32 cpos,
Tao Ma47bca492009-08-18 11:43:42 +08005403 u32 len,
5404 void *para)
Tao Ma01225592008-08-18 17:38:53 +08005405{
5406 int ret;
5407 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
5408 struct inode *tl_inode = osb->osb_tl_inode;
5409 handle_t *handle;
5410 struct ocfs2_xattr_block *xb =
5411 (struct ocfs2_xattr_block *)root_bh->b_data;
Tao Ma01225592008-08-18 17:38:53 +08005412 struct ocfs2_alloc_context *meta_ac = NULL;
5413 struct ocfs2_cached_dealloc_ctxt dealloc;
Joel Beckerf99b9b72008-08-20 19:36:33 -07005414 struct ocfs2_extent_tree et;
5415
Tao Ma47bca492009-08-18 11:43:42 +08005416 ret = ocfs2_iterate_xattr_buckets(inode, blkno, len,
Tao Mace9c5a52009-08-18 11:43:59 +08005417 ocfs2_delete_xattr_in_bucket, para);
Tao Ma47bca492009-08-18 11:43:42 +08005418 if (ret) {
5419 mlog_errno(ret);
5420 return ret;
5421 }
5422
Joel Becker5e404e92009-02-13 03:54:22 -08005423 ocfs2_init_xattr_tree_extent_tree(&et, INODE_CACHE(inode), root_bh);
Tao Ma01225592008-08-18 17:38:53 +08005424
5425 ocfs2_init_dealloc_ctxt(&dealloc);
5426
Tao Ma402b4182011-02-23 22:01:17 +08005427 trace_ocfs2_rm_xattr_cluster(
5428 (unsigned long long)OCFS2_I(inode)->ip_blkno,
5429 (unsigned long long)blkno, cpos, len);
Tao Ma01225592008-08-18 17:38:53 +08005430
Joel Becker8cb471e2009-02-10 20:00:41 -08005431 ocfs2_remove_xattr_clusters_from_cache(INODE_CACHE(inode), blkno,
5432 len);
Tao Ma01225592008-08-18 17:38:53 +08005433
Joel Beckerf99b9b72008-08-20 19:36:33 -07005434 ret = ocfs2_lock_allocators(inode, &et, 0, 1, NULL, &meta_ac);
Tao Ma01225592008-08-18 17:38:53 +08005435 if (ret) {
5436 mlog_errno(ret);
5437 return ret;
5438 }
5439
5440 mutex_lock(&tl_inode->i_mutex);
5441
5442 if (ocfs2_truncate_log_needs_flush(osb)) {
5443 ret = __ocfs2_flush_truncate_log(osb);
5444 if (ret < 0) {
5445 mlog_errno(ret);
5446 goto out;
5447 }
5448 }
5449
Jan Karaa90714c2008-10-09 19:38:40 +02005450 handle = ocfs2_start_trans(osb, ocfs2_remove_extent_credits(osb->sb));
Tao Mad3264792008-10-24 07:57:28 +08005451 if (IS_ERR(handle)) {
Tao Ma01225592008-08-18 17:38:53 +08005452 ret = PTR_ERR(handle);
5453 mlog_errno(ret);
5454 goto out;
5455 }
5456
Joel Becker0cf2f762009-02-12 16:41:25 -08005457 ret = ocfs2_journal_access_xb(handle, INODE_CACHE(inode), root_bh,
Joel Becker84008972008-12-09 16:11:49 -08005458 OCFS2_JOURNAL_ACCESS_WRITE);
Tao Ma01225592008-08-18 17:38:53 +08005459 if (ret) {
5460 mlog_errno(ret);
5461 goto out_commit;
5462 }
5463
Joel Beckerdbdcf6a2009-02-13 03:41:26 -08005464 ret = ocfs2_remove_extent(handle, &et, cpos, len, meta_ac,
Joel Beckerf99b9b72008-08-20 19:36:33 -07005465 &dealloc);
Tao Ma01225592008-08-18 17:38:53 +08005466 if (ret) {
5467 mlog_errno(ret);
5468 goto out_commit;
5469 }
5470
5471 le32_add_cpu(&xb->xb_attrs.xb_root.xt_clusters, -len);
Joel Beckerec20cec2010-03-19 14:13:52 -07005472 ocfs2_journal_dirty(handle, root_bh);
Tao Ma01225592008-08-18 17:38:53 +08005473
5474 ret = ocfs2_truncate_log_append(osb, handle, blkno, len);
5475 if (ret)
5476 mlog_errno(ret);
5477
5478out_commit:
5479 ocfs2_commit_trans(osb, handle);
5480out:
5481 ocfs2_schedule_truncate_log_flush(osb, 1);
5482
5483 mutex_unlock(&tl_inode->i_mutex);
5484
5485 if (meta_ac)
5486 ocfs2_free_alloc_context(meta_ac);
5487
5488 ocfs2_run_deallocs(osb, &dealloc);
5489
5490 return ret;
5491}
5492
Tao Ma01225592008-08-18 17:38:53 +08005493/*
Tao Ma80bcaf32008-10-27 06:06:24 +08005494 * Check whether the xattr bucket is filled entirely with entries of the
5495 * same hash value. If we want to insert an xattr with that same hash,
5496 * return -ENOSPC. If we want to insert an xattr with a different hash
5497 * value, go ahead; ocfs2_divide_xattr_bucket will handle this.
5498 */
Tao Ma01225592008-08-18 17:38:53 +08005499static int ocfs2_check_xattr_bucket_collision(struct inode *inode,
Tao Ma80bcaf32008-10-27 06:06:24 +08005500 struct ocfs2_xattr_bucket *bucket,
5501 const char *name)
Tao Ma01225592008-08-18 17:38:53 +08005502{
Joel Becker3e632942008-10-24 17:04:49 -07005503 struct ocfs2_xattr_header *xh = bucket_xh(bucket);
Tao Ma80bcaf32008-10-27 06:06:24 +08005504 u32 name_hash = ocfs2_xattr_name_hash(inode, name, strlen(name));
5505
5506 if (name_hash != le32_to_cpu(xh->xh_entries[0].xe_name_hash))
5507 return 0;
Tao Ma01225592008-08-18 17:38:53 +08005508
5509 if (xh->xh_entries[le16_to_cpu(xh->xh_count) - 1].xe_name_hash ==
5510 xh->xh_entries[0].xe_name_hash) {
5511 mlog(ML_ERROR, "Too many hash collisions in xattr bucket %llu, "
5512 "hash = %u\n",
Joel Becker9c7759a2008-10-24 16:21:03 -07005513 (unsigned long long)bucket_blkno(bucket),
Tao Ma01225592008-08-18 17:38:53 +08005514 le32_to_cpu(xh->xh_entries[0].xe_name_hash));
5515 return -ENOSPC;
5516 }
5517
5518 return 0;
5519}
5520
Joel Beckerc5d95df2009-08-18 21:03:24 -07005521/*
5522 * Try to set the entry in the current bucket. If we fail, the caller
5523 * will handle getting us another bucket.
5524 */
5525static int ocfs2_xattr_set_entry_bucket(struct inode *inode,
5526 struct ocfs2_xattr_info *xi,
5527 struct ocfs2_xattr_search *xs,
5528 struct ocfs2_xattr_set_ctxt *ctxt)
5529{
5530 int ret;
5531 struct ocfs2_xa_loc loc;
5532
Tao Ma402b4182011-02-23 22:01:17 +08005533 trace_ocfs2_xattr_set_entry_bucket(xi->xi_name);
Joel Beckerc5d95df2009-08-18 21:03:24 -07005534
5535 ocfs2_init_xattr_bucket_xa_loc(&loc, xs->bucket,
5536 xs->not_found ? NULL : xs->here);
5537 ret = ocfs2_xa_set(&loc, xi, ctxt);
5538 if (!ret) {
5539 xs->here = loc.xl_entry;
5540 goto out;
5541 }
5542 if (ret != -ENOSPC) {
5543 mlog_errno(ret);
5544 goto out;
5545 }
5546
5547 /* Ok, we need space. Let's try defragmenting the bucket. */
5548 ret = ocfs2_defrag_xattr_bucket(inode, ctxt->handle,
5549 xs->bucket);
5550 if (ret) {
5551 mlog_errno(ret);
5552 goto out;
5553 }
5554
5555 ret = ocfs2_xa_set(&loc, xi, ctxt);
5556 if (!ret) {
5557 xs->here = loc.xl_entry;
5558 goto out;
5559 }
5560 if (ret != -ENOSPC)
5561 mlog_errno(ret);
5562
5563
5564out:
Joel Beckerc5d95df2009-08-18 21:03:24 -07005565 return ret;
5566}
5567
Tao Ma01225592008-08-18 17:38:53 +08005568static int ocfs2_xattr_set_entry_index_block(struct inode *inode,
5569 struct ocfs2_xattr_info *xi,
Tao Ma78f30c32008-11-12 08:27:00 +08005570 struct ocfs2_xattr_search *xs,
5571 struct ocfs2_xattr_set_ctxt *ctxt)
Tao Ma01225592008-08-18 17:38:53 +08005572{
Joel Beckerc5d95df2009-08-18 21:03:24 -07005573 int ret;
Tao Ma01225592008-08-18 17:38:53 +08005574
Tao Ma402b4182011-02-23 22:01:17 +08005575 trace_ocfs2_xattr_set_entry_index_block(xi->xi_name);
Tao Ma01225592008-08-18 17:38:53 +08005576
Joel Beckerc5d95df2009-08-18 21:03:24 -07005577 ret = ocfs2_xattr_set_entry_bucket(inode, xi, xs, ctxt);
5578 if (!ret)
5579 goto out;
5580 if (ret != -ENOSPC) {
5581 mlog_errno(ret);
5582 goto out;
Tao Ma01225592008-08-18 17:38:53 +08005583 }
5584
Joel Beckerc5d95df2009-08-18 21:03:24 -07005585 /* Ack, need more space. Let's try to get another bucket! */
5586
Tao Ma01225592008-08-18 17:38:53 +08005587 /*
Joel Beckerc5d95df2009-08-18 21:03:24 -07005588 * We do not allow hash ranges to overlap between buckets, so
5589 * the maximum number of collisions we can tolerate is one
5590 * bucket's worth. Check here whether we need to add a new
5591 * bucket for the insert.
Tao Ma01225592008-08-18 17:38:53 +08005592 */
Joel Beckerc5d95df2009-08-18 21:03:24 -07005593 ret = ocfs2_check_xattr_bucket_collision(inode,
Joel Becker91f20332008-11-26 15:25:41 -08005594 xs->bucket,
Joel Beckerc5d95df2009-08-18 21:03:24 -07005595 xi->xi_name);
5596 if (ret) {
5597 mlog_errno(ret);
5598 goto out;
Tao Ma01225592008-08-18 17:38:53 +08005599 }
5600
Joel Beckerc5d95df2009-08-18 21:03:24 -07005601 ret = ocfs2_add_new_xattr_bucket(inode,
5602 xs->xattr_bh,
5603 xs->bucket,
5604 ctxt);
5605 if (ret) {
5606 mlog_errno(ret);
5607 goto out;
5608 }
5609
5610 /*
5611 * ocfs2_add_new_xattr_bucket() will have updated
5612 * xs->bucket if it moved, but it will not have updated
5613 * any of the other search fields. Thus, we drop it and
5614 * re-search. Everything should be cached, so it'll be
5615 * quick.
5616 */
5617 ocfs2_xattr_bucket_relse(xs->bucket);
5618 ret = ocfs2_xattr_index_block_find(inode, xs->xattr_bh,
5619 xi->xi_name_index,
5620 xi->xi_name, xs);
5621 if (ret && ret != -ENODATA)
5622 goto out;
5623 xs->not_found = ret;
5624
5625 /* Ok, we have a new bucket, let's try again */
5626 ret = ocfs2_xattr_set_entry_bucket(inode, xi, xs, ctxt);
5627 if (ret && (ret != -ENOSPC))
5628 mlog_errno(ret);
5629
Tao Ma01225592008-08-18 17:38:53 +08005630out:
Tao Ma01225592008-08-18 17:38:53 +08005631 return ret;
5632}
Tao Maa3944252008-08-18 17:38:54 +08005633
5634static int ocfs2_delete_xattr_in_bucket(struct inode *inode,
5635 struct ocfs2_xattr_bucket *bucket,
5636 void *para)
5637{
Tao Mace9c5a52009-08-18 11:43:59 +08005638 int ret = 0, ref_credits;
Joel Becker3e632942008-10-24 17:04:49 -07005639 struct ocfs2_xattr_header *xh = bucket_xh(bucket);
Tao Maa3944252008-08-18 17:38:54 +08005640 u16 i;
5641 struct ocfs2_xattr_entry *xe;
Tao Ma78f30c32008-11-12 08:27:00 +08005642 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
5643 struct ocfs2_xattr_set_ctxt ctxt = {NULL, NULL,};
Joel Becker548b0f22008-11-24 19:32:13 -08005644 int credits = ocfs2_remove_extent_credits(osb->sb) +
5645 ocfs2_blocks_per_xattr_bucket(inode->i_sb);
Tao Mace9c5a52009-08-18 11:43:59 +08005646 struct ocfs2_xattr_value_root *xv;
5647 struct ocfs2_rm_xattr_bucket_para *args =
5648 (struct ocfs2_rm_xattr_bucket_para *)para;
Tao Ma78f30c32008-11-12 08:27:00 +08005649
5650 ocfs2_init_dealloc_ctxt(&ctxt.dealloc);
Tao Maa3944252008-08-18 17:38:54 +08005651
5652 for (i = 0; i < le16_to_cpu(xh->xh_count); i++) {
5653 xe = &xh->xh_entries[i];
5654 if (ocfs2_xattr_is_local(xe))
5655 continue;
5656
Tao Mace9c5a52009-08-18 11:43:59 +08005657 ret = ocfs2_get_xattr_tree_value_root(inode->i_sb, bucket,
5658 i, &xv, NULL);
5659
5660 ret = ocfs2_lock_xattr_remove_allocators(inode, xv,
5661 args->ref_ci,
5662 args->ref_root_bh,
5663 &ctxt.meta_ac,
5664 &ref_credits);
5665
5666 ctxt.handle = ocfs2_start_trans(osb, credits + ref_credits);
Tao Ma88c3b062008-12-11 08:54:11 +08005667 if (IS_ERR(ctxt.handle)) {
5668 ret = PTR_ERR(ctxt.handle);
5669 mlog_errno(ret);
5670 break;
5671 }
5672
Joel Becker548b0f22008-11-24 19:32:13 -08005673 ret = ocfs2_xattr_bucket_value_truncate(inode, bucket,
Tao Ma78f30c32008-11-12 08:27:00 +08005674 i, 0, &ctxt);
Tao Ma88c3b062008-12-11 08:54:11 +08005675
5676 ocfs2_commit_trans(osb, ctxt.handle);
Tao Mace9c5a52009-08-18 11:43:59 +08005677 if (ctxt.meta_ac) {
5678 ocfs2_free_alloc_context(ctxt.meta_ac);
5679 ctxt.meta_ac = NULL;
5680 }
Tao Maa3944252008-08-18 17:38:54 +08005681 if (ret) {
5682 mlog_errno(ret);
5683 break;
5684 }
5685 }
5686
Tao Mace9c5a52009-08-18 11:43:59 +08005687 if (ctxt.meta_ac)
5688 ocfs2_free_alloc_context(ctxt.meta_ac);
Tao Ma78f30c32008-11-12 08:27:00 +08005689 ocfs2_schedule_truncate_log_flush(osb, 1);
5690 ocfs2_run_deallocs(osb, &ctxt.dealloc);
Tao Maa3944252008-08-18 17:38:54 +08005691 return ret;
5692}
5693
Mark Fasheh99219ae2008-10-07 14:52:59 -07005694/*
Tao Ma492a8a32009-08-18 11:43:17 +08005695 * Whenever we modify an xattr value root in the bucket (e.g., CoW
5696 * or changing the extent record flag), we need to recalculate
5697 * the metaecc for the whole bucket. That is done here.
5698 *
5699 * Note:
5700 * The caller must account for the extra journal credits this needs.
5701 */
5702static int ocfs2_xattr_bucket_post_refcount(struct inode *inode,
5703 handle_t *handle,
5704 void *para)
5705{
5706 int ret;
5707 struct ocfs2_xattr_bucket *bucket =
5708 (struct ocfs2_xattr_bucket *)para;
5709
5710 ret = ocfs2_xattr_bucket_journal_access(handle, bucket,
5711 OCFS2_JOURNAL_ACCESS_WRITE);
5712 if (ret) {
5713 mlog_errno(ret);
5714 return ret;
5715 }
5716
5717 ocfs2_xattr_bucket_journal_dirty(handle, bucket);
5718
5719 return 0;
5720}
5721
5722/*
5723 * Special action we need if the xattr value is refcounted.
5724 *
5725 * 1. If the xattr is refcounted, lock the tree.
5726 * 2. CoW the xattr if we are setting the new value and the value
5727 * will be stored outside.
5728 * 3. Otherwise, decrease_refcount will work for us, so just
5729 * locking the refcount tree and calculating the meta and credits is OK.
5730 *
5731 * We have to do CoW before ocfs2_init_xattr_set_ctxt since
5732 * currently CoW is a complete transaction of its own, while this function
5733 * will also lock the allocators, which could lead us to deadlock. So we will
5734 * CoW the whole xattr value.
5735 */
5736static int ocfs2_prepare_refcount_xattr(struct inode *inode,
5737 struct ocfs2_dinode *di,
5738 struct ocfs2_xattr_info *xi,
5739 struct ocfs2_xattr_search *xis,
5740 struct ocfs2_xattr_search *xbs,
5741 struct ocfs2_refcount_tree **ref_tree,
5742 int *meta_add,
5743 int *credits)
5744{
5745 int ret = 0;
5746 struct ocfs2_xattr_block *xb;
5747 struct ocfs2_xattr_entry *xe;
5748 char *base;
5749 u32 p_cluster, num_clusters;
5750 unsigned int ext_flags;
5751 int name_offset, name_len;
5752 struct ocfs2_xattr_value_buf vb;
5753 struct ocfs2_xattr_bucket *bucket = NULL;
5754 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
5755 struct ocfs2_post_refcount refcount;
5756 struct ocfs2_post_refcount *p = NULL;
5757 struct buffer_head *ref_root_bh = NULL;
5758
5759 if (!xis->not_found) {
5760 xe = xis->here;
5761 name_offset = le16_to_cpu(xe->xe_name_offset);
5762 name_len = OCFS2_XATTR_SIZE(xe->xe_name_len);
5763 base = xis->base;
5764 vb.vb_bh = xis->inode_bh;
5765 vb.vb_access = ocfs2_journal_access_di;
5766 } else {
5767 int i, block_off = 0;
5768 xb = (struct ocfs2_xattr_block *)xbs->xattr_bh->b_data;
5769 xe = xbs->here;
5770 name_offset = le16_to_cpu(xe->xe_name_offset);
5771 name_len = OCFS2_XATTR_SIZE(xe->xe_name_len);
5772 i = xbs->here - xbs->header->xh_entries;
5773
5774 if (le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED) {
Tao Mafd68a892009-08-18 11:43:21 +08005775 ret = ocfs2_xattr_bucket_get_name_value(inode->i_sb,
Tao Ma492a8a32009-08-18 11:43:17 +08005776 bucket_xh(xbs->bucket),
5777 i, &block_off,
5778 &name_offset);
5779 if (ret) {
5780 mlog_errno(ret);
5781 goto out;
5782 }
5783 base = bucket_block(xbs->bucket, block_off);
5784 vb.vb_bh = xbs->bucket->bu_bhs[block_off];
5785 vb.vb_access = ocfs2_journal_access;
5786
5787 if (ocfs2_meta_ecc(osb)) {
5788				/* Create parameters for ocfs2_post_refcount. */
5789 bucket = xbs->bucket;
5790 refcount.credits = bucket->bu_blocks;
5791 refcount.para = bucket;
5792 refcount.func =
5793 ocfs2_xattr_bucket_post_refcount;
5794 p = &refcount;
5795 }
5796 } else {
5797 base = xbs->base;
5798 vb.vb_bh = xbs->xattr_bh;
5799 vb.vb_access = ocfs2_journal_access_xb;
5800 }
5801 }
5802
5803 if (ocfs2_xattr_is_local(xe))
5804 goto out;
5805
5806 vb.vb_xv = (struct ocfs2_xattr_value_root *)
5807 (base + name_offset + name_len);
5808
5809 ret = ocfs2_xattr_get_clusters(inode, 0, &p_cluster,
5810 &num_clusters, &vb.vb_xv->xr_list,
5811 &ext_flags);
5812 if (ret) {
5813 mlog_errno(ret);
5814 goto out;
5815 }
5816
5817 /*
5818 * We just need to check the 1st extent record, since we always
5819	 * CoW the whole xattr. So there shouldn't be an xattr with
5820	 * REFCOUNTED extent recs after the 1st one.
5821 */
5822 if (!(ext_flags & OCFS2_EXT_REFCOUNTED))
5823 goto out;
5824
5825 ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc),
5826 1, ref_tree, &ref_root_bh);
5827 if (ret) {
5828 mlog_errno(ret);
5829 goto out;
5830 }
5831
5832 /*
5833	 * If we are deleting the xattr or the new value is small enough to be
5834	 * stored inline, fine, just leave it; the xattr truncate process will
5835	 * remove the clusters for us (it still needs the refcount tree lock and
5836	 * the metadata/credits). The worst case is that every cluster truncate
5837	 * splits the refcount tree and turns the original extent into 3, so we
5838	 * will need at most 2 extra extent recs per cluster.
5839 */
Joel Becker6b240ff2009-08-14 18:02:52 -07005840 if (!xi->xi_value || xi->xi_value_len <= OCFS2_XATTR_INLINE_SIZE) {
Tao Ma492a8a32009-08-18 11:43:17 +08005841
5842 ret = ocfs2_refcounted_xattr_delete_need(inode,
5843 &(*ref_tree)->rf_ci,
5844 ref_root_bh, vb.vb_xv,
5845 meta_add, credits);
5846 if (ret)
5847 mlog_errno(ret);
5848 goto out;
5849 }
5850
5851 ret = ocfs2_refcount_cow_xattr(inode, di, &vb,
5852 *ref_tree, ref_root_bh, 0,
5853 le32_to_cpu(vb.vb_xv->xr_clusters), p);
5854 if (ret)
5855 mlog_errno(ret);
5856
5857out:
5858 brelse(ref_root_bh);
5859 return ret;
5860}
5861
5862/*
Tao Ma01292412009-09-21 13:04:19 +08005863 * Add the REFCOUNTED flag to all the extent recs in ocfs2_xattr_value_root.
5864 * The physical clusters will be added to the refcount tree.
5865 */
5866static int ocfs2_xattr_value_attach_refcount(struct inode *inode,
5867 struct ocfs2_xattr_value_root *xv,
5868 struct ocfs2_extent_tree *value_et,
5869 struct ocfs2_caching_info *ref_ci,
5870 struct buffer_head *ref_root_bh,
5871 struct ocfs2_cached_dealloc_ctxt *dealloc,
5872 struct ocfs2_post_refcount *refcount)
5873{
5874 int ret = 0;
5875 u32 clusters = le32_to_cpu(xv->xr_clusters);
5876 u32 cpos, p_cluster, num_clusters;
5877 struct ocfs2_extent_list *el = &xv->xr_list;
5878 unsigned int ext_flags;
5879
5880 cpos = 0;
5881 while (cpos < clusters) {
5882 ret = ocfs2_xattr_get_clusters(inode, cpos, &p_cluster,
5883 &num_clusters, el, &ext_flags);
5884
5885 cpos += num_clusters;
5886 if ((ext_flags & OCFS2_EXT_REFCOUNTED))
5887 continue;
5888
5889 BUG_ON(!p_cluster);
5890
5891 ret = ocfs2_add_refcount_flag(inode, value_et,
5892 ref_ci, ref_root_bh,
5893 cpos - num_clusters,
5894 p_cluster, num_clusters,
5895 dealloc, refcount);
5896 if (ret) {
5897 mlog_errno(ret);
5898 break;
5899 }
5900 }
5901
5902 return ret;
5903}
5904
5905/*
5906 * Given a normal ocfs2_xattr_header, refcount all the entries which
5907 * have their value stored outside.
5908 * Used for xattrs stored in the inode and in an ocfs2_xattr_block.
5909 */
5910static int ocfs2_xattr_attach_refcount_normal(struct inode *inode,
5911 struct ocfs2_xattr_value_buf *vb,
5912 struct ocfs2_xattr_header *header,
5913 struct ocfs2_caching_info *ref_ci,
5914 struct buffer_head *ref_root_bh,
5915 struct ocfs2_cached_dealloc_ctxt *dealloc)
5916{
5917
5918 struct ocfs2_xattr_entry *xe;
5919 struct ocfs2_xattr_value_root *xv;
5920 struct ocfs2_extent_tree et;
5921 int i, ret = 0;
5922
5923 for (i = 0; i < le16_to_cpu(header->xh_count); i++) {
5924 xe = &header->xh_entries[i];
5925
5926 if (ocfs2_xattr_is_local(xe))
5927 continue;
5928
5929 xv = (struct ocfs2_xattr_value_root *)((void *)header +
5930 le16_to_cpu(xe->xe_name_offset) +
5931 OCFS2_XATTR_SIZE(xe->xe_name_len));
5932
5933 vb->vb_xv = xv;
5934 ocfs2_init_xattr_value_extent_tree(&et, INODE_CACHE(inode), vb);
5935
5936 ret = ocfs2_xattr_value_attach_refcount(inode, xv, &et,
5937 ref_ci, ref_root_bh,
5938 dealloc, NULL);
5939 if (ret) {
5940 mlog_errno(ret);
5941 break;
5942 }
5943 }
5944
5945 return ret;
5946}
5947
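/*
 * Attach the refcount flag to every non-local xattr value stored in the
 * inode's inline xattr area.
 */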
5948static int ocfs2_xattr_inline_attach_refcount(struct inode *inode,
5949 struct buffer_head *fe_bh,
5950 struct ocfs2_caching_info *ref_ci,
5951 struct buffer_head *ref_root_bh,
5952 struct ocfs2_cached_dealloc_ctxt *dealloc)
5953{
5954 struct ocfs2_dinode *di = (struct ocfs2_dinode *)fe_bh->b_data;
5955 struct ocfs2_xattr_header *header = (struct ocfs2_xattr_header *)
5956 (fe_bh->b_data + inode->i_sb->s_blocksize -
5957 le16_to_cpu(di->i_xattr_inline_size));
5958 struct ocfs2_xattr_value_buf vb = {
5959 .vb_bh = fe_bh,
5960 .vb_access = ocfs2_journal_access_di,
5961 };
5962
5963 return ocfs2_xattr_attach_refcount_normal(inode, &vb, header,
5964 ref_ci, ref_root_bh, dealloc);
5965}
5966
5967struct ocfs2_xattr_tree_value_refcount_para {
5968 struct ocfs2_caching_info *ref_ci;
5969 struct buffer_head *ref_root_bh;
5970 struct ocfs2_cached_dealloc_ctxt *dealloc;
5971};
5972
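/*
 * Return the ocfs2_xattr_value_root of the entry at @offset in an
 * indexed-tree bucket, and optionally the buffer_head that contains it.
 */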
5973static int ocfs2_get_xattr_tree_value_root(struct super_block *sb,
5974 struct ocfs2_xattr_bucket *bucket,
5975 int offset,
5976 struct ocfs2_xattr_value_root **xv,
5977 struct buffer_head **bh)
5978{
5979 int ret, block_off, name_offset;
5980 struct ocfs2_xattr_header *xh = bucket_xh(bucket);
5981 struct ocfs2_xattr_entry *xe = &xh->xh_entries[offset];
5982 void *base;
5983
5984 ret = ocfs2_xattr_bucket_get_name_value(sb,
5985 bucket_xh(bucket),
5986 offset,
5987 &block_off,
5988 &name_offset);
5989 if (ret) {
5990 mlog_errno(ret);
5991 goto out;
5992 }
5993
5994 base = bucket_block(bucket, block_off);
5995
5996 *xv = (struct ocfs2_xattr_value_root *)(base + name_offset +
5997 OCFS2_XATTR_SIZE(xe->xe_name_len));
5998
5999 if (bh)
6000 *bh = bucket->bu_bhs[block_off];
6001out:
6002 return ret;
6003}
6004
6005/*
6006 * For a given xattr bucket, refcount all the entries which
6007 * have their value stored outside.
6008 */
6009static int ocfs2_xattr_bucket_value_refcount(struct inode *inode,
6010 struct ocfs2_xattr_bucket *bucket,
6011 void *para)
6012{
6013 int i, ret = 0;
6014 struct ocfs2_extent_tree et;
6015 struct ocfs2_xattr_tree_value_refcount_para *ref =
6016 (struct ocfs2_xattr_tree_value_refcount_para *)para;
6017 struct ocfs2_xattr_header *xh =
6018 (struct ocfs2_xattr_header *)bucket->bu_bhs[0]->b_data;
6019 struct ocfs2_xattr_entry *xe;
6020 struct ocfs2_xattr_value_buf vb = {
6021 .vb_access = ocfs2_journal_access,
6022 };
6023 struct ocfs2_post_refcount refcount = {
6024 .credits = bucket->bu_blocks,
6025 .para = bucket,
6026 .func = ocfs2_xattr_bucket_post_refcount,
6027 };
6028 struct ocfs2_post_refcount *p = NULL;
6029
6030 /* We only need post_refcount if we support metaecc. */
6031 if (ocfs2_meta_ecc(OCFS2_SB(inode->i_sb)))
6032 p = &refcount;
6033
Tao Ma402b4182011-02-23 22:01:17 +08006034 trace_ocfs2_xattr_bucket_value_refcount(
6035 (unsigned long long)bucket_blkno(bucket),
6036 le16_to_cpu(xh->xh_count));
Tao Ma01292412009-09-21 13:04:19 +08006037 for (i = 0; i < le16_to_cpu(xh->xh_count); i++) {
6038 xe = &xh->xh_entries[i];
6039
6040 if (ocfs2_xattr_is_local(xe))
6041 continue;
6042
6043 ret = ocfs2_get_xattr_tree_value_root(inode->i_sb, bucket, i,
6044 &vb.vb_xv, &vb.vb_bh);
6045 if (ret) {
6046 mlog_errno(ret);
6047 break;
6048 }
6049
6050 ocfs2_init_xattr_value_extent_tree(&et,
6051 INODE_CACHE(inode), &vb);
6052
6053 ret = ocfs2_xattr_value_attach_refcount(inode, vb.vb_xv,
6054 &et, ref->ref_ci,
6055 ref->ref_root_bh,
6056 ref->dealloc, p);
6057 if (ret) {
6058 mlog_errno(ret);
6059 break;
6060 }
6061 }
6062
6063 return ret;
6064
6065}
6066
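/*
 * Xattr tree extent record callback: walk every bucket covered by this
 * record and refcount the values stored outside.
 */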
6067static int ocfs2_refcount_xattr_tree_rec(struct inode *inode,
6068 struct buffer_head *root_bh,
6069 u64 blkno, u32 cpos, u32 len, void *para)
6070{
6071 return ocfs2_iterate_xattr_buckets(inode, blkno, len,
6072 ocfs2_xattr_bucket_value_refcount,
6073 para);
6074}
6075
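/*
 * Refcount all the outside-stored values in an xattr block, whether it
 * holds a plain header or an indexed xattr tree.
 */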
6076static int ocfs2_xattr_block_attach_refcount(struct inode *inode,
6077 struct buffer_head *blk_bh,
6078 struct ocfs2_caching_info *ref_ci,
6079 struct buffer_head *ref_root_bh,
6080 struct ocfs2_cached_dealloc_ctxt *dealloc)
6081{
6082 int ret = 0;
6083 struct ocfs2_xattr_block *xb =
6084 (struct ocfs2_xattr_block *)blk_bh->b_data;
6085
6086 if (!(le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED)) {
6087 struct ocfs2_xattr_header *header = &xb->xb_attrs.xb_header;
6088 struct ocfs2_xattr_value_buf vb = {
6089 .vb_bh = blk_bh,
6090 .vb_access = ocfs2_journal_access_xb,
6091 };
6092
6093 ret = ocfs2_xattr_attach_refcount_normal(inode, &vb, header,
6094 ref_ci, ref_root_bh,
6095 dealloc);
6096 } else {
6097 struct ocfs2_xattr_tree_value_refcount_para para = {
6098 .ref_ci = ref_ci,
6099 .ref_root_bh = ref_root_bh,
6100 .dealloc = dealloc,
6101 };
6102
6103 ret = ocfs2_iterate_xattr_index_block(inode, blk_bh,
6104 ocfs2_refcount_xattr_tree_rec,
6105 &para);
6106 }
6107
6108 return ret;
6109}
6110
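/*
 * Add every outside-stored xattr value of the inode to the refcount tree,
 * covering both the inline xattrs and the external xattr block, if any.
 */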
6111int ocfs2_xattr_attach_refcount_tree(struct inode *inode,
6112 struct buffer_head *fe_bh,
6113 struct ocfs2_caching_info *ref_ci,
6114 struct buffer_head *ref_root_bh,
6115 struct ocfs2_cached_dealloc_ctxt *dealloc)
6116{
6117 int ret = 0;
6118 struct ocfs2_inode_info *oi = OCFS2_I(inode);
6119 struct ocfs2_dinode *di = (struct ocfs2_dinode *)fe_bh->b_data;
6120 struct buffer_head *blk_bh = NULL;
6121
6122 if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL) {
6123 ret = ocfs2_xattr_inline_attach_refcount(inode, fe_bh,
6124 ref_ci, ref_root_bh,
6125 dealloc);
6126 if (ret) {
6127 mlog_errno(ret);
6128 goto out;
6129 }
6130 }
6131
6132 if (!di->i_xattr_loc)
6133 goto out;
6134
6135 ret = ocfs2_read_xattr_block(inode, le64_to_cpu(di->i_xattr_loc),
6136 &blk_bh);
6137 if (ret < 0) {
6138 mlog_errno(ret);
6139 goto out;
6140 }
6141
6142 ret = ocfs2_xattr_block_attach_refcount(inode, blk_bh, ref_ci,
6143 ref_root_bh, dealloc);
6144 if (ret)
6145 mlog_errno(ret);
6146
6147 brelse(blk_bh);
6148out:
6149
6150 return ret;
6151}
6152
Tao Ma0fe9b662009-08-18 11:47:56 +08006153typedef int (should_xattr_reflinked)(struct ocfs2_xattr_entry *xe);
Tao Ma01292412009-09-21 13:04:19 +08006154/*
Tao Ma2999d122009-08-18 11:43:55 +08006155 * Store the information we need for xattr reflink.
6156 * old_bh and new_bh are the inode buffer heads of the old and new inodes.
6157 */
6158struct ocfs2_xattr_reflink {
6159 struct inode *old_inode;
6160 struct inode *new_inode;
6161 struct buffer_head *old_bh;
6162 struct buffer_head *new_bh;
6163 struct ocfs2_caching_info *ref_ci;
6164 struct buffer_head *ref_root_bh;
6165 struct ocfs2_cached_dealloc_ctxt *dealloc;
Tao Ma0fe9b662009-08-18 11:47:56 +08006166 should_xattr_reflinked *xattr_reflinked;
Tao Ma2999d122009-08-18 11:43:55 +08006167};
6168
6169/*
6170 * Given an xattr header and an xe offset,
6171 * return the proper xv and the corresponding bh.
6172 * xattrs in the inode, block and xattr tree have different implementations.
6173 */
6174typedef int (get_xattr_value_root)(struct super_block *sb,
6175 struct buffer_head *bh,
6176 struct ocfs2_xattr_header *xh,
6177 int offset,
6178 struct ocfs2_xattr_value_root **xv,
6179 struct buffer_head **ret_bh,
6180 void *para);
6181
6182/*
6183 * Calculate all the xattr value root metadata stored in this xattr header and
6184 * the credits we need if we create them from scratch.
6185 * We use get_xattr_value_root so that all types of xattr containers can use it.
6186 */
6187static int ocfs2_value_metas_in_xattr_header(struct super_block *sb,
6188 struct buffer_head *bh,
6189 struct ocfs2_xattr_header *xh,
6190 int *metas, int *credits,
6191 int *num_recs,
6192 get_xattr_value_root *func,
6193 void *para)
6194{
6195 int i, ret = 0;
6196 struct ocfs2_xattr_value_root *xv;
6197 struct ocfs2_xattr_entry *xe;
6198
6199 for (i = 0; i < le16_to_cpu(xh->xh_count); i++) {
6200 xe = &xh->xh_entries[i];
6201 if (ocfs2_xattr_is_local(xe))
6202 continue;
6203
6204 ret = func(sb, bh, xh, i, &xv, NULL, para);
6205 if (ret) {
6206 mlog_errno(ret);
6207 break;
6208 }
6209
6210 *metas += le16_to_cpu(xv->xr_list.l_tree_depth) *
6211 le16_to_cpu(xv->xr_list.l_next_free_rec);
6212
6213 *credits += ocfs2_calc_extend_credits(sb,
6214 &def_xv.xv.xr_list,
6215 le32_to_cpu(xv->xr_clusters));
6216
6217 /*
6218		 * If the value is a tree with depth > 0, we don't descend
6219		 * into the extent blocks, so just calculate a maximum record num.
6220 */
6221 if (!xv->xr_list.l_tree_depth)
Tao Ma8ff6af82009-12-23 14:31:15 +08006222 *num_recs += le16_to_cpu(xv->xr_list.l_next_free_rec);
Tao Ma2999d122009-08-18 11:43:55 +08006223 else
6224 *num_recs += ocfs2_clusters_for_bytes(sb,
6225 XATTR_SIZE_MAX);
6226 }
6227
6228 return ret;
6229}
6230
6231/* Used by xattr inode and block to return the right xv and buffer_head. */
6232static int ocfs2_get_xattr_value_root(struct super_block *sb,
6233 struct buffer_head *bh,
6234 struct ocfs2_xattr_header *xh,
6235 int offset,
6236 struct ocfs2_xattr_value_root **xv,
6237 struct buffer_head **ret_bh,
6238 void *para)
6239{
6240 struct ocfs2_xattr_entry *xe = &xh->xh_entries[offset];
6241
6242 *xv = (struct ocfs2_xattr_value_root *)((void *)xh +
6243 le16_to_cpu(xe->xe_name_offset) +
6244 OCFS2_XATTR_SIZE(xe->xe_name_len));
6245
6246 if (ret_bh)
6247 *ret_bh = bh;
6248
6249 return 0;
6250}
6251
6252/*
6253 * Reserve the meta_ac and calculate how many credits we need for reflinking xattrs.
6254 * It is only used for inline xattrs and xattr blocks.
6255 */
6256static int ocfs2_reflink_lock_xattr_allocators(struct ocfs2_super *osb,
6257 struct ocfs2_xattr_header *xh,
6258 struct buffer_head *ref_root_bh,
6259 int *credits,
6260 struct ocfs2_alloc_context **meta_ac)
6261{
6262 int ret, meta_add = 0, num_recs = 0;
6263 struct ocfs2_refcount_block *rb =
6264 (struct ocfs2_refcount_block *)ref_root_bh->b_data;
6265
6266 *credits = 0;
6267
6268 ret = ocfs2_value_metas_in_xattr_header(osb->sb, NULL, xh,
6269 &meta_add, credits, &num_recs,
6270 ocfs2_get_xattr_value_root,
6271 NULL);
6272 if (ret) {
6273 mlog_errno(ret);
6274 goto out;
6275 }
6276
6277 /*
6278	 * We need to add/modify num_recs in the refcount tree, so just calculate
6279	 * an approximate number we need for the refcount tree change.
6280	 * Sometimes we need to split the tree, and after a split half of the recs
6281	 * are moved to the new block, so a new block can only provide
6282	 * half its number of recs. So we multiply the new blocks by 2.
6283 */
6284 num_recs = num_recs / ocfs2_refcount_recs_per_rb(osb->sb) * 2;
6285 meta_add += num_recs;
6286 *credits += num_recs + num_recs * OCFS2_EXPAND_REFCOUNT_TREE_CREDITS;
6287 if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)
6288 *credits += le16_to_cpu(rb->rf_list.l_tree_depth) *
6289 le16_to_cpu(rb->rf_list.l_next_free_rec) + 1;
6290 else
6291 *credits += 1;
6292
6293 ret = ocfs2_reserve_new_metadata_blocks(osb, meta_add, meta_ac);
6294 if (ret)
6295 mlog_errno(ret);
6296
6297out:
6298 return ret;
6299}
6300
6301/*
6302 * Given an xattr header, reflink all the xattrs in this container.
6303 * It can be used for the inode, an xattr block and a bucket.
6304 *
6305 * NOTE:
6306 * Before we call this function, the caller must have already memcpy'd
6307 * the xattrs from old_xh into new_xh.
Tao Ma0fe9b662009-08-18 11:47:56 +08006308 *
6309 * If args.xattr_reflinked is set, call it to decide whether the xe should
6310 * be reflinked or not. If not, remove it from the new xattr header.
Tao Ma2999d122009-08-18 11:43:55 +08006311 */
6312static int ocfs2_reflink_xattr_header(handle_t *handle,
6313 struct ocfs2_xattr_reflink *args,
6314 struct buffer_head *old_bh,
6315 struct ocfs2_xattr_header *xh,
6316 struct buffer_head *new_bh,
6317 struct ocfs2_xattr_header *new_xh,
6318 struct ocfs2_xattr_value_buf *vb,
6319 struct ocfs2_alloc_context *meta_ac,
6320 get_xattr_value_root *func,
6321 void *para)
6322{
Tao Ma0fe9b662009-08-18 11:47:56 +08006323 int ret = 0, i, j;
Tao Ma2999d122009-08-18 11:43:55 +08006324 struct super_block *sb = args->old_inode->i_sb;
6325 struct buffer_head *value_bh;
Tao Ma0fe9b662009-08-18 11:47:56 +08006326 struct ocfs2_xattr_entry *xe, *last;
Tao Ma2999d122009-08-18 11:43:55 +08006327 struct ocfs2_xattr_value_root *xv, *new_xv;
6328 struct ocfs2_extent_tree data_et;
6329 u32 clusters, cpos, p_cluster, num_clusters;
6330 unsigned int ext_flags = 0;
6331
Tao Ma402b4182011-02-23 22:01:17 +08006332 trace_ocfs2_reflink_xattr_header((unsigned long long)old_bh->b_blocknr,
6333 le16_to_cpu(xh->xh_count));
Tao Ma0fe9b662009-08-18 11:47:56 +08006334
6335 last = &new_xh->xh_entries[le16_to_cpu(new_xh->xh_count)];
6336 for (i = 0, j = 0; i < le16_to_cpu(xh->xh_count); i++, j++) {
Tao Ma2999d122009-08-18 11:43:55 +08006337 xe = &xh->xh_entries[i];
6338
Tao Ma0fe9b662009-08-18 11:47:56 +08006339 if (args->xattr_reflinked && !args->xattr_reflinked(xe)) {
6340 xe = &new_xh->xh_entries[j];
6341
6342 le16_add_cpu(&new_xh->xh_count, -1);
6343 if (new_xh->xh_count) {
6344 memmove(xe, xe + 1,
6345 (void *)last - (void *)xe);
6346 memset(last, 0,
6347 sizeof(struct ocfs2_xattr_entry));
6348 }
6349
6350 /*
6351			 * We don't want j to advance in the next round since
6352			 * the entries have already been moved up.
6353 */
6354 j--;
6355 continue;
6356 }
6357
Tao Ma2999d122009-08-18 11:43:55 +08006358 if (ocfs2_xattr_is_local(xe))
6359 continue;
6360
6361 ret = func(sb, old_bh, xh, i, &xv, NULL, para);
6362 if (ret) {
6363 mlog_errno(ret);
6364 break;
6365 }
6366
Tao Ma0fe9b662009-08-18 11:47:56 +08006367 ret = func(sb, new_bh, new_xh, j, &new_xv, &value_bh, para);
Tao Ma2999d122009-08-18 11:43:55 +08006368 if (ret) {
6369 mlog_errno(ret);
6370 break;
6371 }
6372
6373 /*
6374		 * For an xattr which has l_tree_depth = 0, all the extent
6375		 * recs have already been copied to the new xh with the
6376		 * appropriate OCFS2_EXT_REFCOUNTED flag, so we just need to
6377		 * increase the refcount in the refcount tree.
6378 *
6379 * For the xattr which has l_tree_depth > 0, we need
6380 * to initialize it to the empty default value root,
6381 * and then insert the extents one by one.
6382 */
6383 if (xv->xr_list.l_tree_depth) {
6384 memcpy(new_xv, &def_xv, sizeof(def_xv));
6385 vb->vb_xv = new_xv;
6386 vb->vb_bh = value_bh;
6387 ocfs2_init_xattr_value_extent_tree(&data_et,
6388 INODE_CACHE(args->new_inode), vb);
6389 }
6390
6391 clusters = le32_to_cpu(xv->xr_clusters);
6392 cpos = 0;
6393 while (cpos < clusters) {
6394 ret = ocfs2_xattr_get_clusters(args->old_inode,
6395 cpos,
6396 &p_cluster,
6397 &num_clusters,
6398 &xv->xr_list,
6399 &ext_flags);
6400 if (ret) {
6401 mlog_errno(ret);
6402 goto out;
6403 }
6404
6405 BUG_ON(!p_cluster);
6406
6407 if (xv->xr_list.l_tree_depth) {
6408 ret = ocfs2_insert_extent(handle,
6409 &data_et, cpos,
6410 ocfs2_clusters_to_blocks(
6411 args->old_inode->i_sb,
6412 p_cluster),
6413 num_clusters, ext_flags,
6414 meta_ac);
6415 if (ret) {
6416 mlog_errno(ret);
6417 goto out;
6418 }
6419 }
6420
6421 ret = ocfs2_increase_refcount(handle, args->ref_ci,
6422 args->ref_root_bh,
6423 p_cluster, num_clusters,
6424 meta_ac, args->dealloc);
6425 if (ret) {
6426 mlog_errno(ret);
6427 goto out;
6428 }
6429
6430 cpos += num_clusters;
6431 }
6432 }
6433
6434out:
6435 return ret;
6436}
6437
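/*
 * Reflink the xattrs stored inline in the old inode block into the new
 * inode block, taking extra references on any outside-stored values.
 */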
6438static int ocfs2_reflink_xattr_inline(struct ocfs2_xattr_reflink *args)
6439{
6440 int ret = 0, credits = 0;
6441 handle_t *handle;
6442 struct ocfs2_super *osb = OCFS2_SB(args->old_inode->i_sb);
6443 struct ocfs2_dinode *di = (struct ocfs2_dinode *)args->old_bh->b_data;
6444 int inline_size = le16_to_cpu(di->i_xattr_inline_size);
6445 int header_off = osb->sb->s_blocksize - inline_size;
6446 struct ocfs2_xattr_header *xh = (struct ocfs2_xattr_header *)
6447 (args->old_bh->b_data + header_off);
6448 struct ocfs2_xattr_header *new_xh = (struct ocfs2_xattr_header *)
6449 (args->new_bh->b_data + header_off);
6450 struct ocfs2_alloc_context *meta_ac = NULL;
6451 struct ocfs2_inode_info *new_oi;
6452 struct ocfs2_dinode *new_di;
6453 struct ocfs2_xattr_value_buf vb = {
6454 .vb_bh = args->new_bh,
6455 .vb_access = ocfs2_journal_access_di,
6456 };
6457
6458 ret = ocfs2_reflink_lock_xattr_allocators(osb, xh, args->ref_root_bh,
6459 &credits, &meta_ac);
6460 if (ret) {
6461 mlog_errno(ret);
6462 goto out;
6463 }
6464
6465 handle = ocfs2_start_trans(osb, credits);
6466 if (IS_ERR(handle)) {
6467 ret = PTR_ERR(handle);
6468 mlog_errno(ret);
6469 goto out;
6470 }
6471
6472 ret = ocfs2_journal_access_di(handle, INODE_CACHE(args->new_inode),
6473 args->new_bh, OCFS2_JOURNAL_ACCESS_WRITE);
6474 if (ret) {
6475 mlog_errno(ret);
6476 goto out_commit;
6477 }
6478
6479 memcpy(args->new_bh->b_data + header_off,
6480 args->old_bh->b_data + header_off, inline_size);
6481
6482 new_di = (struct ocfs2_dinode *)args->new_bh->b_data;
6483 new_di->i_xattr_inline_size = cpu_to_le16(inline_size);
6484
6485 ret = ocfs2_reflink_xattr_header(handle, args, args->old_bh, xh,
6486 args->new_bh, new_xh, &vb, meta_ac,
6487 ocfs2_get_xattr_value_root, NULL);
6488 if (ret) {
6489 mlog_errno(ret);
6490 goto out_commit;
6491 }
6492
6493 new_oi = OCFS2_I(args->new_inode);
Junxiao Bief962df2013-07-03 15:01:03 -07006494 /*
6495	 * Adjust the extent record count to reserve space for extended attributes.
6496	 * The inline data count has already been adjusted in ocfs2_duplicate_inline_data().
6497 */
6498 if (!(new_oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) &&
6499 !(ocfs2_inode_is_fast_symlink(args->new_inode))) {
6500 struct ocfs2_extent_list *el = &new_di->id2.i_list;
6501 le16_add_cpu(&el->l_count, -(inline_size /
6502 sizeof(struct ocfs2_extent_rec)));
6503 }
Tao Ma2999d122009-08-18 11:43:55 +08006504 spin_lock(&new_oi->ip_lock);
6505 new_oi->ip_dyn_features |= OCFS2_HAS_XATTR_FL | OCFS2_INLINE_XATTR_FL;
6506 new_di->i_dyn_features = cpu_to_le16(new_oi->ip_dyn_features);
6507 spin_unlock(&new_oi->ip_lock);
6508
6509 ocfs2_journal_dirty(handle, args->new_bh);
6510
6511out_commit:
6512 ocfs2_commit_trans(osb, handle);
6513
6514out:
6515 if (meta_ac)
6516 ocfs2_free_alloc_context(meta_ac);
6517 return ret;
6518}
6519
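/*
 * Allocate and initialize an empty xattr block, indexed or not, for the
 * inode described by fe_bh.
 */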
6520static int ocfs2_create_empty_xattr_block(struct inode *inode,
6521 struct buffer_head *fe_bh,
6522 struct buffer_head **ret_bh,
6523 int indexed)
6524{
6525 int ret;
Tao Ma2999d122009-08-18 11:43:55 +08006526 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
Tao Mab2317962010-03-19 15:04:24 +08006527 struct ocfs2_xattr_set_ctxt ctxt;
Tao Ma2999d122009-08-18 11:43:55 +08006528
Tao Mab2317962010-03-19 15:04:24 +08006529 memset(&ctxt, 0, sizeof(ctxt));
6530 ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &ctxt.meta_ac);
Tao Ma2999d122009-08-18 11:43:55 +08006531 if (ret < 0) {
6532 mlog_errno(ret);
6533 return ret;
6534 }
6535
Joel Beckerd3981542009-08-19 02:13:50 -07006536 ctxt.handle = ocfs2_start_trans(osb, OCFS2_XATTR_BLOCK_CREATE_CREDITS);
6537 if (IS_ERR(ctxt.handle)) {
6538 ret = PTR_ERR(ctxt.handle);
Tao Ma2999d122009-08-18 11:43:55 +08006539 mlog_errno(ret);
6540 goto out;
6541 }
6542
Tao Ma402b4182011-02-23 22:01:17 +08006543 trace_ocfs2_create_empty_xattr_block(
6544 (unsigned long long)fe_bh->b_blocknr, indexed);
Joel Beckerd3981542009-08-19 02:13:50 -07006545 ret = ocfs2_create_xattr_block(inode, fe_bh, &ctxt, indexed,
6546 ret_bh);
Tao Ma2999d122009-08-18 11:43:55 +08006547 if (ret)
6548 mlog_errno(ret);
6549
Joel Beckerd3981542009-08-19 02:13:50 -07006550 ocfs2_commit_trans(osb, ctxt.handle);
Tao Ma2999d122009-08-18 11:43:55 +08006551out:
Tao Mab2317962010-03-19 15:04:24 +08006552 ocfs2_free_alloc_context(ctxt.meta_ac);
Tao Ma2999d122009-08-18 11:43:55 +08006553 return ret;
6554}
6555
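/*
 * Reflink a non-indexed xattr block: copy the whole header into the new
 * block, reflink every entry and set OCFS2_HAS_XATTR_FL on the new inode
 * if it is not set yet.
 */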
6556static int ocfs2_reflink_xattr_block(struct ocfs2_xattr_reflink *args,
6557 struct buffer_head *blk_bh,
6558 struct buffer_head *new_blk_bh)
6559{
6560 int ret = 0, credits = 0;
6561 handle_t *handle;
6562 struct ocfs2_inode_info *new_oi = OCFS2_I(args->new_inode);
6563 struct ocfs2_dinode *new_di;
6564 struct ocfs2_super *osb = OCFS2_SB(args->new_inode->i_sb);
6565 int header_off = offsetof(struct ocfs2_xattr_block, xb_attrs.xb_header);
6566 struct ocfs2_xattr_block *xb =
6567 (struct ocfs2_xattr_block *)blk_bh->b_data;
6568 struct ocfs2_xattr_header *xh = &xb->xb_attrs.xb_header;
6569 struct ocfs2_xattr_block *new_xb =
6570 (struct ocfs2_xattr_block *)new_blk_bh->b_data;
6571 struct ocfs2_xattr_header *new_xh = &new_xb->xb_attrs.xb_header;
6572 struct ocfs2_alloc_context *meta_ac;
6573 struct ocfs2_xattr_value_buf vb = {
6574 .vb_bh = new_blk_bh,
6575 .vb_access = ocfs2_journal_access_xb,
6576 };
6577
6578 ret = ocfs2_reflink_lock_xattr_allocators(osb, xh, args->ref_root_bh,
6579 &credits, &meta_ac);
6580 if (ret) {
6581 mlog_errno(ret);
6582 return ret;
6583 }
6584
6585	/* One more credit in case we need to set the xattr flag on the new inode. */
6586 handle = ocfs2_start_trans(osb, credits + 1);
6587 if (IS_ERR(handle)) {
6588 ret = PTR_ERR(handle);
6589 mlog_errno(ret);
6590 goto out;
6591 }
6592
6593 if (!(new_oi->ip_dyn_features & OCFS2_HAS_XATTR_FL)) {
6594 ret = ocfs2_journal_access_di(handle,
6595 INODE_CACHE(args->new_inode),
6596 args->new_bh,
6597 OCFS2_JOURNAL_ACCESS_WRITE);
6598 if (ret) {
6599 mlog_errno(ret);
6600 goto out_commit;
6601 }
6602 }
6603
6604 ret = ocfs2_journal_access_xb(handle, INODE_CACHE(args->new_inode),
6605 new_blk_bh, OCFS2_JOURNAL_ACCESS_WRITE);
6606 if (ret) {
6607 mlog_errno(ret);
6608 goto out_commit;
6609 }
6610
6611 memcpy(new_blk_bh->b_data + header_off, blk_bh->b_data + header_off,
6612 osb->sb->s_blocksize - header_off);
6613
6614 ret = ocfs2_reflink_xattr_header(handle, args, blk_bh, xh,
6615 new_blk_bh, new_xh, &vb, meta_ac,
6616 ocfs2_get_xattr_value_root, NULL);
6617 if (ret) {
6618 mlog_errno(ret);
6619 goto out_commit;
6620 }
6621
6622 ocfs2_journal_dirty(handle, new_blk_bh);
6623
6624 if (!(new_oi->ip_dyn_features & OCFS2_HAS_XATTR_FL)) {
6625 new_di = (struct ocfs2_dinode *)args->new_bh->b_data;
6626 spin_lock(&new_oi->ip_lock);
6627 new_oi->ip_dyn_features |= OCFS2_HAS_XATTR_FL;
6628 new_di->i_dyn_features = cpu_to_le16(new_oi->ip_dyn_features);
6629 spin_unlock(&new_oi->ip_lock);
6630
6631 ocfs2_journal_dirty(handle, args->new_bh);
6632 }
6633
6634out_commit:
6635 ocfs2_commit_trans(osb, handle);
6636
6637out:
6638 ocfs2_free_alloc_context(meta_ac);
6639 return ret;
6640}
6641
6642struct ocfs2_reflink_xattr_tree_args {
6643 struct ocfs2_xattr_reflink *reflink;
6644 struct buffer_head *old_blk_bh;
6645 struct buffer_head *new_blk_bh;
6646 struct ocfs2_xattr_bucket *old_bucket;
6647 struct ocfs2_xattr_bucket *new_bucket;
6648};
6649
6650/*
6651 * NOTE:
6652 * We have to handle the case that both the old bucket and the new bucket
6653 * are passed to this function to get the right ret_bh.
6654 * So the caller must give us the right bh.
6655 */
6656static int ocfs2_get_reflink_xattr_value_root(struct super_block *sb,
6657 struct buffer_head *bh,
6658 struct ocfs2_xattr_header *xh,
6659 int offset,
6660 struct ocfs2_xattr_value_root **xv,
6661 struct buffer_head **ret_bh,
6662 void *para)
6663{
6664 struct ocfs2_reflink_xattr_tree_args *args =
6665 (struct ocfs2_reflink_xattr_tree_args *)para;
6666 struct ocfs2_xattr_bucket *bucket;
6667
6668 if (bh == args->old_bucket->bu_bhs[0])
6669 bucket = args->old_bucket;
6670 else
6671 bucket = args->new_bucket;
6672
6673 return ocfs2_get_xattr_tree_value_root(sb, bucket, offset,
6674 xv, ret_bh);
6675}
6676
6677struct ocfs2_value_tree_metas {
6678 int num_metas;
6679 int credits;
6680 int num_recs;
6681};
6682
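/* get_xattr_value_root callback whose para is the xattr bucket. */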
6683static int ocfs2_value_tree_metas_in_bucket(struct super_block *sb,
6684 struct buffer_head *bh,
6685 struct ocfs2_xattr_header *xh,
6686 int offset,
6687 struct ocfs2_xattr_value_root **xv,
6688 struct buffer_head **ret_bh,
6689 void *para)
6690{
6691 struct ocfs2_xattr_bucket *bucket =
6692 (struct ocfs2_xattr_bucket *)para;
6693
6694 return ocfs2_get_xattr_tree_value_root(sb, bucket, offset,
6695 xv, ret_bh);
6696}
6697
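/*
 * Per-bucket callback: accumulate the metadata blocks, credits and
 * extent record count needed to reflink the value roots in this bucket.
 */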
6698static int ocfs2_calc_value_tree_metas(struct inode *inode,
6699 struct ocfs2_xattr_bucket *bucket,
6700 void *para)
6701{
6702 struct ocfs2_value_tree_metas *metas =
6703 (struct ocfs2_value_tree_metas *)para;
6704 struct ocfs2_xattr_header *xh =
6705 (struct ocfs2_xattr_header *)bucket->bu_bhs[0]->b_data;
6706
6707 /* Add the credits for this bucket first. */
6708 metas->credits += bucket->bu_blocks;
6709 return ocfs2_value_metas_in_xattr_header(inode->i_sb, bucket->bu_bhs[0],
6710 xh, &metas->num_metas,
6711 &metas->credits, &metas->num_recs,
6712 ocfs2_value_tree_metas_in_bucket,
6713 bucket);
6714}
6715
6716/*
6717 * Given an xattr extent rec starting at blkno and covering len clusters,
6718 * iterate over all the buckets, calculate how much metadata we need for
6719 * reflinking all the ocfs2_xattr_value_roots, and lock the allocators accordingly.
6720 */
6721static int ocfs2_lock_reflink_xattr_rec_allocators(
6722 struct ocfs2_reflink_xattr_tree_args *args,
6723 struct ocfs2_extent_tree *xt_et,
6724 u64 blkno, u32 len, int *credits,
6725 struct ocfs2_alloc_context **meta_ac,
6726 struct ocfs2_alloc_context **data_ac)
6727{
6728 int ret, num_free_extents;
6729 struct ocfs2_value_tree_metas metas;
6730 struct ocfs2_super *osb = OCFS2_SB(args->reflink->old_inode->i_sb);
6731 struct ocfs2_refcount_block *rb;
6732
6733 memset(&metas, 0, sizeof(metas));
6734
6735 ret = ocfs2_iterate_xattr_buckets(args->reflink->old_inode, blkno, len,
6736 ocfs2_calc_value_tree_metas, &metas);
6737 if (ret) {
6738 mlog_errno(ret);
6739 goto out;
6740 }
6741
6742 *credits = metas.credits;
6743
6744 /*
6745	 * Calculate what we need for the refcount tree change.
6746	 *
6747	 * We need to add/modify num_recs in the refcount tree, so just calculate
6748	 * an approximate number we need for the refcount tree change.
6749	 * Sometimes we need to split the tree, and after a split half of the recs
6750	 * are moved to the new block, so a new block can only provide
6751	 * half its number of recs. So we multiply the new blocks by 2.
6752	 * In the end, we have to add credits for modifying the already
6753	 * existing refcount block.
6754 */
6755 rb = (struct ocfs2_refcount_block *)args->reflink->ref_root_bh->b_data;
6756 metas.num_recs =
6757 (metas.num_recs + ocfs2_refcount_recs_per_rb(osb->sb) - 1) /
6758 ocfs2_refcount_recs_per_rb(osb->sb) * 2;
6759 metas.num_metas += metas.num_recs;
6760 *credits += metas.num_recs +
6761 metas.num_recs * OCFS2_EXPAND_REFCOUNT_TREE_CREDITS;
6762 if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)
6763 *credits += le16_to_cpu(rb->rf_list.l_tree_depth) *
6764 le16_to_cpu(rb->rf_list.l_next_free_rec) + 1;
6765 else
6766 *credits += 1;
6767
6768 /* count in the xattr tree change. */
6769 num_free_extents = ocfs2_num_free_extents(osb, xt_et);
6770 if (num_free_extents < 0) {
6771 ret = num_free_extents;
6772 mlog_errno(ret);
6773 goto out;
6774 }
6775
6776 if (num_free_extents < len)
6777 metas.num_metas += ocfs2_extend_meta_needed(xt_et->et_root_el);
6778
6779 *credits += ocfs2_calc_extend_credits(osb->sb,
6780 xt_et->et_root_el, len);
6781
6782 if (metas.num_metas) {
6783 ret = ocfs2_reserve_new_metadata_blocks(osb, metas.num_metas,
6784 meta_ac);
6785 if (ret) {
6786 mlog_errno(ret);
6787 goto out;
6788 }
6789 }
6790
6791 if (len) {
6792 ret = ocfs2_reserve_clusters(osb, len, data_ac);
6793 if (ret)
6794 mlog_errno(ret);
6795 }
6796out:
6797 if (ret) {
6798 if (*meta_ac) {
6799 ocfs2_free_alloc_context(*meta_ac);
6800			*meta_ac = NULL;
6801 }
6802 }
6803
6804 return ret;
6805}
6806
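/*
 * Copy num_buckets buckets from blkno to new_blkno and reflink the
 * outside-stored values in each of them. The cpos of the first copied
 * bucket is recorded for the caller's extent insert, and xh_num_buckets
 * of the first new bucket is updated.
 */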
Tao Ma121a39b2010-07-09 14:53:12 +08006807static int ocfs2_reflink_xattr_bucket(handle_t *handle,
Tao Ma2999d122009-08-18 11:43:55 +08006808 u64 blkno, u64 new_blkno, u32 clusters,
Tao Ma121a39b2010-07-09 14:53:12 +08006809 u32 *cpos, int num_buckets,
Tao Ma2999d122009-08-18 11:43:55 +08006810 struct ocfs2_alloc_context *meta_ac,
6811 struct ocfs2_alloc_context *data_ac,
6812 struct ocfs2_reflink_xattr_tree_args *args)
6813{
6814 int i, j, ret = 0;
6815 struct super_block *sb = args->reflink->old_inode->i_sb;
Tao Ma2999d122009-08-18 11:43:55 +08006816 int bpb = args->old_bucket->bu_blocks;
6817 struct ocfs2_xattr_value_buf vb = {
6818 .vb_access = ocfs2_journal_access,
6819 };
6820
6821 for (i = 0; i < num_buckets; i++, blkno += bpb, new_blkno += bpb) {
6822 ret = ocfs2_read_xattr_bucket(args->old_bucket, blkno);
6823 if (ret) {
6824 mlog_errno(ret);
6825 break;
6826 }
6827
6828 ret = ocfs2_init_xattr_bucket(args->new_bucket, new_blkno);
6829 if (ret) {
6830 mlog_errno(ret);
6831 break;
6832 }
6833
Tao Ma2999d122009-08-18 11:43:55 +08006834 ret = ocfs2_xattr_bucket_journal_access(handle,
6835 args->new_bucket,
6836 OCFS2_JOURNAL_ACCESS_CREATE);
6837 if (ret) {
6838 mlog_errno(ret);
6839 break;
6840 }
6841
6842 for (j = 0; j < bpb; j++)
6843 memcpy(bucket_block(args->new_bucket, j),
6844 bucket_block(args->old_bucket, j),
6845 sb->s_blocksize);
6846
Tao Ma121a39b2010-07-09 14:53:12 +08006847 /*
6848		 * Record the start cpos so that we can use it to initialize
6849		 * our xattr tree. We also set xh_num_buckets for the new
6850		 * bucket.
6851 */
6852 if (i == 0) {
6853 *cpos = le32_to_cpu(bucket_xh(args->new_bucket)->
6854 xh_entries[0].xe_name_hash);
6855 bucket_xh(args->new_bucket)->xh_num_buckets =
6856 cpu_to_le16(num_buckets);
6857 }
6858
Tao Ma2999d122009-08-18 11:43:55 +08006859 ocfs2_xattr_bucket_journal_dirty(handle, args->new_bucket);
6860
6861 ret = ocfs2_reflink_xattr_header(handle, args->reflink,
6862 args->old_bucket->bu_bhs[0],
6863 bucket_xh(args->old_bucket),
6864 args->new_bucket->bu_bhs[0],
6865 bucket_xh(args->new_bucket),
6866 &vb, meta_ac,
6867 ocfs2_get_reflink_xattr_value_root,
6868 args);
6869 if (ret) {
6870 mlog_errno(ret);
6871 break;
6872 }
6873
6874 /*
6875 * Re-access and dirty the bucket to calculate metaecc.
6876		 * Because we may have extended the transaction in reflink_xattr_header,
6877		 * which can invalidate the access we already took on the block.
6878 */
6879 ret = ocfs2_xattr_bucket_journal_access(handle,
6880 args->new_bucket,
6881 OCFS2_JOURNAL_ACCESS_WRITE);
6882 if (ret) {
6883 mlog_errno(ret);
6884 break;
6885 }
6886
6887 ocfs2_xattr_bucket_journal_dirty(handle, args->new_bucket);
Tao Ma121a39b2010-07-09 14:53:12 +08006888
Tao Ma2999d122009-08-18 11:43:55 +08006889 ocfs2_xattr_bucket_relse(args->old_bucket);
6890 ocfs2_xattr_bucket_relse(args->new_bucket);
6891 }
6892
6893 ocfs2_xattr_bucket_relse(args->old_bucket);
6894 ocfs2_xattr_bucket_relse(args->new_bucket);
6895 return ret;
6896}
Tao Ma121a39b2010-07-09 14:53:12 +08006897
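/*
 * Reflink one xattr tree extent record: claim new clusters, copy the old
 * buckets into them chunk by chunk and insert the new extents into the
 * new inode's xattr tree.
 */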
6898static int ocfs2_reflink_xattr_buckets(handle_t *handle,
6899 struct inode *inode,
6900 struct ocfs2_reflink_xattr_tree_args *args,
6901 struct ocfs2_extent_tree *et,
6902 struct ocfs2_alloc_context *meta_ac,
6903 struct ocfs2_alloc_context *data_ac,
6904 u64 blkno, u32 cpos, u32 len)
6905{
6906 int ret, first_inserted = 0;
6907 u32 p_cluster, num_clusters, reflink_cpos = 0;
6908 u64 new_blkno;
6909 unsigned int num_buckets, reflink_buckets;
6910 unsigned int bpc =
6911 ocfs2_xattr_buckets_per_cluster(OCFS2_SB(inode->i_sb));
6912
6913 ret = ocfs2_read_xattr_bucket(args->old_bucket, blkno);
6914 if (ret) {
6915 mlog_errno(ret);
6916 goto out;
6917 }
6918 num_buckets = le16_to_cpu(bucket_xh(args->old_bucket)->xh_num_buckets);
6919 ocfs2_xattr_bucket_relse(args->old_bucket);
6920
6921 while (len && num_buckets) {
6922 ret = ocfs2_claim_clusters(handle, data_ac,
6923 1, &p_cluster, &num_clusters);
6924 if (ret) {
6925 mlog_errno(ret);
6926 goto out;
6927 }
6928
6929 new_blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cluster);
6930 reflink_buckets = min(num_buckets, bpc * num_clusters);
6931
6932 ret = ocfs2_reflink_xattr_bucket(handle, blkno,
6933 new_blkno, num_clusters,
6934 &reflink_cpos, reflink_buckets,
6935 meta_ac, data_ac, args);
6936 if (ret) {
6937 mlog_errno(ret);
6938 goto out;
6939 }
6940
6941 /*
6942 * For the 1st allocated cluster, we make it use the same cpos
6943 * so that the xattr tree looks the same as the original one
6944		 * in most cases.
6945 */
6946 if (!first_inserted) {
6947 reflink_cpos = cpos;
6948 first_inserted = 1;
6949 }
6950 ret = ocfs2_insert_extent(handle, et, reflink_cpos, new_blkno,
6951 num_clusters, 0, meta_ac);
6952 if (ret)
6953 mlog_errno(ret);
6954
Tao Ma402b4182011-02-23 22:01:17 +08006955 trace_ocfs2_reflink_xattr_buckets((unsigned long long)new_blkno,
6956 num_clusters, reflink_cpos);
Tao Ma121a39b2010-07-09 14:53:12 +08006957
6958 len -= num_clusters;
6959 blkno += ocfs2_clusters_to_blocks(inode->i_sb, num_clusters);
6960 num_buckets -= reflink_buckets;
6961 }
6962out:
6963 return ret;
6964}
6965
Tao Ma2999d122009-08-18 11:43:55 +08006966/*
6967 * Create the same xattr extent record in the new inode's xattr tree.
6968 */
6969static int ocfs2_reflink_xattr_rec(struct inode *inode,
6970 struct buffer_head *root_bh,
6971 u64 blkno,
6972 u32 cpos,
6973 u32 len,
6974 void *para)
6975{
6976 int ret, credits = 0;
Tao Ma2999d122009-08-18 11:43:55 +08006977 handle_t *handle;
6978 struct ocfs2_reflink_xattr_tree_args *args =
6979 (struct ocfs2_reflink_xattr_tree_args *)para;
6980 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
6981 struct ocfs2_alloc_context *meta_ac = NULL;
6982 struct ocfs2_alloc_context *data_ac = NULL;
6983 struct ocfs2_extent_tree et;
6984
Tao Ma402b4182011-02-23 22:01:17 +08006985 trace_ocfs2_reflink_xattr_rec((unsigned long long)blkno, len);
Tao Ma121a39b2010-07-09 14:53:12 +08006986
Tao Ma2999d122009-08-18 11:43:55 +08006987 ocfs2_init_xattr_tree_extent_tree(&et,
6988 INODE_CACHE(args->reflink->new_inode),
6989 args->new_blk_bh);
6990
6991 ret = ocfs2_lock_reflink_xattr_rec_allocators(args, &et, blkno,
6992 len, &credits,
6993 &meta_ac, &data_ac);
6994 if (ret) {
6995 mlog_errno(ret);
6996 goto out;
6997 }
6998
6999 handle = ocfs2_start_trans(osb, credits);
7000 if (IS_ERR(handle)) {
7001 ret = PTR_ERR(handle);
7002 mlog_errno(ret);
7003 goto out;
7004 }
7005
Tao Ma121a39b2010-07-09 14:53:12 +08007006 ret = ocfs2_reflink_xattr_buckets(handle, inode, args, &et,
7007 meta_ac, data_ac,
7008 blkno, cpos, len);
Tao Ma2999d122009-08-18 11:43:55 +08007009 if (ret)
7010 mlog_errno(ret);
7011
Tao Ma2999d122009-08-18 11:43:55 +08007012 ocfs2_commit_trans(osb, handle);
7013
7014out:
7015 if (meta_ac)
7016 ocfs2_free_alloc_context(meta_ac);
7017 if (data_ac)
7018 ocfs2_free_alloc_context(data_ac);
7019 return ret;
7020}
7021
7022/*
7023 * Create reflinked xattr buckets.
7024 * We will add the buckets one by one, and refcount all the xattrs in each
7025 * bucket whose values are stored outside.
7026 */
7027static int ocfs2_reflink_xattr_tree(struct ocfs2_xattr_reflink *args,
7028 struct buffer_head *blk_bh,
7029 struct buffer_head *new_blk_bh)
7030{
7031 int ret;
7032 struct ocfs2_reflink_xattr_tree_args para;
7033
7034 memset(&para, 0, sizeof(para));
7035 para.reflink = args;
7036 para.old_blk_bh = blk_bh;
7037 para.new_blk_bh = new_blk_bh;
7038
7039 para.old_bucket = ocfs2_xattr_bucket_new(args->old_inode);
7040 if (!para.old_bucket) {
7041 mlog_errno(-ENOMEM);
7042 return -ENOMEM;
7043 }
7044
7045 para.new_bucket = ocfs2_xattr_bucket_new(args->new_inode);
7046 if (!para.new_bucket) {
7047 ret = -ENOMEM;
7048 mlog_errno(ret);
7049 goto out;
7050 }
7051
7052 ret = ocfs2_iterate_xattr_index_block(args->old_inode, blk_bh,
7053 ocfs2_reflink_xattr_rec,
7054 &para);
7055 if (ret)
7056 mlog_errno(ret);
7057
7058out:
7059 ocfs2_xattr_bucket_free(para.old_bucket);
7060 ocfs2_xattr_bucket_free(para.new_bucket);
7061 return ret;
7062}
7063
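/*
 * Create a new xattr block for the new inode and reflink the old inode's
 * external xattr block into it, taking the plain block or indexed tree
 * path to match the source.
 */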
7064static int ocfs2_reflink_xattr_in_block(struct ocfs2_xattr_reflink *args,
7065 struct buffer_head *blk_bh)
7066{
7067 int ret, indexed = 0;
7068 struct buffer_head *new_blk_bh = NULL;
7069 struct ocfs2_xattr_block *xb =
7070 (struct ocfs2_xattr_block *)blk_bh->b_data;
7071
7072
7073 if (le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED)
7074 indexed = 1;
7075
7076 ret = ocfs2_create_empty_xattr_block(args->new_inode, args->new_bh,
7077 &new_blk_bh, indexed);
7078 if (ret) {
7079 mlog_errno(ret);
7080 goto out;
7081 }
7082
Jeff Liu2decd652010-10-12 11:18:18 +08007083 if (!indexed)
Tao Ma2999d122009-08-18 11:43:55 +08007084 ret = ocfs2_reflink_xattr_block(args, blk_bh, new_blk_bh);
7085 else
7086 ret = ocfs2_reflink_xattr_tree(args, blk_bh, new_blk_bh);
7087 if (ret)
7088 mlog_errno(ret);
7089
7090out:
7091 brelse(new_blk_bh);
7092 return ret;
7093}
7094
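/*
 * Filter used when reflink does not preserve security: skip security and
 * POSIX ACL xattrs so that the new inode gets fresh ones.
 */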
Tao Ma0fe9b662009-08-18 11:47:56 +08007095static int ocfs2_reflink_xattr_no_security(struct ocfs2_xattr_entry *xe)
7096{
7097 int type = ocfs2_xattr_get_type(xe);
7098
7099 return type != OCFS2_XATTR_INDEX_SECURITY &&
7100 type != OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS &&
7101 type != OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT;
7102}
7103
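/*
 * Reflink all the xattrs of old_inode into new_inode under the refcount
 * tree lock. If preserve_security is false, security and ACL xattrs are
 * skipped and have to be re-initialized by the caller.
 */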
Tao Ma2999d122009-08-18 11:43:55 +08007104int ocfs2_reflink_xattrs(struct inode *old_inode,
7105 struct buffer_head *old_bh,
7106 struct inode *new_inode,
Tao Ma0fe9b662009-08-18 11:47:56 +08007107 struct buffer_head *new_bh,
7108 bool preserve_security)
Tao Ma2999d122009-08-18 11:43:55 +08007109{
7110 int ret;
7111 struct ocfs2_xattr_reflink args;
7112 struct ocfs2_inode_info *oi = OCFS2_I(old_inode);
7113 struct ocfs2_dinode *di = (struct ocfs2_dinode *)old_bh->b_data;
7114 struct buffer_head *blk_bh = NULL;
7115 struct ocfs2_cached_dealloc_ctxt dealloc;
7116 struct ocfs2_refcount_tree *ref_tree;
7117 struct buffer_head *ref_root_bh = NULL;
7118
7119 ret = ocfs2_lock_refcount_tree(OCFS2_SB(old_inode->i_sb),
7120 le64_to_cpu(di->i_refcount_loc),
7121 1, &ref_tree, &ref_root_bh);
7122 if (ret) {
7123 mlog_errno(ret);
7124 goto out;
7125 }
7126
7127 ocfs2_init_dealloc_ctxt(&dealloc);
7128
7129 args.old_inode = old_inode;
7130 args.new_inode = new_inode;
7131 args.old_bh = old_bh;
7132 args.new_bh = new_bh;
7133 args.ref_ci = &ref_tree->rf_ci;
7134 args.ref_root_bh = ref_root_bh;
7135 args.dealloc = &dealloc;
Tao Ma0fe9b662009-08-18 11:47:56 +08007136 if (preserve_security)
7137 args.xattr_reflinked = NULL;
7138 else
7139 args.xattr_reflinked = ocfs2_reflink_xattr_no_security;
Tao Ma2999d122009-08-18 11:43:55 +08007140
7141 if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL) {
7142 ret = ocfs2_reflink_xattr_inline(&args);
7143 if (ret) {
7144 mlog_errno(ret);
7145 goto out_unlock;
7146 }
7147 }
7148
7149 if (!di->i_xattr_loc)
7150 goto out_unlock;
7151
7152 ret = ocfs2_read_xattr_block(old_inode, le64_to_cpu(di->i_xattr_loc),
7153 &blk_bh);
7154 if (ret < 0) {
7155 mlog_errno(ret);
7156 goto out_unlock;
7157 }
7158
7159 ret = ocfs2_reflink_xattr_in_block(&args, blk_bh);
7160 if (ret)
7161 mlog_errno(ret);
7162
7163 brelse(blk_bh);
7164
7165out_unlock:
7166 ocfs2_unlock_refcount_tree(OCFS2_SB(old_inode->i_sb),
7167 ref_tree, 1);
7168 brelse(ref_root_bh);
7169
7170 if (ocfs2_dealloc_has_cluster(&dealloc)) {
7171 ocfs2_schedule_truncate_log_flush(OCFS2_SB(old_inode->i_sb), 1);
7172 ocfs2_run_deallocs(OCFS2_SB(old_inode->i_sb), &dealloc);
7173 }
7174
7175out:
7176 return ret;
7177}
7178
7179/*
Tao Ma0fe9b662009-08-18 11:47:56 +08007180 * Initialize security and acl for an already created inode.
7181 * Used when reflinking a non-preserve-security file.
7182 *
7183 * It uses common APIs like ocfs2_xattr_set, so the caller
7184 * must not hold any lock except i_mutex.
7185 */
7186int ocfs2_init_security_and_acl(struct inode *dir,
Eric Paris2a7dba32011-02-01 11:05:39 -05007187 struct inode *inode,
7188 const struct qstr *qstr)
Tao Ma0fe9b662009-08-18 11:47:56 +08007189{
7190 int ret = 0;
7191 struct buffer_head *dir_bh = NULL;
Tao Ma0fe9b662009-08-18 11:47:56 +08007192
Mimi Zohar9d8f13b2011-06-06 15:29:25 -04007193 ret = ocfs2_init_security_get(inode, dir, qstr, NULL);
Jeff Liu32918dd2013-02-27 17:02:48 -08007194 if (ret) {
Tao Ma0fe9b662009-08-18 11:47:56 +08007195 mlog_errno(ret);
7196 goto leave;
7197 }
7198
7199 ret = ocfs2_inode_lock(dir, &dir_bh, 0);
7200 if (ret) {
7201 mlog_errno(ret);
7202 goto leave;
7203 }
7204
7205 ret = ocfs2_init_acl(NULL, inode, dir, NULL, dir_bh, NULL, NULL);
7206 if (ret)
7207 mlog_errno(ret);
7208
7209 ocfs2_inode_unlock(dir, 0);
7210 brelse(dir_bh);
7211leave:
7212 return ret;
7213}

7214/*
Tiger Yang923f7f32008-11-14 11:16:27 +08007215 * 'security' attributes support
7216 */
Christoph Hellwig431547b2009-11-13 09:52:56 +00007217static size_t ocfs2_xattr_security_list(struct dentry *dentry, char *list,
Tiger Yang923f7f32008-11-14 11:16:27 +08007218 size_t list_size, const char *name,
Christoph Hellwig431547b2009-11-13 09:52:56 +00007219 size_t name_len, int type)
Tiger Yang923f7f32008-11-14 11:16:27 +08007220{
7221 const size_t prefix_len = XATTR_SECURITY_PREFIX_LEN;
7222 const size_t total_len = prefix_len + name_len + 1;
7223
7224 if (list && total_len <= list_size) {
7225 memcpy(list, XATTR_SECURITY_PREFIX, prefix_len);
7226 memcpy(list + prefix_len, name, name_len);
7227 list[prefix_len + name_len] = '\0';
7228 }
7229 return total_len;
7230}
7231
Christoph Hellwig431547b2009-11-13 09:52:56 +00007232static int ocfs2_xattr_security_get(struct dentry *dentry, const char *name,
7233 void *buffer, size_t size, int type)
Tiger Yang923f7f32008-11-14 11:16:27 +08007234{
7235 if (strcmp(name, "") == 0)
7236 return -EINVAL;
Christoph Hellwig431547b2009-11-13 09:52:56 +00007237 return ocfs2_xattr_get(dentry->d_inode, OCFS2_XATTR_INDEX_SECURITY,
7238 name, buffer, size);
Tiger Yang923f7f32008-11-14 11:16:27 +08007239}
7240
Christoph Hellwig431547b2009-11-13 09:52:56 +00007241static int ocfs2_xattr_security_set(struct dentry *dentry, const char *name,
7242 const void *value, size_t size, int flags, int type)
Tiger Yang923f7f32008-11-14 11:16:27 +08007243{
7244 if (strcmp(name, "") == 0)
7245 return -EINVAL;
7246
Christoph Hellwig431547b2009-11-13 09:52:56 +00007247 return ocfs2_xattr_set(dentry->d_inode, OCFS2_XATTR_INDEX_SECURITY,
7248 name, value, size, flags);
Tiger Yang923f7f32008-11-14 11:16:27 +08007249}
7250
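/*
 * Callback for security_inode_init_security(): set every xattr handed to
 * us by the LSM on the newly created inode.
 */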
Mimi Zohar9d8f13b2011-06-06 15:29:25 -04007251int ocfs2_initxattrs(struct inode *inode, const struct xattr *xattr_array,
7252 void *fs_info)
7253{
7254 const struct xattr *xattr;
7255 int err = 0;
7256
7257 for (xattr = xattr_array; xattr->name != NULL; xattr++) {
7258 err = ocfs2_xattr_set(inode, OCFS2_XATTR_INDEX_SECURITY,
7259 xattr->name, xattr->value,
7260 xattr->value_len, XATTR_CREATE);
7261 if (err)
7262 break;
7263 }
7264 return err;
7265}
7266
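/*
 * Ask the security module for the initial security xattr of a new inode.
 * When si is non-NULL the old single-xattr interface is used and the
 * caller stores the value itself; otherwise the xattrs are set directly
 * through ocfs2_initxattrs().
 */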
Tiger Yang534eadd2008-11-14 11:16:41 +08007267int ocfs2_init_security_get(struct inode *inode,
7268 struct inode *dir,
Eric Paris2a7dba32011-02-01 11:05:39 -05007269 const struct qstr *qstr,
Tiger Yang534eadd2008-11-14 11:16:41 +08007270 struct ocfs2_security_xattr_info *si)
7271{
Tiger Yang38d59ef2008-12-17 10:22:56 +08007272 /* check whether ocfs2 support feature xattr */
7273 if (!ocfs2_supports_xattr(OCFS2_SB(dir->i_sb)))
7274 return -EOPNOTSUPP;
Mimi Zohar9d8f13b2011-06-06 15:29:25 -04007275 if (si)
7276 return security_old_inode_init_security(inode, dir, qstr,
7277 &si->name, &si->value,
7278 &si->value_len);
7279
7280 return security_inode_init_security(inode, dir, qstr,
7281 &ocfs2_initxattrs, NULL);
Tiger Yang534eadd2008-11-14 11:16:41 +08007282}
7283
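/* Write the security xattr obtained from ocfs2_init_security_get(). */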
7284int ocfs2_init_security_set(handle_t *handle,
7285 struct inode *inode,
7286 struct buffer_head *di_bh,
7287 struct ocfs2_security_xattr_info *si,
7288 struct ocfs2_alloc_context *xattr_ac,
7289 struct ocfs2_alloc_context *data_ac)
7290{
7291 return ocfs2_xattr_set_handle(handle, inode, di_bh,
7292 OCFS2_XATTR_INDEX_SECURITY,
7293 si->name, si->value, si->value_len, 0,
7294 xattr_ac, data_ac);
7295}
7296
Stephen Hemminger537d81c2010-05-13 17:53:22 -07007297const struct xattr_handler ocfs2_xattr_security_handler = {
Tiger Yang923f7f32008-11-14 11:16:27 +08007298 .prefix = XATTR_SECURITY_PREFIX,
7299 .list = ocfs2_xattr_security_list,
7300 .get = ocfs2_xattr_security_get,
7301 .set = ocfs2_xattr_security_set,
7302};
7303
7304/*
Mark Fasheh99219ae2008-10-07 14:52:59 -07007305 * 'trusted' attributes support
7306 */
Christoph Hellwig431547b2009-11-13 09:52:56 +00007307static size_t ocfs2_xattr_trusted_list(struct dentry *dentry, char *list,
Mark Fasheh99219ae2008-10-07 14:52:59 -07007308 size_t list_size, const char *name,
Christoph Hellwig431547b2009-11-13 09:52:56 +00007309 size_t name_len, int type)
Mark Fasheh99219ae2008-10-07 14:52:59 -07007310{
Tiger Yangceb1eba2008-10-23 16:34:13 +08007311 const size_t prefix_len = XATTR_TRUSTED_PREFIX_LEN;
Mark Fasheh99219ae2008-10-07 14:52:59 -07007312 const size_t total_len = prefix_len + name_len + 1;
7313
7314 if (list && total_len <= list_size) {
7315 memcpy(list, XATTR_TRUSTED_PREFIX, prefix_len);
7316 memcpy(list + prefix_len, name, name_len);
7317 list[prefix_len + name_len] = '\0';
7318 }
7319 return total_len;
7320}
7321
Christoph Hellwig431547b2009-11-13 09:52:56 +00007322static int ocfs2_xattr_trusted_get(struct dentry *dentry, const char *name,
7323 void *buffer, size_t size, int type)
Mark Fasheh99219ae2008-10-07 14:52:59 -07007324{
7325 if (strcmp(name, "") == 0)
7326 return -EINVAL;
Christoph Hellwig431547b2009-11-13 09:52:56 +00007327 return ocfs2_xattr_get(dentry->d_inode, OCFS2_XATTR_INDEX_TRUSTED,
7328 name, buffer, size);
Mark Fasheh99219ae2008-10-07 14:52:59 -07007329}
7330
Christoph Hellwig431547b2009-11-13 09:52:56 +00007331static int ocfs2_xattr_trusted_set(struct dentry *dentry, const char *name,
7332 const void *value, size_t size, int flags, int type)
Mark Fasheh99219ae2008-10-07 14:52:59 -07007333{
7334 if (strcmp(name, "") == 0)
7335 return -EINVAL;
7336
Christoph Hellwig431547b2009-11-13 09:52:56 +00007337 return ocfs2_xattr_set(dentry->d_inode, OCFS2_XATTR_INDEX_TRUSTED,
7338 name, value, size, flags);
Mark Fasheh99219ae2008-10-07 14:52:59 -07007339}
7340
Stephen Hemminger537d81c2010-05-13 17:53:22 -07007341const struct xattr_handler ocfs2_xattr_trusted_handler = {
Mark Fasheh99219ae2008-10-07 14:52:59 -07007342 .prefix = XATTR_TRUSTED_PREFIX,
7343 .list = ocfs2_xattr_trusted_list,
7344 .get = ocfs2_xattr_trusted_get,
7345 .set = ocfs2_xattr_trusted_set,
7346};
7347
Mark Fasheh99219ae2008-10-07 14:52:59 -07007348/*
7349 * 'user' attributes support
7350 */
Christoph Hellwig431547b2009-11-13 09:52:56 +00007351static size_t ocfs2_xattr_user_list(struct dentry *dentry, char *list,
Mark Fasheh99219ae2008-10-07 14:52:59 -07007352 size_t list_size, const char *name,
Christoph Hellwig431547b2009-11-13 09:52:56 +00007353 size_t name_len, int type)
Mark Fasheh99219ae2008-10-07 14:52:59 -07007354{
Tiger Yangceb1eba2008-10-23 16:34:13 +08007355 const size_t prefix_len = XATTR_USER_PREFIX_LEN;
Mark Fasheh99219ae2008-10-07 14:52:59 -07007356 const size_t total_len = prefix_len + name_len + 1;
Christoph Hellwig431547b2009-11-13 09:52:56 +00007357 struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
Mark Fasheh99219ae2008-10-07 14:52:59 -07007358
7359 if (osb->s_mount_opt & OCFS2_MOUNT_NOUSERXATTR)
7360 return 0;
7361
7362 if (list && total_len <= list_size) {
7363 memcpy(list, XATTR_USER_PREFIX, prefix_len);
7364 memcpy(list + prefix_len, name, name_len);
7365 list[prefix_len + name_len] = '\0';
7366 }
7367 return total_len;
7368}
7369
Christoph Hellwig431547b2009-11-13 09:52:56 +00007370static int ocfs2_xattr_user_get(struct dentry *dentry, const char *name,
7371 void *buffer, size_t size, int type)
Mark Fasheh99219ae2008-10-07 14:52:59 -07007372{
Christoph Hellwig431547b2009-11-13 09:52:56 +00007373 struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
Mark Fasheh99219ae2008-10-07 14:52:59 -07007374
7375 if (strcmp(name, "") == 0)
7376 return -EINVAL;
7377 if (osb->s_mount_opt & OCFS2_MOUNT_NOUSERXATTR)
7378 return -EOPNOTSUPP;
Christoph Hellwig431547b2009-11-13 09:52:56 +00007379 return ocfs2_xattr_get(dentry->d_inode, OCFS2_XATTR_INDEX_USER, name,
Mark Fasheh99219ae2008-10-07 14:52:59 -07007380 buffer, size);
7381}
7382
Christoph Hellwig431547b2009-11-13 09:52:56 +00007383static int ocfs2_xattr_user_set(struct dentry *dentry, const char *name,
7384 const void *value, size_t size, int flags, int type)
Mark Fasheh99219ae2008-10-07 14:52:59 -07007385{
Christoph Hellwig431547b2009-11-13 09:52:56 +00007386 struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
Mark Fasheh99219ae2008-10-07 14:52:59 -07007387
7388 if (strcmp(name, "") == 0)
7389 return -EINVAL;
7390 if (osb->s_mount_opt & OCFS2_MOUNT_NOUSERXATTR)
7391 return -EOPNOTSUPP;
7392
Christoph Hellwig431547b2009-11-13 09:52:56 +00007393 return ocfs2_xattr_set(dentry->d_inode, OCFS2_XATTR_INDEX_USER,
7394 name, value, size, flags);
Mark Fasheh99219ae2008-10-07 14:52:59 -07007395}
7396
Stephen Hemminger537d81c2010-05-13 17:53:22 -07007397const struct xattr_handler ocfs2_xattr_user_handler = {
Mark Fasheh99219ae2008-10-07 14:52:59 -07007398 .prefix = XATTR_USER_PREFIX,
7399 .list = ocfs2_xattr_user_list,
7400 .get = ocfs2_xattr_user_get,
7401 .set = ocfs2_xattr_user_set,
7402};