// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/balloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/time.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
20
21
#include "ext4.h"
#include "ext4_jbd2.h"
22
#include "mballoc.h"
23

24
25
#include <trace/events/ext4.h>

Eric Sandeen's avatar
Eric Sandeen committed
26
27
static unsigned ext4_num_base_meta_clusters(struct super_block *sb,
					    ext4_group_t block_group);
28
29
30
31
/*
 * balloc.c contains the blocks allocation and deallocation routines
 */

32
33
34
35
36
37
38
39
40
/*
 * Calculate block group number for a given block number
 */
ext4_group_t ext4_get_group_number(struct super_block *sb,
				   ext4_fsblk_t block)
{
	ext4_group_t group;

	/*
	 * With STD_GROUP_SIZE every group holds exactly blocksize*8
	 * clusters, so the group number can be derived with a shift
	 * instead of the generic divide in
	 * ext4_get_group_no_and_offset().
	 */
	if (test_opt2(sb, STD_GROUP_SIZE))
		group = (block -
			 le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) >>
			(EXT4_BLOCK_SIZE_BITS(sb) + EXT4_CLUSTER_BITS(sb) + 3);
	else
		ext4_get_group_no_and_offset(sb, block, &group, NULL);
	return group;
}

49
/*
50
51
 * Calculate the block group number and offset into the block/cluster
 * allocation bitmap, given a block number
52
53
 */
void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
54
		ext4_group_t *blockgrpp, ext4_grpblk_t *offsetp)
55
{
Dave Kleikamp's avatar
Dave Kleikamp committed
56
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
57
58
	ext4_grpblk_t offset;

Dave Kleikamp's avatar
Dave Kleikamp committed
59
	blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
60
61
	offset = do_div(blocknr, EXT4_BLOCKS_PER_GROUP(sb)) >>
		EXT4_SB(sb)->s_cluster_bits;
62
63
64
	if (offsetp)
		*offsetp = offset;
	if (blockgrpp)
Dave Kleikamp's avatar
Dave Kleikamp committed
65
		*blockgrpp = blocknr;
66
67
68

}

69
70
71
72
73
74
75
/*
 * Check whether the 'block' lives within the 'block_group'. Returns 1 if so
 * and 0 otherwise.
 */
static inline int ext4_block_in_group(struct super_block *sb,
				      ext4_fsblk_t block,
				      ext4_group_t block_group)
{
	ext4_group_t actual_group;

	actual_group = ext4_get_group_number(sb, block);
	return (actual_group == block_group) ? 1 : 0;
}

83
84
85
/* Return the number of clusters used for file system metadata; this
 * represents the overhead needed by the file system.
 */
86
87
88
static unsigned ext4_num_overhead_clusters(struct super_block *sb,
					   ext4_group_t block_group,
					   struct ext4_group_desc *gdp)
89
{
90
91
92
93
	unsigned num_clusters;
	int block_cluster = -1, inode_cluster = -1, itbl_cluster = -1, i, c;
	ext4_fsblk_t start = ext4_group_first_block_no(sb, block_group);
	ext4_fsblk_t itbl_blk;
94
95
	struct ext4_sb_info *sbi = EXT4_SB(sb);

96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
	/* This is the number of clusters used by the superblock,
	 * block group descriptors, and reserved block group
	 * descriptor blocks */
	num_clusters = ext4_num_base_meta_clusters(sb, block_group);

	/*
	 * For the allocation bitmaps and inode table, we first need
	 * to check to see if the block is in the block group.  If it
	 * is, then check to see if the cluster is already accounted
	 * for in the clusters used for the base metadata cluster, or
	 * if we can increment the base metadata cluster to include
	 * that block.  Otherwise, we will have to track the cluster
	 * used for the allocation bitmap or inode table explicitly.
	 * Normally all of these blocks are contiguous, so the special
	 * case handling shouldn't be necessary except for *very*
	 * unusual file system layouts.
	 */
	if (ext4_block_in_group(sb, ext4_block_bitmap(sb, gdp), block_group)) {
114
115
		block_cluster = EXT4_B2C(sbi,
					 ext4_block_bitmap(sb, gdp) - start);
116
117
118
119
120
121
122
123
124
125
		if (block_cluster < num_clusters)
			block_cluster = -1;
		else if (block_cluster == num_clusters) {
			num_clusters++;
			block_cluster = -1;
		}
	}

	if (ext4_block_in_group(sb, ext4_inode_bitmap(sb, gdp), block_group)) {
		inode_cluster = EXT4_B2C(sbi,
126
					 ext4_inode_bitmap(sb, gdp) - start);
127
128
129
130
131
132
133
134
135
136
137
		if (inode_cluster < num_clusters)
			inode_cluster = -1;
		else if (inode_cluster == num_clusters) {
			num_clusters++;
			inode_cluster = -1;
		}
	}

	itbl_blk = ext4_inode_table(sb, gdp);
	for (i = 0; i < sbi->s_itb_per_group; i++) {
		if (ext4_block_in_group(sb, itbl_blk + i, block_group)) {
138
			c = EXT4_B2C(sbi, itbl_blk + i - start);
139
140
141
142
143
144
145
146
147
			if ((c < num_clusters) || (c == inode_cluster) ||
			    (c == block_cluster) || (c == itbl_cluster))
				continue;
			if (c == num_clusters) {
				num_clusters++;
				continue;
			}
			num_clusters++;
			itbl_cluster = c;
148
149
		}
	}
150
151
152
153
154
155
156

	if (block_cluster != -1)
		num_clusters++;
	if (inode_cluster != -1)
		num_clusters++;

	return num_clusters;
157
}
158

159
160
/* Return the number of clusters in the given block group, accounting
 * for a possibly short last group. */
static unsigned int num_clusters_in_group(struct super_block *sb,
					  ext4_group_t block_group)
{
	unsigned int blocks;

	if (block_group == ext4_get_groups_count(sb) - 1) {
		/*
		 * Even though mke2fs always initializes the first and
		 * last group, just in case some other tool was used,
		 * we need to make sure we calculate the right free
		 * blocks.
		 */
		blocks = ext4_blocks_count(EXT4_SB(sb)->s_es) -
			ext4_group_first_block_no(sb, block_group);
	} else
		blocks = EXT4_BLOCKS_PER_GROUP(sb);
	return EXT4_NUM_B2C(EXT4_SB(sb), blocks);
}

178
/* Initializes an uninitialized block bitmap */
179
static int ext4_init_block_bitmap(struct super_block *sb,
180
181
182
				   struct buffer_head *bh,
				   ext4_group_t block_group,
				   struct ext4_group_desc *gdp)
183
{
184
	unsigned int bit, bit_max;
185
	struct ext4_sb_info *sbi = EXT4_SB(sb);
186
187
	ext4_fsblk_t start, tmp;
	int flex_bg = 0;
188
	struct ext4_group_info *grp;
189
190
191
192
193

	J_ASSERT_BH(bh, buffer_locked(bh));

	/* If checksum is bad mark all blocks used to prevent allocation
	 * essentially implementing a per-group read-only flag. */
194
	if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
195
		grp = ext4_get_group_info(sb, block_group);
196
197
198
		if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
			percpu_counter_sub(&sbi->s_freeclusters_counter,
					   grp->bb_free);
199
		set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
200
201
202
203
204
205
		if (!EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
			int count;
			count = ext4_free_inodes_count(sb, gdp);
			percpu_counter_sub(&sbi->s_freeinodes_counter,
					   count);
		}
206
		set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
207
		return -EFSBADCRC;
208
	}
209
	memset(bh->b_data, 0, sb->s_blocksize);
210

211
	bit_max = ext4_num_base_meta_clusters(sb, block_group);
212
213
214
	if ((bit_max >> 3) >= bh->b_size)
		return -EFSCORRUPTED;

215
216
	for (bit = 0; bit < bit_max; bit++)
		ext4_set_bit(bit, bh->b_data);
217

218
	start = ext4_group_first_block_no(sb, block_group);
219

220
	if (ext4_has_feature_flex_bg(sb))
221
		flex_bg = 1;
222

223
224
225
	/* Set bits for block and inode bitmaps, and inode table */
	tmp = ext4_block_bitmap(sb, gdp);
	if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
226
		ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);
227

228
229
	tmp = ext4_inode_bitmap(sb, gdp);
	if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
230
		ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);
231

232
233
234
	tmp = ext4_inode_table(sb, gdp);
	for (; tmp < ext4_inode_table(sb, gdp) +
		     sbi->s_itb_per_group; tmp++) {
235
		if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
236
			ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);
237
	}
238

239
240
241
242
243
	/*
	 * Also if the number of blocks within the group is less than
	 * the blocksize * 8 ( which is the size of bitmap ), set rest
	 * of the block bitmap to 1
	 */
244
	ext4_mark_bitmap_end(num_clusters_in_group(sb, block_group),
245
			     sb->s_blocksize * 8, bh->b_data);
246
	ext4_block_bitmap_csum_set(sb, block_group, gdp, bh);
247
	ext4_group_desc_csum_set(sb, block_group, gdp);
248
	return 0;
249
250
}

251
252
253
/* Return the number of free blocks in a block group.  It is used when
 * the block bitmap is uninitialized, so we can't just count the bits
 * in the bitmap. */
254
255
256
unsigned ext4_free_clusters_after_init(struct super_block *sb,
				       ext4_group_t block_group,
				       struct ext4_group_desc *gdp)
257
{
258
259
	return num_clusters_in_group(sb, block_group) - 
		ext4_num_overhead_clusters(sb, block_group, gdp);
260
}
261

262
263
264
265
266
267
268
269
/*
 * The free blocks are managed by bitmaps.  A file system contains several
 * blocks groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count in the block.  The descriptors are loaded in memory
 * when a file system is mounted (see ext4_fill_super).
 */

/**
274
 * ext4_get_group_desc() -- load group descriptor from disk
275
276
277
278
279
 * @sb:			super block
 * @block_group:	given block group
 * @bh:			pointer to the buffer head to store the block
 *			group descriptor
 */
280
struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
281
					     ext4_group_t block_group,
282
					     struct buffer_head **bh)
283
{
284
285
	unsigned int group_desc;
	unsigned int offset;
286
	ext4_group_t ngroups = ext4_get_groups_count(sb);
287
	struct ext4_group_desc *desc;
288
	struct ext4_sb_info *sbi = EXT4_SB(sb);
289

290
	if (block_group >= ngroups) {
291
292
		ext4_error(sb, "block_group >= groups_count - block_group = %u,"
			   " groups_count = %u", block_group, ngroups);
293
294
295
296

		return NULL;
	}

297
298
	group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
	offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
299
	if (!sbi->s_group_desc[group_desc]) {
300
		ext4_error(sb, "Group descriptor not loaded - "
301
			   "block_group = %u, group_desc = %u, desc = %u",
302
			   block_group, group_desc, offset);
303
304
305
		return NULL;
	}

306
307
308
	desc = (struct ext4_group_desc *)(
		(__u8 *)sbi->s_group_desc[group_desc]->b_data +
		offset * EXT4_DESC_SIZE(sb));
309
310
	if (bh)
		*bh = sbi->s_group_desc[group_desc];
311
	return desc;
312
313
}

314
315
316
317
318
319
/*
 * Return the block number which was discovered to be invalid, or 0 if
 * the block bitmap is valid.
 */
static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb,
					    struct ext4_group_desc *desc,
320
					    ext4_group_t block_group,
321
					    struct buffer_head *bh)
322
{
323
	struct ext4_sb_info *sbi = EXT4_SB(sb);
324
325
	ext4_grpblk_t offset;
	ext4_grpblk_t next_zero_bit;
326
	ext4_fsblk_t blk;
327
328
	ext4_fsblk_t group_first_block;

329
	if (ext4_has_feature_flex_bg(sb)) {
330
331
332
333
334
335
		/* with FLEX_BG, the inode/block bitmaps and itable
		 * blocks may not be in the group at all
		 * so the bitmap validation will be skipped for those groups
		 * or it has to also read the block group where the bitmaps
		 * are located to verify they are set.
		 */
336
		return 0;
337
338
339
340
	}
	group_first_block = ext4_group_first_block_no(sb, block_group);

	/* check whether block bitmap block number is set */
341
342
	blk = ext4_block_bitmap(sb, desc);
	offset = blk - group_first_block;
343
	if (!ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
344
		/* bad block bitmap */
345
		return blk;
346
347

	/* check whether the inode bitmap block number is set */
348
349
	blk = ext4_inode_bitmap(sb, desc);
	offset = blk - group_first_block;
350
	if (!ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
351
		/* bad block bitmap */
352
		return blk;
353
354

	/* check whether the inode table block number is set */
355
356
	blk = ext4_inode_table(sb, desc);
	offset = blk - group_first_block;
357
	next_zero_bit = ext4_find_next_zero_bit(bh->b_data,
358
			EXT4_B2C(sbi, offset + sbi->s_itb_per_group),
359
360
			EXT4_B2C(sbi, offset));
	if (next_zero_bit <
361
	    EXT4_B2C(sbi, offset + sbi->s_itb_per_group))
362
363
		/* bad bitmap for inode tables */
		return blk;
364
365
	return 0;
}
366

367
368
369
370
/* Validate an uploaded block bitmap: verify its checksum and that the
 * metadata blocks are marked in-use.  On failure the group is flagged
 * corrupt and its free clusters are removed from the global counter. */
static int ext4_validate_block_bitmap(struct super_block *sb,
				      struct ext4_group_desc *desc,
				      ext4_group_t block_group,
				      struct buffer_head *bh)
{
	ext4_fsblk_t	blk;
	struct ext4_group_info *grp = ext4_get_group_info(sb, block_group);
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (buffer_verified(bh))
		return 0;
	if (EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
		return -EFSCORRUPTED;

	ext4_lock_group(sb, block_group);
	if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group,
			desc, bh))) {
		ext4_unlock_group(sb, block_group);
		ext4_error(sb, "bg %u: bad block bitmap checksum", block_group);
		if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
			percpu_counter_sub(&sbi->s_freeclusters_counter,
					   grp->bb_free);
		set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
		return -EFSBADCRC;
	}
	blk = ext4_valid_block_bitmap(sb, desc, block_group, bh);
	if (unlikely(blk != 0)) {
		ext4_unlock_group(sb, block_group);
		ext4_error(sb, "bg %u: block %llu: invalid block bitmap",
			   block_group, blk);
		if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
			percpu_counter_sub(&sbi->s_freeclusters_counter,
					   grp->bb_free);
		set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
		return -EFSCORRUPTED;
	}
	set_buffer_verified(bh);
	ext4_unlock_group(sb, block_group);
	return 0;
}

408
/**
409
 * ext4_read_block_bitmap_nowait()
410
411
412
 * @sb:			super block
 * @block_group:	given block group
 *
413
414
 * Read the bitmap for a given block_group,and validate the
 * bits for block/inode/inode tables are set in the bitmaps
415
416
417
 *
 * Return buffer_head on success or NULL in case of failure.
 */
418
struct buffer_head *
419
ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
420
{
421
	struct ext4_group_desc *desc;
422
	struct buffer_head *bh;
423
	ext4_fsblk_t bitmap_blk;
424
	int err;
425

426
	desc = ext4_get_group_desc(sb, block_group, NULL);
427
	if (!desc)
428
		return ERR_PTR(-EFSCORRUPTED);
429
	bitmap_blk = ext4_block_bitmap(sb, desc);
430
431
	bh = sb_getblk(sb, bitmap_blk);
	if (unlikely(!bh)) {
432
433
434
		ext4_error(sb, "Cannot get buffer for block bitmap - "
			   "block_group = %u, block_bitmap = %llu",
			   block_group, bitmap_blk);
435
		return ERR_PTR(-ENOMEM);
436
	}
437
438

	if (bitmap_uptodate(bh))
439
		goto verify;
440

441
	lock_buffer(bh);
442
443
	if (bitmap_uptodate(bh)) {
		unlock_buffer(bh);
444
		goto verify;
445
	}
446
	ext4_lock_group(sb, block_group);
447
	if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
448
		err = ext4_init_block_bitmap(sb, bh, block_group, desc);
449
		set_bitmap_uptodate(bh);
450
		set_buffer_uptodate(bh);
451
		ext4_unlock_group(sb, block_group);
Aneesh Kumar K.V's avatar
Aneesh Kumar K.V committed
452
		unlock_buffer(bh);
453
454
455
		if (err) {
			ext4_error(sb, "Failed to init block bitmap for group "
				   "%u: %d", block_group, err);
456
			goto out;
457
		}
458
		goto verify;
459
	}
460
	ext4_unlock_group(sb, block_group);
461
462
463
464
465
466
467
	if (buffer_uptodate(bh)) {
		/*
		 * if not uninit if bh is uptodate,
		 * bitmap is also uptodate
		 */
		set_bitmap_uptodate(bh);
		unlock_buffer(bh);
468
		goto verify;
469
470
	}
	/*
471
	 * submit the buffer_head for reading
472
	 */
473
	set_buffer_new(bh);
474
	trace_ext4_read_block_bitmap_load(sb, block_group);
475
476
	bh->b_end_io = ext4_end_bitmap_read;
	get_bh(bh);
477
	submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, bh);
478
	return bh;
479
verify:
480
481
482
483
484
	err = ext4_validate_block_bitmap(sb, desc, block_group, bh);
	if (err)
		goto out;
	return bh;
out:
485
	put_bh(bh);
486
	return ERR_PTR(err);
487
488
489
490
491
492
493
494
495
496
497
498
}

/* Wait for a bitmap read started by ext4_read_block_bitmap_nowait() to
 * complete, then validate it.  Returns 0 on success, negative errno on
 * error. */
int ext4_wait_block_bitmap(struct super_block *sb, ext4_group_t block_group,
			   struct buffer_head *bh)
{
	struct ext4_group_desc *desc;

	/* buffer_new is only set when a read was actually submitted */
	if (!buffer_new(bh))
		return 0;
	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return -EFSCORRUPTED;
	wait_on_buffer(bh);
	if (!buffer_uptodate(bh)) {
		ext4_error(sb, "Cannot read block bitmap - "
			   "block_group = %u, block_bitmap = %llu",
			   block_group, (unsigned long long) bh->b_blocknr);
		return -EIO;
	}
	clear_buffer_new(bh);
	/* Panic or remount fs read-only if block bitmap is invalid */
	return ext4_validate_block_bitmap(sb, desc, block_group, bh);
}

/* Synchronous wrapper: read and validate the block bitmap for
 * @block_group.  Returns the buffer head or an ERR_PTR. */
struct buffer_head *
ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
{
	struct buffer_head *bh;
	int err;

	bh = ext4_read_block_bitmap_nowait(sb, block_group);
	if (IS_ERR(bh))
		return bh;
	err = ext4_wait_block_bitmap(sb, block_group, bh);
	if (err) {
		put_bh(bh);
		return ERR_PTR(err);
	}
	return bh;
}

529
/**
530
 * ext4_has_free_clusters()
531
 * @sbi:	in-core super block structure.
532
533
 * @nclusters:	number of needed blocks
 * @flags:	flags from ext4_mb_new_blocks()
534
 *
535
 * Check if filesystem has nclusters free & available for allocation.
536
537
 * On success return 1, return 0 on failure.
 */
538
539
static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
				  s64 nclusters, unsigned int flags)
540
{
Lukas Czerner's avatar
Lukas Czerner committed
541
	s64 free_clusters, dirty_clusters, rsv, resv_clusters;
542
	struct percpu_counter *fcc = &sbi->s_freeclusters_counter;
543
	struct percpu_counter *dcc = &sbi->s_dirtyclusters_counter;
544

545
546
	free_clusters  = percpu_counter_read_positive(fcc);
	dirty_clusters = percpu_counter_read_positive(dcc);
Lukas Czerner's avatar
Lukas Czerner committed
547
	resv_clusters = atomic64_read(&sbi->s_resv_clusters);
548
549
550
551
552

	/*
	 * r_blocks_count should always be multiple of the cluster ratio so
	 * we are safe to do a plane bit shift only.
	 */
Lukas Czerner's avatar
Lukas Czerner committed
553
554
	rsv = (ext4_r_blocks_count(sbi->s_es) >> sbi->s_cluster_bits) +
	      resv_clusters;
555

Lukas Czerner's avatar
Lukas Czerner committed
556
	if (free_clusters - (nclusters + rsv + dirty_clusters) <
557
					EXT4_FREECLUSTERS_WATERMARK) {
558
		free_clusters  = percpu_counter_sum_positive(fcc);
559
		dirty_clusters = percpu_counter_sum_positive(dcc);
560
	}
561
562
	/* Check whether we have space after accounting for current
	 * dirty clusters & root reserved clusters.
563
	 */
Lukas Czerner's avatar
Lukas Czerner committed
564
	if (free_clusters >= (rsv + nclusters + dirty_clusters))
565
		return 1;
566

567
	/* Hm, nope.  Are (enough) root reserved clusters available? */
568
569
	if (uid_eq(sbi->s_resuid, current_fsuid()) ||
	    (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
570
	    capable(CAP_SYS_RESOURCE) ||
Lukas Czerner's avatar
Lukas Czerner committed
571
	    (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
572

Lukas Czerner's avatar
Lukas Czerner committed
573
574
575
576
577
578
		if (free_clusters >= (nclusters + dirty_clusters +
				      resv_clusters))
			return 1;
	}
	/* No free blocks. Let's see if we can dip into reserved pool */
	if (flags & EXT4_MB_USE_RESERVED) {
579
		if (free_clusters >= (nclusters + dirty_clusters))
580
581
582
583
			return 1;
	}

	return 0;
584
585
}

586
587
/* Reserve @nclusters clusters by adding them to the dirty-clusters
 * counter if the filesystem has room; returns 0 on success or -ENOSPC. */
int ext4_claim_free_clusters(struct ext4_sb_info *sbi,
			     s64 nclusters, unsigned int flags)
{
	if (ext4_has_free_clusters(sbi, nclusters, flags)) {
		percpu_counter_add(&sbi->s_dirtyclusters_counter, nclusters);
		return 0;
	} else
		return -ENOSPC;
}
595

596
/**
597
 * ext4_should_retry_alloc()
598
599
600
 * @sb:			super block
 * @retries		number of attemps has been made
 *
601
 * ext4_should_retry_alloc() is called when ENOSPC is returned, and if
602
 * it is profitable to retry the operation, this function will wait
Lucas De Marchi's avatar
Lucas De Marchi committed
603
 * for the current or committing transaction to complete, and then
604
 * return TRUE.  We will only retry once.
605
 */
606
int ext4_should_retry_alloc(struct super_block *sb, int *retries)
607
{
608
	if (!ext4_has_free_clusters(EXT4_SB(sb), 1, 0) ||
609
	    (*retries)++ > 1 ||
610
	    !EXT4_SB(sb)->s_journal)
611
612
		return 0;

613
	smp_mb();
614
615
616
617
618
	if (EXT4_SB(sb)->s_mb_free_pending == 0)
		return 0;

	jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);
	jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal);
619
	return 1;
620
621
}

622
/*
623
 * ext4_new_meta_blocks() -- allocate block for meta data (indexing) blocks
624
625
626
627
 *
 * @handle:             handle to this transaction
 * @inode:              file inode
 * @goal:               given target block(filesystem wide)
628
 * @count:		pointer to total number of clusters needed
629
630
 * @errp:               error code
 *
Theodore Ts'o's avatar
Theodore Ts'o committed
631
 * Return 1st allocated block number on success, *count stores total account
632
 * error stores in errp pointer
633
 */
634
ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
635
636
				  ext4_fsblk_t goal, unsigned int flags,
				  unsigned long *count, int *errp)
637
{
Theodore Ts'o's avatar
Theodore Ts'o committed
638
	struct ext4_allocation_request ar;
639
	ext4_fsblk_t ret;
Theodore Ts'o's avatar
Theodore Ts'o committed
640
641
642
643
644
645

	memset(&ar, 0, sizeof(ar));
	/* Fill with neighbour allocated blocks */
	ar.inode = inode;
	ar.goal = goal;
	ar.len = count ? *count : 1;
646
	ar.flags = flags;
Theodore Ts'o's avatar
Theodore Ts'o committed
647
648
649
650

	ret = ext4_mb_new_blocks(handle, &ar, errp);
	if (count)
		*count = ar.len;
651
	/*
652
653
	 * Account for the allocated meta blocks.  We will never
	 * fail EDQUOT for metdata, but we do account for it.
654
	 */
655
	if (!(*errp) && (flags & EXT4_MB_DELALLOC_RESERVED)) {
656
657
		dquot_alloc_block_nofail(inode,
				EXT4_C2B(EXT4_SB(inode->i_sb), ar.len));
658
659
	}
	return ret;
660
661
}

662
/**
663
 * ext4_count_free_clusters() -- count filesystem free clusters
664
665
 * @sb:		superblock
 *
666
 * Adds up the number of free clusters from each block group.
667
 */
668
ext4_fsblk_t ext4_count_free_clusters(struct super_block *sb)
669
{
670
671
	ext4_fsblk_t desc_count;
	struct ext4_group_desc *gdp;
672
	ext4_group_t i;
673
	ext4_group_t ngroups = ext4_get_groups_count(sb);
674
	struct ext4_group_info *grp;
675
676
677
#ifdef EXT4FS_DEBUG
	struct ext4_super_block *es;
	ext4_fsblk_t bitmap_count;
678
	unsigned int x;
679
680
	struct buffer_head *bitmap_bh = NULL;

681
	es = EXT4_SB(sb)->s_es;
682
683
684
685
686
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;

	for (i = 0; i < ngroups; i++) {
687
		gdp = ext4_get_group_desc(sb, i, NULL);
688
689
		if (!gdp)
			continue;
690
691
692
693
694
		grp = NULL;
		if (EXT4_SB(sb)->s_group_info)
			grp = ext4_get_group_info(sb, i);
		if (!grp || !EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
			desc_count += ext4_free_group_clusters(sb, gdp);
695
		brelse(bitmap_bh);
696
		bitmap_bh = ext4_read_block_bitmap(sb, i);
697
698
		if (IS_ERR(bitmap_bh)) {
			bitmap_bh = NULL;
699
			continue;
700
		}
701

702
		x = ext4_count_free(bitmap_bh->b_data,
703
				    EXT4_CLUSTERS_PER_GROUP(sb) / 8);
704
		printk(KERN_DEBUG "group %u: stored = %d, counted = %u\n",
705
			i, ext4_free_group_clusters(sb, gdp), x);
706
707
708
		bitmap_count += x;
	}
	brelse(bitmap_bh);
709
710
	printk(KERN_DEBUG "ext4_count_free_clusters: stored = %llu"
	       ", computed = %llu, %llu\n",
711
	       EXT4_NUM_B2C(EXT4_SB(sb), ext4_free_blocks_count(es)),
712
	       desc_count, bitmap_count);
713
714
715
716
	return bitmap_count;
#else
	desc_count = 0;
	for (i = 0; i < ngroups; i++) {
717
		gdp = ext4_get_group_desc(sb, i, NULL);
718
719
		if (!gdp)
			continue;
720
721
722
723
724
		grp = NULL;
		if (EXT4_SB(sb)->s_group_info)
			grp = ext4_get_group_info(sb, i);
		if (!grp || !EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
			desc_count += ext4_free_group_clusters(sb, gdp);
725
726
727
728
729
730
	}

	return desc_count;
#endif
}

731
/* Return 1 if @a is a non-negative power of @b (including b^0 == 1 when
 * a == b handled by repeated division), 0 otherwise. */
static inline int test_root(ext4_group_t a, int b)
{
	while (1) {
		if (a < b)
			return 0;
		if (a == b)
			return 1;
		if ((a % b) != 0)
			return 0;
		a = a / b;
	}
}

/**
745
 *	ext4_bg_has_super - number of blocks used by the superblock in group
746
747
748
749
750
751
 *	@sb: superblock for filesystem
 *	@group: group number to check
 *
 *	Return the number of blocks used by the superblock (primary or backup)
 *	in this group.  Currently this will be only 0 or 1.
 */
752
int ext4_bg_has_super(struct super_block *sb, ext4_group_t group)
753
{
754
755
756
757
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	if (group == 0)
		return 1;
758
	if (ext4_has_feature_sparse_super2(sb)) {
759
760
761
762
763
		if (group == le32_to_cpu(es->s_backup_bgs[0]) ||
		    group == le32_to_cpu(es->s_backup_bgs[1]))
			return 1;
		return 0;
	}
764
	if ((group <= 1) || !ext4_has_feature_sparse_super(sb))
765
766
		return 1;
	if (!(group & 1))
767
		return 0;
768
769
770
771
772
	if (test_root(group, 3) || (test_root(group, 5)) ||
	    test_root(group, 7))
		return 1;

	return 0;
773
774
}

775
776
/* Number of group-descriptor blocks in @group under META_BG layout:
 * 1 for the first, second, and last group of each metagroup, else 0. */
static unsigned long ext4_bg_num_gdb_meta(struct super_block *sb,
					ext4_group_t group)
{
	unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);
	ext4_group_t first = metagroup * EXT4_DESC_PER_BLOCK(sb);
	ext4_group_t last = first + EXT4_DESC_PER_BLOCK(sb) - 1;

	if (group == first || group == first + 1 || group == last)
		return 1;
	return 0;
}

787
788
/* Number of group-descriptor blocks in @group for the traditional
 * (non-META_BG) layout: 0 unless the group carries a superblock copy. */
static unsigned long ext4_bg_num_gdb_nometa(struct super_block *sb,
					ext4_group_t group)
{
	if (!ext4_bg_has_super(sb, group))
		return 0;

	if (ext4_has_feature_meta_bg(sb))
		return le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
	else
		return EXT4_SB(sb)->s_gdb_count;
}

/**
800
 *	ext4_bg_num_gdb - number of blocks used by the group table in group
801
802
803
804
805
806
807
 *	@sb: superblock for filesystem
 *	@group: group number to check
 *
 *	Return the number of blocks used by the group descriptor table
 *	(primary or backup) in this group.  In the future there may be a
 *	different number of descriptor blocks in each group.
 */
808
unsigned long ext4_bg_num_gdb(struct super_block *sb, ext4_group_t group)
809
810
{
	unsigned long first_meta_bg =
811
812
			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
	unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);
813

814
	if (!ext4_has_feature_meta_bg(sb) || metagroup < first_meta_bg)
815
		return ext4_bg_num_gdb_nometa(sb, group);
816

817
	return ext4_bg_num_gdb_meta(sb,group);
818
819

}
820

821
/*
822
 * This function returns the number of file system metadata clusters at
823
824
 * the beginning of a block group, including the reserved gdt blocks.
 */
Eric Sandeen's avatar
Eric Sandeen committed
825
static unsigned ext4_num_base_meta_clusters(struct super_block *sb,
826
				     ext4_group_t block_group)
827
828
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
829
	unsigned num;
830
831
832
833

	/* Check for superblock and gdt backups in this group */
	num = ext4_bg_has_super(sb, block_group);

834
	if (!ext4_has_feature_meta_bg(sb) ||
835
836
837
838
839
840
841
842
843
	    block_group < le32_to_cpu(sbi->s_es->s_first_meta_bg) *
			  sbi->s_desc_per_block) {
		if (num) {
			num += ext4_bg_num_gdb(sb, block_group);
			num += le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks);
		}
	} else { /* For META_BG_BLOCK_GROUPS */
		num += ext4_bg_num_gdb(sb, block_group);
	}
844
	return EXT4_NUM_B2C(sbi, num);
845
}
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
/**
 *	ext4_inode_to_goal_block - return a hint for block allocation
 *	@inode: inode for block allocation
 *
 *	Return the ideal location to start allocating blocks for a
 *	newly created inode.
 */
ext4_fsblk_t ext4_inode_to_goal_block(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	ext4_group_t block_group;
	ext4_grpblk_t colour;
	int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
	ext4_fsblk_t bg_start;
	ext4_fsblk_t last_block;

	block_group = ei->i_block_group;
	if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
		/*
		 * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME
		 * block groups per flexgroup, reserve the first block
		 * group for directories and special files.  Regular
		 * files will start at the second block group.  This
		 * tends to speed up directory access and improves
		 * fsck times.
		 */
		block_group &= ~(flex_size-1);
		if (S_ISREG(inode->i_mode))
			block_group++;
	}
	bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
	last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

	/*
	 * If we are doing delayed allocation, we don't need take
	 * colour into account.
	 */
	if (test_opt(inode->i_sb, DELALLOC))
		return bg_start;

	if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
		colour = (current->pid % 16) *
			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	else
		colour = (current->pid % 16) * ((last_block - bg_start) / 16);
	return bg_start + colour;
}