Commit 7bbbf2c2 authored by Linus Torvalds

Merge tag 'gfs2-4.21.fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2

Pull gfs2 updates from Bob Peterson:

 - Enhancements and performance improvements to journal replay (Abhi
   Das)

 - Cleanup of gfs2_is_ordered and gfs2_is_writeback (Andreas
   Gruenbacher)

 - Fix a potential double-free in inode creation (Andreas Gruenbacher)

 - Fix the bitmap search loop that was searching too far (Andreas
   Gruenbacher)

 - Various cleanups (Andreas Gruenbacher, Bob Peterson)

 - Implement Steve Whitehouse's patch to dump nrpages for inodes (Bob
   Peterson)

 - Fix a withdraw bug where stuffed journaled data files didn't allocate
   enough journal space to be grown (Bob Peterson)

* tag 'gfs2-4.21.fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2:
  gfs2: take jdata unstuff into account in do_grow
  gfs2: Dump nrpages for inodes and their glocks
  gfs2: Fix loop in gfs2_rbm_find
  gfs2: Get rid of potential double-freeing in gfs2_create_inode
  gfs2: Remove vestigial bd_ops
  gfs2: read journal in large chunks to locate the head
  gfs2: add a helper function to get_log_header that can be used elsewhere
  gfs2: changes to gfs2_log_XXX_bio
  gfs2: add more timing info to journal recovery process
  gfs2: Fix the gfs2_invalidatepage description
  gfs2: Clean up gfs2_is_{ordered,writeback}
parents b71acb0e bc020561
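
For context on the gfs2_is_{ordered,writeback} cleanup in the diff below: the two helpers now take the superblock rather than the inode, so every caller has to check the per-inode jdata flag before consulting the mount-wide data mode. A minimal standalone sketch of that selection order follows; the types and names are simplified stand-ins for illustration, not the real gfs2 structures.

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for gfs2_sbd / gfs2_inode, just enough for the sketch. */
enum data_mode { DATA_WRITEBACK, DATA_ORDERED };

struct sbd { enum data_mode ar_data; };
struct ino { bool jdata; const struct sbd *sbd; };

/* After the cleanup, the mount-wide helpers look only at the superblock. */
static bool is_ordered(const struct sbd *sdp)   { return sdp->ar_data == DATA_ORDERED; }
static bool is_writeback(const struct sbd *sdp) { return sdp->ar_data == DATA_WRITEBACK; }

/*
 * Callers such as gfs2_set_aops therefore test jdata first: a jdata inode
 * always takes the journaled-data path, whatever the mount-wide data= mode.
 */
static const char *pick_aops(const struct ino *ip)
{
        if (ip->jdata)
                return "jdata aops";
        else if (is_writeback(ip->sbd))
                return "writeback aops";
        else if (is_ordered(ip->sbd))
                return "ordered aops";
        return "unreachable for a valid mount";
}

int main(void)
{
        struct sbd sdp = { .ar_data = DATA_ORDERED };
        struct ino ip = { .jdata = true, .sbd = &sdp };

        printf("%s\n", pick_aops(&ip)); /* prints "jdata aops" */
        return 0;
}

gfs2_ordered_add_inode in the log.h hunk applies the same ordering: jdata inodes bail out before the mount-wide ordered check is made.
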
@@ -820,10 +820,10 @@ out:
* @page: the page that's being released
* @gfp_mask: passed from Linux VFS, ignored by us
*
* Call try_to_free_buffers() if the buffers in this page can be
* released.
* Calls try_to_free_buffers() to free the buffers and put the page if the
* buffers can be released.
*
* Returns: 0
* Returns: 1 if the page was put or else 0
*/
int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
@@ -930,14 +930,14 @@ static const struct address_space_operations gfs2_jdata_aops = {
void gfs2_set_aops(struct inode *inode)
{
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
if (gfs2_is_writeback(ip))
if (gfs2_is_jdata(ip))
inode->i_mapping->a_ops = &gfs2_jdata_aops;
else if (gfs2_is_writeback(sdp))
inode->i_mapping->a_ops = &gfs2_writeback_aops;
else if (gfs2_is_ordered(ip))
else if (gfs2_is_ordered(sdp))
inode->i_mapping->a_ops = &gfs2_ordered_aops;
else if (gfs2_is_jdata(ip))
inode->i_mapping->a_ops = &gfs2_jdata_aops;
else
BUG();
}
@@ -14,6 +14,7 @@
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/iomap.h>
#include <linux/ktime.h>
#include "gfs2.h"
#include "incore.h"
@@ -2083,6 +2084,8 @@ static int do_grow(struct inode *inode, u64 size)
}
error = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS + RES_RG_BIT +
(unstuff &&
gfs2_is_jdata(ip) ? RES_JDATA : 0) +
(sdp->sd_args.ar_quota == GFS2_QUOTA_OFF ?
0 : RES_QUOTA), 0);
if (error)
@@ -2248,7 +2251,9 @@ int gfs2_map_journal_extents(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd)
unsigned int shift = sdp->sd_sb.sb_bsize_shift;
u64 size;
int rc;
ktime_t start, end;
start = ktime_get();
lblock_stop = i_size_read(jd->jd_inode) >> shift;
size = (lblock_stop - lblock) << shift;
jd->nr_extents = 0;
@@ -2268,8 +2273,9 @@ int gfs2_map_journal_extents(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd)
lblock += (bh.b_size >> ip->i_inode.i_blkbits);
} while(size > 0);
fs_info(sdp, "journal %d mapped with %u extents\n", jd->jd_jid,
jd->nr_extents);
end = ktime_get();
fs_info(sdp, "journal %d mapped with %u extents in %lldms\n", jd->jd_jid,
jd->nr_extents, ktime_ms_delta(end, start));
return 0;
fail:
@@ -1777,7 +1777,7 @@ static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
*
*/
void gfs2_dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
{
const struct gfs2_glock_operations *glops = gl->gl_ops;
unsigned long long dtime;
@@ -202,7 +202,7 @@ extern int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
struct gfs2_holder *gh);
extern int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs);
extern void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs);
extern void gfs2_dump_glock(struct seq_file *seq, const struct gfs2_glock *gl);
extern void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl);
#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { gfs2_dump_glock(NULL, gl); BUG(); } } while(0)
extern __printf(2, 3)
void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...);
@@ -28,6 +28,7 @@
#include "util.h"
#include "trans.h"
#include "dir.h"
#include "lops.h"
struct workqueue_struct *gfs2_freeze_wq;
@@ -466,17 +467,25 @@ static int inode_go_lock(struct gfs2_holder *gh)
*
*/
static void inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
static void inode_go_dump(struct seq_file *seq, struct gfs2_glock *gl)
{
const struct gfs2_inode *ip = gl->gl_object;
struct gfs2_inode *ip = gl->gl_object;
struct inode *inode = &ip->i_inode;
unsigned long nrpages;
if (ip == NULL)
return;
gfs2_print_dbg(seq, " I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu\n",
xa_lock_irq(&inode->i_data.i_pages);
nrpages = inode->i_data.nrpages;
xa_unlock_irq(&inode->i_data.i_pages);
gfs2_print_dbg(seq, " I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu p:%lu\n",
(unsigned long long)ip->i_no_formal_ino,
(unsigned long long)ip->i_no_addr,
IF2DT(ip->i_inode.i_mode), ip->i_flags,
(unsigned int)ip->i_diskflags,
(unsigned long long)i_size_read(&ip->i_inode));
(unsigned long long)i_size_read(inode), nrpages);
}
/**
@@ -165,7 +165,6 @@ struct gfs2_bufdata {
u64 bd_blkno;
struct list_head bd_list;
const struct gfs2_log_operations *bd_ops;
struct gfs2_trans *bd_tr;
struct list_head bd_ail_st_list;
@@ -244,7 +243,7 @@ struct gfs2_glock_operations {
int (*go_demote_ok) (const struct gfs2_glock *gl);
int (*go_lock) (struct gfs2_holder *gh);
void (*go_unlock) (struct gfs2_holder *gh);
void (*go_dump)(struct seq_file *seq, const struct gfs2_glock *gl);
void (*go_dump)(struct seq_file *seq, struct gfs2_glock *gl);
void (*go_callback)(struct gfs2_glock *gl, bool remote);
const int go_type;
const unsigned long go_flags;
@@ -744,17 +744,19 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
the gfs2 structures. */
if (default_acl) {
error = __gfs2_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
if (error)
goto fail_gunlock3;
posix_acl_release(default_acl);
default_acl = NULL;
}
if (acl) {
if (!error)
error = __gfs2_set_acl(inode, acl, ACL_TYPE_ACCESS);
error = __gfs2_set_acl(inode, acl, ACL_TYPE_ACCESS);
if (error)
goto fail_gunlock3;
posix_acl_release(acl);
acl = NULL;
}
if (error)
goto fail_gunlock3;
error = security_inode_init_security(&ip->i_inode, &dip->i_inode, name,
&gfs2_initxattrs, NULL);
if (error)
@@ -789,10 +791,8 @@ fail_free_inode:
}
gfs2_rsqa_delete(ip, NULL);
fail_free_acls:
if (default_acl)
posix_acl_release(default_acl);
if (acl)
posix_acl_release(acl);
posix_acl_release(default_acl);
posix_acl_release(acl);
fail_gunlock:
gfs2_dir_no_add(&da);
gfs2_glock_dq_uninit(ghs);
@@ -30,16 +30,14 @@ static inline int gfs2_is_jdata(const struct gfs2_inode *ip)
return ip->i_diskflags & GFS2_DIF_JDATA;
}
static inline int gfs2_is_writeback(const struct gfs2_inode *ip)
static inline bool gfs2_is_ordered(const struct gfs2_sbd *sdp)
{
const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
return (sdp->sd_args.ar_data == GFS2_DATA_WRITEBACK) && !gfs2_is_jdata(ip);
return sdp->sd_args.ar_data == GFS2_DATA_ORDERED;
}
static inline int gfs2_is_ordered(const struct gfs2_inode *ip)
static inline bool gfs2_is_writeback(const struct gfs2_sbd *sdp)
{
const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
return (sdp->sd_args.ar_data == GFS2_DATA_ORDERED) && !gfs2_is_jdata(ip);
return sdp->sd_args.ar_data == GFS2_DATA_WRITEBACK;
}
static inline int gfs2_is_dir(const struct gfs2_inode *ip)
@@ -605,7 +605,6 @@ void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
bd->bd_blkno = bh->b_blocknr;
gfs2_remove_from_ail(bd); /* drops ref on bh */
bd->bd_bh = NULL;
bd->bd_ops = &gfs2_revoke_lops;
sdp->sd_log_num_revoke++;
atomic_inc(&gl->gl_revokes);
set_bit(GLF_LFLUSH, &gl->gl_flags);
@@ -734,7 +733,7 @@ void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
lh->lh_crc = cpu_to_be32(crc);
gfs2_log_write(sdp, page, sb->s_blocksize, 0, addr);
gfs2_log_flush_bio(sdp, REQ_OP_WRITE, op_flags);
gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE | op_flags);
log_flush_wait(sdp);
}
@@ -811,7 +810,7 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
gfs2_ordered_write(sdp);
lops_before_commit(sdp, tr);
gfs2_log_flush_bio(sdp, REQ_OP_WRITE, 0);
gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE);
if (sdp->sd_log_head != sdp->sd_log_flush_head) {
log_flush_wait(sdp);
@@ -51,12 +51,11 @@ static inline void gfs2_log_pointers_init(struct gfs2_sbd *sdp,
static inline void gfs2_ordered_add_inode(struct gfs2_inode *ip)
{
struct gfs2_sbd *sdp;
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
if (!gfs2_is_ordered(ip))
if (gfs2_is_jdata(ip) || !gfs2_is_ordered(sdp))
return;
sdp = GFS2_SB(&ip->i_inode);
if (!test_bit(GIF_ORDERED, &ip->i_flags)) {
spin_lock(&sdp->sd_ordered_lock);
if (!test_and_set_bit(GIF_ORDERED, &ip->i_flags))
@@ -17,7 +17,9 @@
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/list_sort.h>
#include <linux/blkdev.h>
#include "bmap.h"
#include "dir.h"
#include "gfs2.h"
#include "incore.h"
@@ -193,7 +195,6 @@ static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp, struct bio_vec *bvec,
/**
* gfs2_end_log_write - end of i/o to the log
* @bio: The bio
* @error: Status of i/o request
*
* Each bio_vec contains either data from the pagecache or data
* relating to the log itself. Here we iterate over the bio_vec
@@ -228,83 +229,86 @@ static void gfs2_end_log_write(struct bio *bio)
}
/**
* gfs2_log_flush_bio - Submit any pending log bio
* @sdp: The superblock
* @op: REQ_OP
* @op_flags: req_flag_bits
* gfs2_log_submit_bio - Submit any pending log bio
* @biop: Address of the bio pointer
* @opf: REQ_OP | op_flags
*
* Submit any pending part-built or full bio to the block device. If
* there is no pending bio, then this is a no-op.
*/
void gfs2_log_flush_bio(struct gfs2_sbd *sdp, int op, int op_flags)
void gfs2_log_submit_bio(struct bio **biop, int opf)
{
if (sdp->sd_log_bio) {
struct bio *bio = *biop;
if (bio) {
struct gfs2_sbd *sdp = bio->bi_private;
atomic_inc(&sdp->sd_log_in_flight);
bio_set_op_attrs(sdp->sd_log_bio, op, op_flags);
submit_bio(sdp->sd_log_bio);
sdp->sd_log_bio = NULL;
bio->bi_opf = opf;
submit_bio(bio);
*biop = NULL;
}
}
/**
* gfs2_log_alloc_bio - Allocate a new bio for log writing
* @sdp: The superblock
* @blkno: The next device block number we want to write to
* gfs2_log_alloc_bio - Allocate a bio
* @sdp: The super block
* @blkno: The device block number we want to write to
* @end_io: The bi_end_io callback
*
* This should never be called when there is a cached bio in the
* super block. When it returns, there will be a cached bio in the
* super block which will have as many bio_vecs as the device is
* happy to handle.
* Allocate a new bio, initialize it with the given parameters and return it.
*
* Returns: Newly allocated bio
* Returns: The newly allocated bio
*/
static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno)
static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno,
bio_end_io_t *end_io)
{
struct super_block *sb = sdp->sd_vfs;
struct bio *bio;
BUG_ON(sdp->sd_log_bio);
struct bio *bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
bio->bi_iter.bi_sector = blkno * (sb->s_blocksize >> 9);
bio_set_dev(bio, sb->s_bdev);
bio->bi_end_io = gfs2_end_log_write;
bio->bi_end_io = end_io;
bio->bi_private = sdp;
sdp->sd_log_bio = bio;
return bio;
}
/**
* gfs2_log_get_bio - Get cached log bio, or allocate a new one
* @sdp: The superblock
* @sdp: The super block
* @blkno: The device block number we want to write to
* @bio: The bio to get or allocate
* @op: REQ_OP
* @end_io: The bi_end_io callback
* @flush: Always flush the current bio and allocate a new one?
*
* If there is a cached bio, then if the next block number is sequential
* with the previous one, return it, otherwise flush the bio to the
* device. If there is not a cached bio, or we just flushed it, then
* device. If there is no cached bio, or we just flushed it, then
* allocate a new one.
*
* Returns: The bio to use for log writes
*/
static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno)
static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno,
struct bio **biop, int op,
bio_end_io_t *end_io, bool flush)
{
struct bio *bio = sdp->sd_log_bio;
u64 nblk;
struct bio *bio = *biop;
if (bio) {
u64 nblk;
nblk = bio_end_sector(bio);
nblk >>= sdp->sd_fsb2bb_shift;
if (blkno == nblk)
if (blkno == nblk && !flush)
return bio;
gfs2_log_flush_bio(sdp, REQ_OP_WRITE, 0);
gfs2_log_submit_bio(biop, op);
}
return gfs2_log_alloc_bio(sdp, blkno);
*biop = gfs2_log_alloc_bio(sdp, blkno, end_io);
return *biop;
}
/**
@@ -326,11 +330,12 @@ void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page,
struct bio *bio;
int ret;
bio = gfs2_log_get_bio(sdp, blkno);
bio = gfs2_log_get_bio(sdp, blkno, &sdp->sd_log_bio, REQ_OP_WRITE,
gfs2_end_log_write, false);
ret = bio_add_page(bio, page, size, offset);
if (ret == 0) {
gfs2_log_flush_bio(sdp, REQ_OP_WRITE, 0);
bio = gfs2_log_alloc_bio(sdp, blkno);
bio = gfs2_log_get_bio(sdp, blkno, &sdp->sd_log_bio,
REQ_OP_WRITE, gfs2_end_log_write, true);
ret = bio_add_page(bio, page, size, offset);
WARN_ON(ret == 0);
}
@@ -370,6 +375,184 @@ void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
gfs2_log_bmap(sdp));
}
/**
* gfs2_end_log_read - end I/O callback for reads from the log
* @bio: The bio
*
* Simply unlock the pages in the bio. The main thread will wait on them and
* process them in order as necessary.
*/
static void gfs2_end_log_read(struct bio *bio)
{
struct page *page;
struct bio_vec *bvec;
int i;
bio_for_each_segment_all(bvec, bio, i) {
page = bvec->bv_page;
if (bio->bi_status) {
int err = blk_status_to_errno(bio->bi_status);
SetPageError(page);
mapping_set_error(page->mapping, err);
}
unlock_page(page);
}
bio_put(bio);
}
/**
* gfs2_jhead_pg_srch - Look for the journal head in a given page.
* @jd: The journal descriptor
* @page: The page to look in
*
* Returns: 1 if found, 0 otherwise.
*/
static bool gfs2_jhead_pg_srch(struct gfs2_jdesc *jd,
struct gfs2_log_header_host *head,
struct page *page)
{
struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
struct gfs2_log_header_host uninitialized_var(lh);
void *kaddr = kmap_atomic(page);
unsigned int offset;
bool ret = false;
for (offset = 0; offset < PAGE_SIZE; offset += sdp->sd_sb.sb_bsize) {
if (!__get_log_header(sdp, kaddr + offset, 0, &lh)) {
if (lh.lh_sequence > head->lh_sequence)
*head = lh;
else {
ret = true;
break;
}
}
}
kunmap_atomic(kaddr);
return ret;
}
/**
* gfs2_jhead_process_page - Search/cleanup a page
* @jd: The journal descriptor
* @index: Index of the page to look into
* @done: If set, perform only cleanup, else search and set if found.
*
* Find the page with 'index' in the journal's mapping. Search the page for
* the journal head if requested (cleanup == false). Release refs on the
* page so the page cache can reclaim it (put_page() twice). We grabbed a
* reference on this page two times, first when we did a find_or_create_page()
* to obtain the page to add it to the bio and second when we do a
* find_get_page() here to get the page to wait on while I/O on it is being
* completed.
* This function is also used to free up a page we might've grabbed but not
* used. Maybe we added it to a bio, but not submitted it for I/O. Or we
* submitted the I/O, but we already found the jhead so we only need to drop
* our references to the page.
*/
static void gfs2_jhead_process_page(struct gfs2_jdesc *jd, unsigned long index,
struct gfs2_log_header_host *head,
bool *done)
{
struct page *page;
page = find_get_page(jd->jd_inode->i_mapping, index);
wait_on_page_locked(page);
if (PageError(page))
*done = true;
if (!*done)
*done = gfs2_jhead_pg_srch(jd, head, page);
put_page(page); /* Once for find_get_page */
put_page(page); /* Once more for find_or_create_page */
}
/**
* gfs2_find_jhead - find the head of a log
* @jd: The journal descriptor
* @head: The log descriptor for the head of the log is returned here
*
* Do a search of a journal by reading it in large chunks using bios and find
* the valid log entry with the highest sequence number. (i.e. the log head)
*
* Returns: 0 on success, errno otherwise
*/
int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head)
{
struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
struct address_space *mapping = jd->jd_inode->i_mapping;
struct gfs2_journal_extent *je;
u32 block, read_idx = 0, submit_idx = 0, index = 0;
int shift = PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift;
int blocks_per_page = 1 << shift, sz, ret = 0;
struct bio *bio = NULL;
struct page *page;
bool done = false;
errseq_t since;
memset(head, 0, sizeof(*head));
if (list_empty(&jd->extent_list))
gfs2_map_journal_extents(sdp, jd);
since = filemap_sample_wb_err(mapping);
list_for_each_entry(je, &jd->extent_list, list) {
for (block = 0; block < je->blocks; block += blocks_per_page) {
index = (je->lblock + block) >> shift;
page = find_or_create_page(mapping, index, GFP_NOFS);
if (!page) {
ret = -ENOMEM;
done = true;
goto out;
}
if (bio) {
sz = bio_add_page(bio, page, PAGE_SIZE, 0);
if (sz == PAGE_SIZE)
goto page_added;
submit_idx = index;
submit_bio(bio);
bio = NULL;
}
bio = gfs2_log_alloc_bio(sdp,
je->dblock + (index << shift),
gfs2_end_log_read);
bio->bi_opf = REQ_OP_READ;
sz = bio_add_page(bio, page, PAGE_SIZE, 0);
gfs2_assert_warn(sdp, sz == PAGE_SIZE);
page_added:
if (submit_idx <= read_idx + BIO_MAX_PAGES) {
/* Keep at least one bio in flight */
continue;
}
gfs2_jhead_process_page(jd, read_idx++, head, &done);
if (done)
goto out; /* found */
}
}
out:
if (bio)
submit_bio(bio);
while (read_idx <= index)
gfs2_jhead_process_page(jd, read_idx++, head, &done);
if (!ret)
ret = filemap_check_wb_err(mapping, since);
return ret;
}
static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type,
u32 ld_length, u32 ld_data1)
{
@@ -30,8 +30,10 @@ extern u64 gfs2_log_bmap(struct gfs2_sbd *sdp);
extern void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page,
unsigned size, unsigned offset, u64 blkno);
extern void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page);
extern void gfs2_log_flush_bio(struct gfs2_sbd *sdp, int op, int op_flags);
extern void gfs2_log_submit_bio(struct bio **biop, int opf);
extern void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh);
extern int gfs2_find_jhead(struct gfs2_jdesc *jd,
struct gfs2_log_header_host *head);
static inline unsigned int buf_limit(struct gfs2_sbd *sdp)
{
@@ -41,6 +41,7 @@
#include "dir.h"
#include "meta_io.h"
#include "trace_gfs2.h"
#include "lops.h"
#define DO 0
#define UNDO 1
@@ -120,6 +120,35 @@ void gfs2_revoke_clean(struct gfs2_jdesc *jd)
}
}
int __get_log_header(struct gfs2_sbd *sdp, const struct gfs2_log_header *lh,
unsigned int blkno, struct gfs2_log_header_host *head)
{
u32 hash, crc;