Commit e18b890b authored by Christoph Lameter, committed by Linus Torvalds

[PATCH] slab: remove kmem_cache_t

Replace all uses of kmem_cache_t with struct kmem_cache.

The patch was generated using the following script:

	#!/bin/sh
	#
	# Replace one string by another in all the kernel sources.
	#

	set -e

	for file in `find * -name "*.c" -o -name "*.h"|xargs grep -l $1`; do
		quilt add $file
		sed -e "1,\$s/$1/$2/g" $file >/tmp/$$
		mv /tmp/$$ $file
		quilt refresh
	done

The script was run like this

	sh replace kmem_cache_t "struct kmem_cache"
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 441e143e
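
Illustration (editorial note, not part of the patch): kmem_cache_t was only a typedef for struct kmem_cache, so the substitution is purely textual. A minimal sketch of a typical caller, assuming the 2.6.19-era six-argument kmem_cache_create() and the constructor signature seen throughout the hunks below; the "foo" names are hypothetical:

	#include <linux/init.h>
	#include <linux/slab.h>
	#include <linux/string.h>

	struct foo { int bar; };		/* hypothetical cached object */

	static struct kmem_cache *foo_cachep;	/* was: static kmem_cache_t *foo_cachep; */

	/* constructors now take struct kmem_cache * instead of kmem_cache_t * */
	static void foo_ctor(void *obj, struct kmem_cache *cachep, unsigned long flags)
	{
		memset(obj, 0, sizeof(struct foo));
	}

	static int __init foo_init(void)
	{
		/* name, object size, align, flags, ctor, dtor -- unchanged by this patch */
		foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo), 0,
					       SLAB_HWCACHE_ALIGN, foo_ctor, NULL);
		return foo_cachep ? 0 : -ENOMEM;
	}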
@@ -77,7 +77,7 @@ To get this part of the dma_ API, you must #include <linux/dmapool.h>
Many drivers need lots of small dma-coherent memory regions for DMA
descriptors or I/O buffers. Rather than allocating in units of a page
or more using dma_alloc_coherent(), you can use DMA pools. These work
much like a kmem_cache_t, except that they use the dma-coherent allocator
much like a struct kmem_cache, except that they use the dma-coherent allocator
not __get_free_pages(). Also, they understand common hardware constraints
for alignment, like queue heads needing to be aligned on N byte boundaries.
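
Illustration (editorial note, not part of the patch): a minimal sketch of the dma_pool calls this passage describes, in the fragment style DMA-API.txt itself uses; "dev" is the driver's struct device, and the 64-byte size / 16-byte alignment are made-up values:

	#include <linux/dmapool.h>

	struct dma_pool *pool;
	dma_addr_t dma;
	void *cpu_addr;

	/* name, device, object size, alignment, boundary (0 = no crossing restriction) */
	pool = dma_pool_create("mydev_desc", dev, 64, 16, 0);

	/* returns a kernel virtual address and fills in the bus address */
	cpu_addr = dma_pool_alloc(pool, GFP_KERNEL, &dma);

	/* ... hand "dma" to the device, use "cpu_addr" from the CPU ... */

	dma_pool_free(pool, cpu_addr, dma);
	dma_pool_destroy(pool);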
@@ -94,7 +94,7 @@ The pool create() routines initialize a pool of dma-coherent buffers
for use with a given device. It must be called in a context which
can sleep.
The "name" is for diagnostics (like a kmem_cache_t name); dev and size
The "name" is for diagnostics (like a struct kmem_cache name); dev and size
are like what you'd pass to dma_alloc_coherent(). The device's hardware
alignment requirement for this type of data is "align" (which is expressed
in bytes, and must be a power of two). If your device has no boundary
@@ -40,7 +40,7 @@
/* io map for dma */
static void __iomem *dma_base;
static kmem_cache_t *dma_kmem;
static struct kmem_cache *dma_kmem;
struct s3c24xx_dma_selection dma_sel;
@@ -1271,7 +1271,7 @@ struct sysdev_class dma_sysclass = {
/* kmem cache implementation */
static void s3c2410_dma_cache_ctor(void *p, kmem_cache_t *c, unsigned long f)
static void s3c2410_dma_cache_ctor(void *p, struct kmem_cache *c, unsigned long f)
{
memset(p, 0, sizeof(struct s3c2410_dma_buf));
}
@@ -24,7 +24,7 @@
#define MEMC_TABLE_SIZE (256*sizeof(unsigned long))
kmem_cache_t *pte_cache, *pgd_cache;
struct kmem_cache *pte_cache, *pgd_cache;
int page_nr;
/*
@@ -162,12 +162,12 @@ void __init create_memmap_holes(struct meminfo *mi)
{
}
static void pte_cache_ctor(void *pte, kmem_cache_t *cache, unsigned long flags)
static void pte_cache_ctor(void *pte, struct kmem_cache *cache, unsigned long flags)
{
memzero(pte, sizeof(pte_t) * PTRS_PER_PTE);
}
static void pgd_cache_ctor(void *pgd, kmem_cache_t *cache, unsigned long flags)
static void pgd_cache_ctor(void *pgd, struct kmem_cache *cache, unsigned long flags)
{
memzero(pgd + MEMC_TABLE_SIZE, USER_PTRS_PER_PGD * sizeof(pgd_t));
}
@@ -18,7 +18,7 @@
#include <asm/cacheflush.h>
pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((aligned(PAGE_SIZE)));
kmem_cache_t *pgd_cache;
struct kmem_cache *pgd_cache;
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
@@ -100,7 +100,7 @@ static inline void pgd_list_del(pgd_t *pgd)
set_page_private(next, (unsigned long) pprev);
}
void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
void pgd_ctor(void *pgd, struct kmem_cache *cache, unsigned long unused)
{
unsigned long flags;
@@ -120,7 +120,7 @@ void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
}
/* never called when PTRS_PER_PMD > 1 */
void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused)
void pgd_dtor(void *pgd, struct kmem_cache *cache, unsigned long unused)
{
unsigned long flags; /* can be called from interrupt context */
@@ -699,8 +699,8 @@ int remove_memory(u64 start, u64 size)
#endif
#endif
kmem_cache_t *pgd_cache;
kmem_cache_t *pmd_cache;
struct kmem_cache *pgd_cache;
struct kmem_cache *pmd_cache;
void __init pgtable_cache_init(void)
{
@@ -193,7 +193,7 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
return pte;
}
void pmd_ctor(void *pmd, kmem_cache_t *cache, unsigned long flags)
void pmd_ctor(void *pmd, struct kmem_cache *cache, unsigned long flags)
{
memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
}
@@ -233,7 +233,7 @@ static inline void pgd_list_del(pgd_t *pgd)
set_page_private(next, (unsigned long)pprev);
}
void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
void pgd_ctor(void *pgd, struct kmem_cache *cache, unsigned long unused)
{
unsigned long flags;
@@ -253,7 +253,7 @@ void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
}
/* never called when PTRS_PER_PMD > 1 */
void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused)
void pgd_dtor(void *pgd, struct kmem_cache *cache, unsigned long unused)
{
unsigned long flags; /* can be called from interrupt context */
@@ -249,7 +249,7 @@ ia32_init (void)
#if PAGE_SHIFT > IA32_PAGE_SHIFT
{
extern kmem_cache_t *partial_page_cachep;
extern struct kmem_cache *partial_page_cachep;
partial_page_cachep = kmem_cache_create("partial_page_cache",
sizeof(struct partial_page), 0, 0,
@@ -254,7 +254,7 @@ mmap_subpage (struct file *file, unsigned long start, unsigned long end, int pro
}
/* SLAB cache for partial_page structures */
kmem_cache_t *partial_page_cachep;
struct kmem_cache *partial_page_cachep;
/*
* init partial_page_list.
@@ -101,7 +101,7 @@ struct flash_block_list_header { /* just the header of flash_block_list */
static struct flash_block_list_header rtas_firmware_flash_list = {0, NULL};
/* Use slab cache to guarantee 4k alignment */
static kmem_cache_t *flash_block_cache = NULL;
static struct kmem_cache *flash_block_cache = NULL;
#define FLASH_BLOCK_LIST_VERSION (1UL)
@@ -286,7 +286,7 @@ static ssize_t rtas_flash_read(struct file *file, char __user *buf,
}
/* constructor for flash_block_cache */
void rtas_block_ctor(void *ptr, kmem_cache_t *cache, unsigned long flags)
void rtas_block_ctor(void *ptr, struct kmem_cache *cache, unsigned long flags)
{
memset(ptr, 0, RTAS_BLK_SIZE);
}
@@ -1047,7 +1047,7 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
return err;
}
static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
static void zero_ctor(void *addr, struct kmem_cache *cache, unsigned long flags)
{
memset(addr, 0, kmem_cache_size(cache));
}
@@ -141,7 +141,7 @@ static int __init setup_kcore(void)
}
module_init(setup_kcore);
static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
static void zero_ctor(void *addr, struct kmem_cache *cache, unsigned long flags)
{
memset(addr, 0, kmem_cache_size(cache));
}
@@ -166,9 +166,9 @@ static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
/* Hugepages need one extra cache, initialized in hugetlbpage.c. We
* can't put into the tables above, because HPAGE_SHIFT is not compile
* time constant. */
kmem_cache_t *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)+1];
struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)+1];
#else
kmem_cache_t *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)];
struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)];
#endif
void pgtable_cache_init(void)
@@ -40,7 +40,7 @@
#include "spufs.h"
static kmem_cache_t *spufs_inode_cache;
static struct kmem_cache *spufs_inode_cache;
char *isolated_loader;
static struct inode *
@@ -65,7 +65,7 @@ spufs_destroy_inode(struct inode *inode)
}
static void
spufs_init_once(void *p, kmem_cache_t * cachep, unsigned long flags)
spufs_init_once(void *p, struct kmem_cache * cachep, unsigned long flags)
{
struct spufs_inode_info *ei = p;
@@ -38,7 +38,7 @@ struct sq_mapping {
static struct sq_mapping *sq_mapping_list;
static DEFINE_SPINLOCK(sq_mapping_lock);
static kmem_cache_t *sq_cache;
static struct kmem_cache *sq_cache;
static unsigned long *sq_bitmap;
#define store_queue_barrier() \
@@ -30,7 +30,7 @@
#define NR_PMB_ENTRIES 16
static kmem_cache_t *pmb_cache;
static struct kmem_cache *pmb_cache;
static unsigned long pmb_map;
static struct pmb_entry pmb_init_map[] = {
@@ -283,7 +283,7 @@ void pmb_unmap(unsigned long addr)
} while (pmbe);
}
static void pmb_cache_ctor(void *pmb, kmem_cache_t *cachep, unsigned long flags)
static void pmb_cache_ctor(void *pmb, struct kmem_cache *cachep, unsigned long flags)
{
struct pmb_entry *pmbe = pmb;
@@ -297,7 +297,7 @@ static void pmb_cache_ctor(void *pmb, kmem_cache_t *cachep, unsigned long flags)
spin_unlock_irq(&pmb_list_lock);
}
static void pmb_cache_dtor(void *pmb, kmem_cache_t *cachep, unsigned long flags)
static void pmb_cache_dtor(void *pmb, struct kmem_cache *cachep, unsigned long flags)
{
spin_lock_irq(&pmb_list_lock);
pmb_list_del(pmb);
@@ -176,9 +176,9 @@ unsigned long sparc64_kern_sec_context __read_mostly;
int bigkernel = 0;
kmem_cache_t *pgtable_cache __read_mostly;
struct kmem_cache *pgtable_cache __read_mostly;
static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
static void zero_ctor(void *addr, struct kmem_cache *cache, unsigned long flags)
{
clear_page(addr);
}
@@ -239,7 +239,7 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsign
}
}
static kmem_cache_t *tsb_caches[8] __read_mostly;
static struct kmem_cache *tsb_caches[8] __read_mostly;
static const char *tsb_cache_names[8] = {
"tsb_8KB",
@@ -43,8 +43,8 @@ static int cfq_slice_idle = HZ / 125;
#define RQ_CIC(rq) ((struct cfq_io_context*)(rq)->elevator_private)
#define RQ_CFQQ(rq) ((rq)->elevator_private2)
static kmem_cache_t *cfq_pool;
static kmem_cache_t *cfq_ioc_pool;
static struct kmem_cache *cfq_pool;
static struct kmem_cache *cfq_ioc_pool;
static DEFINE_PER_CPU(unsigned long, ioc_count);
static struct completion *ioc_gone;
@@ -44,17 +44,17 @@ static struct io_context *current_io_context(gfp_t gfp_flags, int node);
/*
* For the allocated request tables
*/
static kmem_cache_t *request_cachep;
static struct kmem_cache *request_cachep;
/*
* For queue allocation
*/
static kmem_cache_t *requestq_cachep;
static struct kmem_cache *requestq_cachep;
/*
* For io context allocations
*/
static kmem_cache_t *iocontext_cachep;
static struct kmem_cache *iocontext_cachep;
/*
* Controlling structure to kblockd
@@ -12,7 +12,7 @@
#include <linux/netdevice.h>
#include "aoe.h"
static kmem_cache_t *buf_pool_cache;
static struct kmem_cache *buf_pool_cache;
static ssize_t aoedisk_show_state(struct gendisk * disk, char *page)
{
@@ -133,7 +133,7 @@ struct eth1394_node_info {
#define ETH1394_DRIVER_NAME "eth1394"
static const char driver_name[] = ETH1394_DRIVER_NAME;
static kmem_cache_t *packet_task_cache;
static struct kmem_cache *packet_task_cache;
static struct hpsb_highlevel eth1394_highlevel;
@@ -101,7 +101,7 @@ struct crypt_config {
#define MIN_POOL_PAGES 32
#define MIN_BIO_PAGES 8
static kmem_cache_t *_crypt_io_pool;
static struct kmem_cache *_crypt_io_pool;
/*
* Different IV generation algorithms:
@@ -101,7 +101,7 @@ typedef int (*action_fn) (struct pgpath *pgpath);
#define MIN_IOS 256 /* Mempool size */
static kmem_cache_t *_mpio_cache;
static struct kmem_cache *_mpio_cache;
struct workqueue_struct *kmultipathd;
static void process_queued_ios(struct work_struct *work);
@@ -88,8 +88,8 @@ struct pending_exception {
* Hash table mapping origin volumes to lists of snapshots and
* a lock to protect it
*/
static kmem_cache_t *exception_cache;
static kmem_cache_t *pending_cache;
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;
static mempool_t *pending_pool;
/*
@@ -228,7 +228,7 @@ static int init_exception_table(struct exception_table *et, uint32_t size)
return 0;
}
static void exit_exception_table(struct exception_table *et, kmem_cache_t *mem)
static void exit_exception_table(struct exception_table *et, struct kmem_cache *mem)
{
struct list_head *slot;
struct exception *ex, *next;
@@ -121,8 +121,8 @@ struct mapped_device {
};
#define MIN_IOS 256
static kmem_cache_t *_io_cache;
static kmem_cache_t *_tio_cache;
static struct kmem_cache *_io_cache;
static struct kmem_cache *_tio_cache;
static int __init local_init(void)
{
@@ -203,7 +203,7 @@ struct kcopyd_job {
/* FIXME: this should scale with the number of pages */
#define MIN_JOBS 512
static kmem_cache_t *_job_cache;
static struct kmem_cache *_job_cache;
static mempool_t *_job_pool;
/*
@@ -348,7 +348,7 @@ static int grow_one_stripe(raid5_conf_t *conf)
static int grow_stripes(raid5_conf_t *conf, int num)
{
kmem_cache_t *sc;
struct kmem_cache *sc;
int devs = conf->raid_disks;
sprintf(conf->cache_name[0], "raid5/%s", mdname(conf->mddev));
@@ -397,7 +397,7 @@ static int resize_stripes(raid5_conf_t *conf, int newsize)
LIST_HEAD(newstripes);
struct disk_info *ndisks;
int err = 0;
kmem_cache_t *sc;
struct kmem_cache *sc;
int i;
if (newsize <= conf->pool_size)
@@ -64,7 +64,7 @@
/* I2O Block OSM mempool struct */
struct i2o_block_mempool {
kmem_cache_t *slab;
struct kmem_cache *slab;
mempool_t *pool;
};
@@ -26,7 +26,7 @@
static DEFINE_SPINLOCK(msi_lock);
static struct msi_desc* msi_desc[NR_IRQS] = { [0 ... NR_IRQS-1] = NULL };
static kmem_cache_t* msi_cachep;
static struct kmem_cache* msi_cachep;
static int pci_msi_enable = 1;
@@ -25,7 +25,7 @@
#include "dasd_int.h"
kmem_cache_t *dasd_page_cache;
struct kmem_cache *dasd_page_cache;
EXPORT_SYMBOL_GPL(dasd_page_cache);
/*
@@ -474,7 +474,7 @@ extern struct dasd_profile_info_t dasd_global_profile;
extern unsigned int dasd_profile_level;
extern struct block_device_operations dasd_device_operations;
extern kmem_cache_t *dasd_page_cache;
extern struct kmem_cache *dasd_page_cache;
struct dasd_ccw_req *
dasd_kmalloc_request(char *, int, int, struct dasd_device *);
@@ -1032,9 +1032,9 @@ struct zfcp_data {
wwn_t init_wwpn;
fcp_lun_t init_fcp_lun;
char *driver_version;
kmem_cache_t *fsf_req_qtcb_cache;
kmem_cache_t *sr_buffer_cache;
kmem_cache_t *gid_pn_cache;
struct kmem_cache *fsf_req_qtcb_cache;
struct kmem_cache *sr_buffer_cache;
struct kmem_cache *gid_pn_cache;
};
/**
@@ -56,8 +56,8 @@
/* 2*ITNL timeout + 1 second */
#define AIC94XX_SCB_TIMEOUT (5*HZ)
extern kmem_cache_t *asd_dma_token_cache;
extern kmem_cache_t *asd_ascb_cache;
extern struct kmem_cache *asd_dma_token_cache;
extern struct kmem_cache *asd_ascb_cache;
extern char sas_addr_str[2*SAS_ADDR_SIZE + 1];
static inline void asd_stringify_sas_addr(char *p, const u8 *sas_addr)
@@ -1047,7 +1047,7 @@ irqreturn_t asd_hw_isr(int irq, void *dev_id)
static inline struct asd_ascb *asd_ascb_alloc(struct asd_ha_struct *asd_ha,
gfp_t gfp_flags)
{
extern kmem_cache_t *asd_ascb_cache;
extern struct kmem_cache *asd_ascb_cache;
struct asd_seq_data *seq = &asd_ha->seq;
struct asd_ascb *ascb;
unsigned long flags;
@@ -450,8 +450,8 @@ static inline void asd_destroy_ha_caches(struct asd_ha_struct *asd_ha)
asd_ha->scb_pool = NULL;
}
kmem_cache_t *asd_dma_token_cache;
kmem_cache_t *asd_ascb_cache;
struct kmem_cache *asd_dma_token_cache;
struct kmem_cache *asd_ascb_cache;
static int asd_create_global_caches(void)
{
@@ -36,7 +36,7 @@
#include "../scsi_sas_internal.h"
kmem_cache_t *sas_task_cache;
struct kmem_cache *sas_task_cache;
/*------------ SAS addr hash -----------*/
void sas_hash_addr(u8 *hashed, const u8 *sas_addr)
@@ -24,7 +24,7 @@ char qla2x00_version_str[40];
/*
* SRB allocation cache
*/
static kmem_cache_t *srb_cachep;
static struct kmem_cache *srb_cachep;
/*
* Ioctl related information.
@@ -19,7 +19,7 @@ char qla4xxx_version_str[40];
/*
* SRB allocation cache
*/
static kmem_cache_t *srb_cachep;
static struct kmem_cache *srb_cachep;
/*
* Module parameter information and variables
@@ -136,7 +136,7 @@ const char * scsi_device_type(unsigned type)
EXPORT_SYMBOL(scsi_device_type);
struct scsi_host_cmd_pool {
kmem_cache_t *slab;
struct kmem_cache *slab;
unsigned int users;
char *name;
unsigned int slab_flags;
@@ -36,7 +36,7 @@
struct scsi_host_sg_pool {
size_t size;
char *name;
kmem_cache_t *slab;
struct kmem_cache *slab;
mempool_t *pool;
};
@@ -241,7 +241,7 @@ struct scsi_io_context {
char sense[SCSI_SENSE_BUFFERSIZE];
};
static kmem_cache_t *scsi_io_context_cache;
static struct kmem_cache *scsi_io_context_cache;
static void scsi_end_async(struct request *req, int uptodate)
{
@@ -33,7 +33,7 @@
#include "scsi_tgt_priv.h"
static struct workqueue_struct *scsi_tgtd;
static kmem_cache_t *scsi_tgt_cmd_cache;
static struct kmem_cache *scsi_tgt_cmd_cache;
/*
* TODO: this struct will be killed when the block layer supports large bios
@@ -275,13 +275,13 @@ static volatile USB_SB_Desc_t TxIntrSB_zout __attribute__ ((aligned (4)));
static int zout_buffer[4] __attribute__ ((aligned (4)));
/* Cache for allocating new EP and SB descriptors. */
static kmem_cache_t *usb_desc_cache;
static struct kmem_cache *usb_desc_cache;
/* Cache for the registers allocated in the top half. */
static kmem_cache_t *top_half_reg_cache;
static struct kmem_cache *top_half_reg_cache;
/* Cache for the data allocated in the isoc descr top half. */
static kmem_cache_t *isoc_compl_cache;
static struct kmem_cache *isoc_compl_cache;
static struct usb_bus *etrax_usb_bus;
@@ -81,7 +81,7 @@ MODULE_PARM_DESC(debug, "Debug level");
static char *errbuf;
#define ERRBUF_LEN (32 * 1024)
static kmem_cache_t *uhci_up_cachep; /* urb_priv */
static struct kmem_cache *uhci_up_cachep; /* urb_priv */
static void suspend_rh(struct uhci_hcd *uhci, enum uhci_rh_state new_state);
static void wakeup_rh(struct uhci_hcd *uhci);
@@ -50,7 +50,7 @@ struct mon_event_text {
#define SLAB_NAME_SZ 30
struct mon_reader_text {
kmem_cache_t *e_slab;
struct kmem_cache *e_slab;
int nevents;
struct list_head e_list;
struct mon_reader r; /* In C, parent class can be placed anywhere */
@@ -63,7 +63,7 @@ struct mon_reader_text {
char slab_name[SLAB_NAME_SZ];
};
static void mon_text_ctor(void *, kmem_cache_t *, unsigned long);
static void mon_text_ctor(void *, struct kmem_cache *, unsigned long);
/*
* mon_text_submit
@@ -450,7 +450,7 @@ const struct file_operations mon_fops_text = {
/*
* Slab interface: constructor.
*/
static void mon_text_ctor(void *mem, kmem_cache_t *slab, unsigned long sflags)
static void mon_text_ctor(void *mem, struct kmem_cache *slab, unsigned long sflags)
{
/*
* Nothing to initialize. No, really!
@@ -212,7 +212,7 @@ static int adfs_statfs(struct dentry *dentry, struct kstatfs *buf)
return 0;
}
static kmem_cache_t *adfs_inode_cachep;
static struct kmem_cache *adfs_inode_cachep;
static struct inode *adfs_alloc_inode(struct super_block *sb)
{
@@ -228,7 +228,7 @@ static void adfs_destroy_inode(struct inode *inode)
kmem_cache_free(adfs_inode_cachep, ADFS_I(inode));
}
static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
{
struct adfs_inode_info *ei = (struct adfs_inode_info *) foo;
@@ -66,7 +66,7 @@ affs_write_super(struct super_block *sb)
pr_debug("AFFS: write_super() at %lu, clean=%d\n", get_seconds(), clean);
}
static kmem_cache_t * affs_inode_cachep;
static struct kmem_cache * affs_inode_cachep;
static struct inode *affs_alloc_inode(struct super_block *sb)
{
@@ -83,7 +83,7 @@ static void affs_destroy_inode(struct inode *inode)
kmem_cache_free(affs_inode_cachep, AFFS_I(inode));
}
static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
{
struct affs_inode_info *ei = (struct affs_inode_info *) foo;
@@ -35,7 +35,7 @@ struct afs_mount_params {
struct afs_volume *volume;
};
static void afs_i_init_once(void *foo, kmem_cache_t *cachep,
static void afs_i_init_once(void *foo, struct kmem_cache *cachep,
unsigned long flags);
static int afs_get_sb(struct file_system_type *fs_type,
@@ -65,7 +65,7 @@ static struct super_operations afs_super_ops = {
.put_super = afs_put_super,
};