Commit e18b890b authored by Christoph Lameter, committed by Linus Torvalds

[PATCH] slab: remove kmem_cache_t

Replace all uses of kmem_cache_t with struct kmem_cache.

The patch was generated using the following script:

	#!/bin/sh
	#
	# Replace one string by another in all the kernel sources.
	#

	set -e

	for file in `find * -name "*.c" -o -name "*.h"|xargs grep -l $1`; do
		quilt add $file
		sed -e "1,\$s/$1/$2/g" $file >/tmp/$$
		mv /tmp/$$ $file
		quilt refresh
	done

The script was run like this

	sh replace kmem_cache_t "struct kmem_cache"
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 441e143e
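
For context, the alias being retired was a plain typedef; a sketch of how it stood in <linux/slab.h> at the time (the exact form is an assumption) is:

	/* sketch of the era's slab.h declaration, not part of this diff */
	struct kmem_cache;
	typedef struct kmem_cache kmem_cache_t;

Since every kmem_cache_t was already identical to struct kmem_cache, the sed pass below changes spelling only, not behavior.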
@@ -77,7 +77,7 @@ To get this part of the dma_ API, you must #include <linux/dmapool.h>
 Many drivers need lots of small dma-coherent memory regions for DMA
 descriptors or I/O buffers. Rather than allocating in units of a page
 or more using dma_alloc_coherent(), you can use DMA pools. These work
-much like a kmem_cache_t, except that they use the dma-coherent allocator
+much like a struct kmem_cache, except that they use the dma-coherent allocator
 not __get_free_pages(). Also, they understand common hardware constraints
 for alignment, like queue heads needing to be aligned on N byte boundaries.
@@ -94,7 +94,7 @@ The pool create() routines initialize a pool of dma-coherent buffers
 for use with a given device. It must be called in a context which
 can sleep.
-The "name" is for diagnostics (like a kmem_cache_t name); dev and size
+The "name" is for diagnostics (like a struct kmem_cache name); dev and size
 are like what you'd pass to dma_alloc_coherent(). The device's hardware
 alignment requirement for this type of data is "align" (which is expressed
 in bytes, and must be a power of two). If your device has no boundary
......
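The documentation hunk above summarizes the dma_pool API; a minimal usage sketch under the signatures of this era (the device pointer, block size, and pool name here are hypothetical, not taken from the patch):

	#include <linux/dmapool.h>

	struct dma_pool *pool;
	void *cpu_addr;
	dma_addr_t dma_handle;

	/* 64-byte blocks, 16-byte alignment, no extra boundary constraint */
	pool = dma_pool_create("mydev_desc", dev, 64, 16, 0);

	/* may sleep with GFP_KERNEL, per the "context which can sleep" note above */
	cpu_addr = dma_pool_alloc(pool, GFP_KERNEL, &dma_handle);

	dma_pool_free(pool, cpu_addr, dma_handle);
	dma_pool_destroy(pool);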
@@ -40,7 +40,7 @@
 /* io map for dma */
 static void __iomem *dma_base;
-static kmem_cache_t *dma_kmem;
+static struct kmem_cache *dma_kmem;
 struct s3c24xx_dma_selection dma_sel;
@@ -1271,7 +1271,7 @@ struct sysdev_class dma_sysclass = {
 /* kmem cache implementation */
-static void s3c2410_dma_cache_ctor(void *p, kmem_cache_t *c, unsigned long f)
+static void s3c2410_dma_cache_ctor(void *p, struct kmem_cache *c, unsigned long f)
 {
 	memset(p, 0, sizeof(struct s3c2410_dma_buf));
 }
......
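Many hunks below touch slab constructor callbacks, which in this era received the cache pointer as their second argument; a minimal sketch of how a cache and its constructor pair up (hypothetical names, assuming the kmem_cache_create() signature of the time with separate ctor/dtor parameters):

	static struct kmem_cache *buf_cache;

	/* zero each object as the slab allocator constructs it */
	static void buf_ctor(void *obj, struct kmem_cache *c, unsigned long flags)
	{
		memset(obj, 0, kmem_cache_size(c));
	}

	static int __init buf_init(void)
	{
		buf_cache = kmem_cache_create("buf_cache", 64, 0,
					      SLAB_HWCACHE_ALIGN, buf_ctor, NULL);
		return buf_cache ? 0 : -ENOMEM;
	}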
@@ -24,7 +24,7 @@
 #define MEMC_TABLE_SIZE (256*sizeof(unsigned long))
-kmem_cache_t *pte_cache, *pgd_cache;
+struct kmem_cache *pte_cache, *pgd_cache;
 int page_nr;
 /*
@@ -162,12 +162,12 @@ void __init create_memmap_holes(struct meminfo *mi)
 {
 }
-static void pte_cache_ctor(void *pte, kmem_cache_t *cache, unsigned long flags)
+static void pte_cache_ctor(void *pte, struct kmem_cache *cache, unsigned long flags)
 {
 	memzero(pte, sizeof(pte_t) * PTRS_PER_PTE);
 }
-static void pgd_cache_ctor(void *pgd, kmem_cache_t *cache, unsigned long flags)
+static void pgd_cache_ctor(void *pgd, struct kmem_cache *cache, unsigned long flags)
 {
 	memzero(pgd + MEMC_TABLE_SIZE, USER_PTRS_PER_PGD * sizeof(pgd_t));
 }
......
@@ -18,7 +18,7 @@
 #include <asm/cacheflush.h>
 pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((aligned(PAGE_SIZE)));
-kmem_cache_t *pgd_cache;
+struct kmem_cache *pgd_cache;
 pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
@@ -100,7 +100,7 @@ static inline void pgd_list_del(pgd_t *pgd)
 	set_page_private(next, (unsigned long) pprev);
 }
-void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
+void pgd_ctor(void *pgd, struct kmem_cache *cache, unsigned long unused)
 {
 	unsigned long flags;
@@ -120,7 +120,7 @@ void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
 }
 /* never called when PTRS_PER_PMD > 1 */
-void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused)
+void pgd_dtor(void *pgd, struct kmem_cache *cache, unsigned long unused)
 {
 	unsigned long flags; /* can be called from interrupt context */
......
@@ -699,8 +699,8 @@ int remove_memory(u64 start, u64 size)
 #endif
 #endif
-kmem_cache_t *pgd_cache;
-kmem_cache_t *pmd_cache;
+struct kmem_cache *pgd_cache;
+struct kmem_cache *pmd_cache;
 void __init pgtable_cache_init(void)
 {
......
@@ -193,7 +193,7 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
 	return pte;
 }
-void pmd_ctor(void *pmd, kmem_cache_t *cache, unsigned long flags)
+void pmd_ctor(void *pmd, struct kmem_cache *cache, unsigned long flags)
 {
 	memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
 }
@@ -233,7 +233,7 @@ static inline void pgd_list_del(pgd_t *pgd)
 	set_page_private(next, (unsigned long)pprev);
 }
-void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
+void pgd_ctor(void *pgd, struct kmem_cache *cache, unsigned long unused)
 {
 	unsigned long flags;
@@ -253,7 +253,7 @@ void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
 }
 /* never called when PTRS_PER_PMD > 1 */
-void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused)
+void pgd_dtor(void *pgd, struct kmem_cache *cache, unsigned long unused)
 {
 	unsigned long flags; /* can be called from interrupt context */
......
@@ -249,7 +249,7 @@ ia32_init (void)
 #if PAGE_SHIFT > IA32_PAGE_SHIFT
 	{
-		extern kmem_cache_t *partial_page_cachep;
+		extern struct kmem_cache *partial_page_cachep;
 		partial_page_cachep = kmem_cache_create("partial_page_cache",
 							sizeof(struct partial_page), 0, 0,
......
@@ -254,7 +254,7 @@ mmap_subpage (struct file *file, unsigned long start, unsigned long end, int pro
 }
 /* SLAB cache for partial_page structures */
-kmem_cache_t *partial_page_cachep;
+struct kmem_cache *partial_page_cachep;
 /*
  * init partial_page_list.
......
@@ -101,7 +101,7 @@ struct flash_block_list_header { /* just the header of flash_block_list */
 static struct flash_block_list_header rtas_firmware_flash_list = {0, NULL};
 /* Use slab cache to guarantee 4k alignment */
-static kmem_cache_t *flash_block_cache = NULL;
+static struct kmem_cache *flash_block_cache = NULL;
 #define FLASH_BLOCK_LIST_VERSION (1UL)
@@ -286,7 +286,7 @@ static ssize_t rtas_flash_read(struct file *file, char __user *buf,
 }
 /* constructor for flash_block_cache */
-void rtas_block_ctor(void *ptr, kmem_cache_t *cache, unsigned long flags)
+void rtas_block_ctor(void *ptr, struct kmem_cache *cache, unsigned long flags)
 {
 	memset(ptr, 0, RTAS_BLK_SIZE);
 }
......
@@ -1047,7 +1047,7 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
 	return err;
 }
-static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
+static void zero_ctor(void *addr, struct kmem_cache *cache, unsigned long flags)
 {
 	memset(addr, 0, kmem_cache_size(cache));
 }
......
@@ -141,7 +141,7 @@ static int __init setup_kcore(void)
 }
 module_init(setup_kcore);
-static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
+static void zero_ctor(void *addr, struct kmem_cache *cache, unsigned long flags)
 {
 	memset(addr, 0, kmem_cache_size(cache));
 }
@@ -166,9 +166,9 @@ static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
 /* Hugepages need one extra cache, initialized in hugetlbpage.c.  We
  * can't put into the tables above, because HPAGE_SHIFT is not compile
  * time constant. */
-kmem_cache_t *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)+1];
+struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)+1];
 #else
-kmem_cache_t *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)];
+struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)];
 #endif
 void pgtable_cache_init(void)
......
@@ -40,7 +40,7 @@
 #include "spufs.h"
-static kmem_cache_t *spufs_inode_cache;
+static struct kmem_cache *spufs_inode_cache;
 char *isolated_loader;
 static struct inode *
@@ -65,7 +65,7 @@ spufs_destroy_inode(struct inode *inode)
 }
 static void
-spufs_init_once(void *p, kmem_cache_t * cachep, unsigned long flags)
+spufs_init_once(void *p, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct spufs_inode_info *ei = p;
......
@@ -38,7 +38,7 @@ struct sq_mapping {
 static struct sq_mapping *sq_mapping_list;
 static DEFINE_SPINLOCK(sq_mapping_lock);
-static kmem_cache_t *sq_cache;
+static struct kmem_cache *sq_cache;
 static unsigned long *sq_bitmap;
 #define store_queue_barrier()	\
......
@@ -30,7 +30,7 @@
 #define NR_PMB_ENTRIES	16
-static kmem_cache_t *pmb_cache;
+static struct kmem_cache *pmb_cache;
 static unsigned long pmb_map;
 static struct pmb_entry pmb_init_map[] = {
@@ -283,7 +283,7 @@ void pmb_unmap(unsigned long addr)
 	} while (pmbe);
 }
-static void pmb_cache_ctor(void *pmb, kmem_cache_t *cachep, unsigned long flags)
+static void pmb_cache_ctor(void *pmb, struct kmem_cache *cachep, unsigned long flags)
 {
 	struct pmb_entry *pmbe = pmb;
@@ -297,7 +297,7 @@ static void pmb_cache_ctor(void *pmb, kmem_cache_t *cachep, unsigned long flags)
 	spin_unlock_irq(&pmb_list_lock);
 }
-static void pmb_cache_dtor(void *pmb, kmem_cache_t *cachep, unsigned long flags)
+static void pmb_cache_dtor(void *pmb, struct kmem_cache *cachep, unsigned long flags)
 {
 	spin_lock_irq(&pmb_list_lock);
 	pmb_list_del(pmb);
......
@@ -176,9 +176,9 @@ unsigned long sparc64_kern_sec_context __read_mostly;
 int bigkernel = 0;
-kmem_cache_t *pgtable_cache __read_mostly;
+struct kmem_cache *pgtable_cache __read_mostly;
-static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
+static void zero_ctor(void *addr, struct kmem_cache *cache, unsigned long flags)
 {
 	clear_page(addr);
 }
......
@@ -239,7 +239,7 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsign
 	}
 }
-static kmem_cache_t *tsb_caches[8] __read_mostly;
+static struct kmem_cache *tsb_caches[8] __read_mostly;
 static const char *tsb_cache_names[8] = {
 	"tsb_8KB",
......
@@ -43,8 +43,8 @@ static int cfq_slice_idle = HZ / 125;
 #define RQ_CIC(rq)	((struct cfq_io_context*)(rq)->elevator_private)
 #define RQ_CFQQ(rq)	((rq)->elevator_private2)
-static kmem_cache_t *cfq_pool;
-static kmem_cache_t *cfq_ioc_pool;
+static struct kmem_cache *cfq_pool;
+static struct kmem_cache *cfq_ioc_pool;
 static DEFINE_PER_CPU(unsigned long, ioc_count);
 static struct completion *ioc_gone;
......
@@ -44,17 +44,17 @@ static struct io_context *current_io_context(gfp_t gfp_flags, int node);
 /*
  * For the allocated request tables
  */
-static kmem_cache_t *request_cachep;
+static struct kmem_cache *request_cachep;
 /*
  * For queue allocation
  */
-static kmem_cache_t *requestq_cachep;
+static struct kmem_cache *requestq_cachep;
 /*
  * For io context allocations
  */
-static kmem_cache_t *iocontext_cachep;
+static struct kmem_cache *iocontext_cachep;
 /*
  * Controlling structure to kblockd
......
@@ -12,7 +12,7 @@
 #include <linux/netdevice.h>
 #include "aoe.h"
-static kmem_cache_t *buf_pool_cache;
+static struct kmem_cache *buf_pool_cache;
 static ssize_t aoedisk_show_state(struct gendisk * disk, char *page)
 {
......
@@ -133,7 +133,7 @@ struct eth1394_node_info {
 #define ETH1394_DRIVER_NAME "eth1394"
 static const char driver_name[] = ETH1394_DRIVER_NAME;
-static kmem_cache_t *packet_task_cache;
+static struct kmem_cache *packet_task_cache;
 static struct hpsb_highlevel eth1394_highlevel;
......
@@ -101,7 +101,7 @@ struct crypt_config {
 #define MIN_POOL_PAGES 32
 #define MIN_BIO_PAGES  8
-static kmem_cache_t *_crypt_io_pool;
+static struct kmem_cache *_crypt_io_pool;
 /*
  * Different IV generation algorithms:
......
@@ -101,7 +101,7 @@ typedef int (*action_fn) (struct pgpath *pgpath);
 #define MIN_IOS 256	/* Mempool size */
-static kmem_cache_t *_mpio_cache;
+static struct kmem_cache *_mpio_cache;
 struct workqueue_struct *kmultipathd;
 static void process_queued_ios(struct work_struct *work);
......
@@ -88,8 +88,8 @@ struct pending_exception {
  * Hash table mapping origin volumes to lists of snapshots and
  * a lock to protect it
  */
-static kmem_cache_t *exception_cache;
-static kmem_cache_t *pending_cache;
+static struct kmem_cache *exception_cache;
+static struct kmem_cache *pending_cache;
 static mempool_t *pending_pool;
 /*
@@ -228,7 +228,7 @@ static int init_exception_table(struct exception_table *et, uint32_t size)
 	return 0;
 }
-static void exit_exception_table(struct exception_table *et, kmem_cache_t *mem)
+static void exit_exception_table(struct exception_table *et, struct kmem_cache *mem)
 {
 	struct list_head *slot;
 	struct exception *ex, *next;
......
@@ -121,8 +121,8 @@ struct mapped_device {
 };
 #define MIN_IOS 256
-static kmem_cache_t *_io_cache;
-static kmem_cache_t *_tio_cache;
+static struct kmem_cache *_io_cache;
+static struct kmem_cache *_tio_cache;
 static int __init local_init(void)
 {
...@@ -203,7 +203,7 @@ struct kcopyd_job { ...@@ -203,7 +203,7 @@ struct kcopyd_job {
/* FIXME: this should scale with the number of pages */ /* FIXME: this should scale with the number of pages */
#define MIN_JOBS 512 #define MIN_JOBS 512
static kmem_cache_t *_job_cache; static struct kmem_cache *_job_cache;
static mempool_t *_job_pool; static mempool_t *_job_pool;
/* /*
......
@@ -348,7 +348,7 @@ static int grow_one_stripe(raid5_conf_t *conf)
 static int grow_stripes(raid5_conf_t *conf, int num)
 {
-	kmem_cache_t *sc;
+	struct kmem_cache *sc;
 	int devs = conf->raid_disks;
 	sprintf(conf->cache_name[0], "raid5/%s", mdname(conf->mddev));
@@ -397,7 +397,7 @@ static int resize_stripes(raid5_conf_t *conf, int newsize)
 	LIST_HEAD(newstripes);
 	struct disk_info *