Commit dd0fc66f authored by Al Viro, committed by Linus Torvalds

[PATCH] gfp flags annotations - part 1

 - added typedef unsigned int __nocast gfp_t;

 - replaced __nocast uses for gfp flags with gfp_t - it gives exactly
   the same warnings as far as sparse is concerned, doesn't change
   generated code (from gcc's point of view we replaced unsigned int
   with a typedef) and documents what's going on far better.
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 3b0e77bd
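
For context before the hunks: every change below is the same mechanical substitution, so here is a minimal sketch of what the message describes (assumed header layout, not copied from this patch). The typedef carries the sparse-only __nocast annotation, so sparse still emits the same warnings about misused gfp flags, while gcc just sees an unsigned int and generates identical code.

/* Sketch only - assumes the usual __CHECKER__ fallback for __nocast;
 * the header this commit actually touches is not shown in the hunks below. */
#ifdef __CHECKER__
# define __nocast __attribute__((nocast))
#else
# define __nocast
#endif

typedef unsigned int __nocast gfp_t;

/* The conversion pattern repeated in every hunk, e.g. for kmem_alloc():
 *   before: void *kmem_alloc(size_t size, unsigned int __nocast flags);
 *   after:  void *kmem_alloc(size_t size, gfp_t flags);
 */
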
......@@ -24,7 +24,7 @@ struct dma_coherent_mem {
};
void *dma_alloc_coherent(struct device *dev, size_t size,
-dma_addr_t *dma_handle, unsigned int __nocast gfp)
+dma_addr_t *dma_handle, gfp_t gfp)
{
void *ret;
struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
......
......@@ -23,7 +23,7 @@ struct dma_coherent_mem {
};
void *dma_alloc_coherent(struct device *dev, size_t size,
-dma_addr_t *dma_handle, unsigned int __nocast gfp)
+dma_addr_t *dma_handle, gfp_t gfp)
{
void *ret;
struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
......
......@@ -310,7 +310,7 @@ static void bpa_map_iommu(void)
static void *bpa_alloc_coherent(struct device *hwdev, size_t size,
-dma_addr_t *dma_handle, unsigned int __nocast flag)
+dma_addr_t *dma_handle, gfp_t flag)
{
void *ret;
......
......@@ -53,7 +53,7 @@ int dma_set_mask(struct device *dev, u64 dma_mask)
EXPORT_SYMBOL(dma_set_mask);
void *dma_alloc_coherent(struct device *dev, size_t size,
-dma_addr_t *dma_handle, unsigned int __nocast flag)
+dma_addr_t *dma_handle, gfp_t flag)
{
struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
......
......@@ -519,7 +519,7 @@ void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
* to the dma address (mapping) of the first page.
*/
void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
-dma_addr_t *dma_handle, unsigned int __nocast flag)
+dma_addr_t *dma_handle, gfp_t flag)
{
void *ret = NULL;
dma_addr_t mapping;
......
......@@ -31,7 +31,7 @@
#include "pci.h"
static void *pci_direct_alloc_coherent(struct device *hwdev, size_t size,
-dma_addr_t *dma_handle, unsigned int __nocast flag)
+dma_addr_t *dma_handle, gfp_t flag)
{
void *ret;
......
......@@ -76,7 +76,7 @@ static inline struct iommu_table *devnode_table(struct device *dev)
* to the dma address (mapping) of the first page.
*/
static void *pci_iommu_alloc_coherent(struct device *hwdev, size_t size,
-dma_addr_t *dma_handle, unsigned int __nocast flag)
+dma_addr_t *dma_handle, gfp_t flag)
{
return iommu_alloc_coherent(devnode_table(hwdev), size, dma_handle,
flag);
......
......@@ -218,7 +218,7 @@ static void vio_unmap_sg(struct device *dev, struct scatterlist *sglist,
}
static void *vio_alloc_coherent(struct device *dev, size_t size,
-dma_addr_t *dma_handle, unsigned int __nocast flag)
+dma_addr_t *dma_handle, gfp_t flag)
{
return iommu_alloc_coherent(to_vio_dev(dev)->iommu_table, size,
dma_handle, flag);
......
......@@ -795,7 +795,7 @@ static void drain_rx_pools (amb_dev * dev) {
}
static inline void fill_rx_pool (amb_dev * dev, unsigned char pool,
-unsigned int __nocast priority)
+gfp_t priority)
{
rx_in rx;
amb_rxq * rxq;
......
......@@ -1374,8 +1374,7 @@ static void reset_chip (struct fs_dev *dev)
}
}
-static void __devinit *aligned_kmalloc (int size, unsigned int __nocast flags,
-int alignment)
+static void __devinit *aligned_kmalloc (int size, gfp_t flags, int alignment)
{
void *t;
......@@ -1466,7 +1465,7 @@ static inline int nr_buffers_in_freepool (struct fs_dev *dev, struct freepool *f
working again after that... -- REW */
static void top_off_fp (struct fs_dev *dev, struct freepool *fp,
-unsigned int __nocast gfp_flags)
+gfp_t gfp_flags)
{
struct FS_BPENTRY *qe, *ne;
struct sk_buff *skb;
......
......@@ -178,7 +178,7 @@ fore200e_irq_itoa(int irq)
static void*
-fore200e_kmalloc(int size, unsigned int __nocast flags)
+fore200e_kmalloc(int size, gfp_t flags)
{
void *chunk = kzalloc(size, flags);
......
......@@ -156,7 +156,7 @@ dma_pool_create (const char *name, struct device *dev,
static struct dma_page *
-pool_alloc_page (struct dma_pool *pool, unsigned int __nocast mem_flags)
+pool_alloc_page (struct dma_pool *pool, gfp_t mem_flags)
{
struct dma_page *page;
int mapsize;
......@@ -262,8 +262,7 @@ dma_pool_destroy (struct dma_pool *pool)
* If such a memory block can't be allocated, null is returned.
*/
void *
-dma_pool_alloc (struct dma_pool *pool, unsigned int __nocast mem_flags,
-dma_addr_t *handle)
+dma_pool_alloc (struct dma_pool *pool, gfp_t mem_flags, dma_addr_t *handle)
{
unsigned long flags;
struct dma_page *page;
......
......@@ -229,7 +229,7 @@ static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets)
return 1;
}
-static void *pkt_rb_alloc(unsigned int __nocast gfp_mask, void *data)
+static void *pkt_rb_alloc(gfp_t gfp_mask, void *data)
{
return kmalloc(sizeof(struct pkt_rb_node), gfp_mask);
}
......@@ -2082,7 +2082,7 @@ static int pkt_close(struct inode *inode, struct file *file)
}
-static void *psd_pool_alloc(unsigned int __nocast gfp_mask, void *data)
+static void *psd_pool_alloc(gfp_t gfp_mask, void *data)
{
return kmalloc(sizeof(struct packet_stacked_data), gfp_mask);
}
......
......@@ -308,7 +308,7 @@ static void bpa10x_complete(struct urb *urb, struct pt_regs *regs)
}
static inline struct urb *bpa10x_alloc_urb(struct usb_device *udev, unsigned int pipe,
-size_t size, unsigned int __nocast flags, void *data)
+size_t size, gfp_t flags, void *data)
{
struct urb *urb;
struct usb_ctrlrequest *cr;
......
......@@ -132,7 +132,7 @@ static struct usb_device_id blacklist_ids[] = {
{ } /* Terminating entry */
};
-static struct _urb *_urb_alloc(int isoc, unsigned int __nocast gfp)
+static struct _urb *_urb_alloc(int isoc, gfp_t gfp)
{
struct _urb *_urb = kmalloc(sizeof(struct _urb) +
sizeof(struct usb_iso_packet_descriptor) * isoc, gfp);
......
......@@ -69,8 +69,7 @@ int cn_already_initialized = 0;
* a new message.
*
*/
-int cn_netlink_send(struct cn_msg *msg, u32 __group,
-unsigned int __nocast gfp_mask)
+int cn_netlink_send(struct cn_msg *msg, u32 __group, gfp_t gfp_mask)
{
struct cn_callback_entry *__cbq;
unsigned int size;
......
......@@ -98,7 +98,7 @@ static struct hpsb_address_ops arm_ops = {
static void queue_complete_cb(struct pending_request *req);
-static struct pending_request *__alloc_pending_request(unsigned int __nocast flags)
+static struct pending_request *__alloc_pending_request(gfp_t flags)
{
struct pending_request *req;
......
......@@ -783,7 +783,7 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
u32 remote_qpn, u16 pkey_index,
struct ib_ah *ah, int rmpp_active,
int hdr_len, int data_len,
-unsigned int __nocast gfp_mask)
+gfp_t gfp_mask)
{
struct ib_mad_agent_private *mad_agent_priv;
struct ib_mad_send_buf *send_buf;
......
......@@ -574,7 +574,7 @@ static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
int ib_sa_path_rec_get(struct ib_device *device, u8 port_num,
struct ib_sa_path_rec *rec,
ib_sa_comp_mask comp_mask,
-int timeout_ms, unsigned int __nocast gfp_mask,
+int timeout_ms, gfp_t gfp_mask,
void (*callback)(int status,
struct ib_sa_path_rec *resp,
void *context),
......@@ -676,7 +676,7 @@ static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
int ib_sa_service_rec_query(struct ib_device *device, u8 port_num, u8 method,
struct ib_sa_service_rec *rec,
ib_sa_comp_mask comp_mask,
-int timeout_ms, unsigned int __nocast gfp_mask,
+int timeout_ms, gfp_t gfp_mask,
void (*callback)(int status,
struct ib_sa_service_rec *resp,
void *context),
......@@ -759,7 +759,7 @@ int ib_sa_mcmember_rec_query(struct ib_device *device, u8 port_num,
u8 method,
struct ib_sa_mcmember_rec *rec,
ib_sa_comp_mask comp_mask,
-int timeout_ms, unsigned int __nocast gfp_mask,
+int timeout_ms, gfp_t gfp_mask,
void (*callback)(int status,
struct ib_sa_mcmember_rec *resp,
void *context),
......
......@@ -96,7 +96,7 @@ static kmem_cache_t *_crypt_io_pool;
/*
* Mempool alloc and free functions for the page
*/
-static void *mempool_alloc_page(unsigned int __nocast gfp_mask, void *data)
+static void *mempool_alloc_page(gfp_t gfp_mask, void *data)
{
return alloc_page(gfp_mask);
}
......
......@@ -32,7 +32,7 @@ struct io {
static unsigned _num_ios;
static mempool_t *_io_pool;
-static void *alloc_io(unsigned int __nocast gfp_mask, void *pool_data)
+static void *alloc_io(gfp_t gfp_mask, void *pool_data)
{
return kmalloc(sizeof(struct io), gfp_mask);
}
......
......@@ -122,7 +122,7 @@ static inline sector_t region_to_sector(struct region_hash *rh, region_t region)
/* FIXME move this */
static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw);
-static void *region_alloc(unsigned int __nocast gfp_mask, void *pool_data)
+static void *region_alloc(gfp_t gfp_mask, void *pool_data)
{
return kmalloc(sizeof(struct region), gfp_mask);
}
......
......@@ -38,7 +38,7 @@
static mdk_personality_t multipath_personality;
-static void *mp_pool_alloc(unsigned int __nocast gfp_flags, void *data)
+static void *mp_pool_alloc(gfp_t gfp_flags, void *data)
{
struct multipath_bh *mpb;
mpb = kmalloc(sizeof(*mpb), gfp_flags);
......
......@@ -52,7 +52,7 @@ static mdk_personality_t raid1_personality;
static void unplug_slaves(mddev_t *mddev);
-static void * r1bio_pool_alloc(unsigned int __nocast gfp_flags, void *data)
+static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
{
struct pool_info *pi = data;
r1bio_t *r1_bio;
......@@ -79,7 +79,7 @@ static void r1bio_pool_free(void *r1_bio, void *data)
#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
#define RESYNC_WINDOW (2048*1024)
-static void * r1buf_pool_alloc(unsigned int __nocast gfp_flags, void *data)
+static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
{
struct pool_info *pi = data;
struct page *page;
......
......@@ -47,7 +47,7 @@
static void unplug_slaves(mddev_t *mddev);
-static void * r10bio_pool_alloc(unsigned int __nocast gfp_flags, void *data)
+static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
{
conf_t *conf = data;
r10bio_t *r10_bio;
......@@ -81,7 +81,7 @@ static void r10bio_pool_free(void *r10_bio, void *data)
* one for write (we recover only one drive per r10buf)
*
*/
-static void * r10buf_pool_alloc(unsigned int __nocast gfp_flags, void *data)
+static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
{
conf_t *conf = data;
struct page *page;
......
......@@ -1290,7 +1290,7 @@ static void bond_mc_list_destroy(struct bonding *bond)
* Copy all the Multicast addresses from src to the bonding device dst
*/
static int bond_mc_list_copy(struct dev_mc_list *mc_list, struct bonding *bond,
-unsigned int __nocast gfp_flag)
+gfp_t gfp_flag)
{
struct dev_mc_list *dmi, *new_dmi;
......
......@@ -584,7 +584,7 @@ static inline int ns83820_add_rx_skb(struct ns83820 *dev, struct sk_buff *skb)
return 0;
}
-static inline int rx_refill(struct net_device *ndev, unsigned int __nocast gfp)
+static inline int rx_refill(struct net_device *ndev, gfp_t gfp)
{
struct ns83820 *dev = PRIV(ndev);
unsigned i;
......
......@@ -1036,7 +1036,7 @@ struct gem {
#define ALIGNED_RX_SKB_ADDR(addr) \
((((unsigned long)(addr) + (64UL - 1UL)) & ~(64UL - 1UL)) - (unsigned long)(addr))
static __inline__ struct sk_buff *gem_alloc_skb(int size,
-unsigned int __nocast gfp_flags)
+gfp_t gfp_flags)
{
struct sk_buff *skb = alloc_skb(size + 64, gfp_flags);
......
......@@ -833,7 +833,7 @@ zfcp_unit_dequeue(struct zfcp_unit *unit)
}
static void *
-zfcp_mempool_alloc(unsigned int __nocast gfp_mask, void *size)
+zfcp_mempool_alloc(gfp_t gfp_mask, void *size)
{
return kmalloc((size_t) size, gfp_mask);
}
......
......@@ -75,7 +75,7 @@ struct bio_set {
*/
static struct bio_set *fs_bio_set;
-static inline struct bio_vec *bvec_alloc_bs(unsigned int __nocast gfp_mask, int nr, unsigned long *idx, struct bio_set *bs)
+static inline struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx, struct bio_set *bs)
{
struct bio_vec *bvl;
struct biovec_slab *bp;
......@@ -155,7 +155,7 @@ inline void bio_init(struct bio *bio)
* allocate bio and iovecs from the memory pools specified by the
* bio_set structure.
**/
-struct bio *bio_alloc_bioset(unsigned int __nocast gfp_mask, int nr_iovecs, struct bio_set *bs)
+struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
{
struct bio *bio = mempool_alloc(bs->bio_pool, gfp_mask);
......@@ -181,7 +181,7 @@ struct bio *bio_alloc_bioset(unsigned int __nocast gfp_mask, int nr_iovecs, stru
return bio;
}
-struct bio *bio_alloc(unsigned int __nocast gfp_mask, int nr_iovecs)
+struct bio *bio_alloc(gfp_t gfp_mask, int nr_iovecs)
{
struct bio *bio = bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
......@@ -277,7 +277,7 @@ inline void __bio_clone(struct bio *bio, struct bio *bio_src)
*
* Like __bio_clone, only also allocates the returned bio
*/
-struct bio *bio_clone(struct bio *bio, unsigned int __nocast gfp_mask)
+struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
{
struct bio *b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs, fs_bio_set);
......@@ -1078,7 +1078,7 @@ struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors)
return bp;
}
-static void *bio_pair_alloc(unsigned int __nocast gfp_flags, void *data)
+static void *bio_pair_alloc(gfp_t gfp_flags, void *data)
{
return kmalloc(sizeof(struct bio_pair), gfp_flags);
}
......
......@@ -3045,7 +3045,7 @@ static void recalc_bh_state(void)
buffer_heads_over_limit = (tot > max_buffer_heads);
}
-struct buffer_head *alloc_buffer_head(unsigned int __nocast gfp_flags)
+struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
{
struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
if (ret) {
......
......@@ -102,7 +102,7 @@ static struct bio *mpage_bio_submit(int rw, struct bio *bio)
static struct bio *
mpage_alloc(struct block_device *bdev,
sector_t first_sector, int nr_vecs,
-unsigned int __nocast gfp_flags)
+gfp_t gfp_flags)
{
struct bio *bio;
......
......@@ -40,7 +40,7 @@
* Depending on @gfp_mask the allocation may be guaranteed to succeed.
*/
static inline void *__ntfs_malloc(unsigned long size,
-unsigned int __nocast gfp_mask)
+gfp_t gfp_mask)
{
if (likely(size <= PAGE_SIZE)) {
BUG_ON(!size);
......
......@@ -35,7 +35,7 @@ EXPORT_SYMBOL(posix_acl_permission);
* Allocate a new ACL with the specified number of entries.
*/
struct posix_acl *
-posix_acl_alloc(int count, unsigned int __nocast flags)
+posix_acl_alloc(int count, gfp_t flags)
{
const size_t size = sizeof(struct posix_acl) +
count * sizeof(struct posix_acl_entry);
......@@ -51,7 +51,7 @@ posix_acl_alloc(int count, unsigned int __nocast flags)
* Clone an ACL.
*/
struct posix_acl *
-posix_acl_clone(const struct posix_acl *acl, unsigned int __nocast flags)
+posix_acl_clone(const struct posix_acl *acl, gfp_t flags)
{
struct posix_acl *clone = NULL;
......@@ -185,7 +185,7 @@ posix_acl_equiv_mode(const struct posix_acl *acl, mode_t *mode_p)
* Create an ACL representing the file mode permission bits of an inode.
*/
struct posix_acl *
-posix_acl_from_mode(mode_t mode, unsigned int __nocast flags)
+posix_acl_from_mode(mode_t mode, gfp_t flags)
{
struct posix_acl *acl = posix_acl_alloc(3, flags);
if (!acl)
......
......@@ -45,7 +45,7 @@
void *
-kmem_alloc(size_t size, unsigned int __nocast flags)
+kmem_alloc(size_t size, gfp_t flags)
{
int retries = 0;
unsigned int lflags = kmem_flags_convert(flags);
......@@ -67,7 +67,7 @@ kmem_alloc(size_t size, unsigned int __nocast flags)
}
void *
-kmem_zalloc(size_t size, unsigned int __nocast flags)
+kmem_zalloc(size_t size, gfp_t flags)
{
void *ptr;
......@@ -90,7 +90,7 @@ kmem_free(void *ptr, size_t size)
void *
kmem_realloc(void *ptr, size_t newsize, size_t oldsize,
-unsigned int __nocast flags)
+gfp_t flags)
{
void *new;
......@@ -105,7 +105,7 @@ kmem_realloc(void *ptr, size_t newsize, size_t oldsize,
}
void *
-kmem_zone_alloc(kmem_zone_t *zone, unsigned int __nocast flags)
+kmem_zone_alloc(kmem_zone_t *zone, gfp_t flags)
{
int retries = 0;
unsigned int lflags = kmem_flags_convert(flags);
......@@ -124,7 +124,7 @@ kmem_zone_alloc(kmem_zone_t *zone, unsigned int __nocast flags)
}
void *
-kmem_zone_zalloc(kmem_zone_t *zone, unsigned int __nocast flags)
+kmem_zone_zalloc(kmem_zone_t *zone, gfp_t flags)
{
void *ptr;
......
......@@ -81,7 +81,7 @@ typedef unsigned long xfs_pflags_t;
*(NSTATEP) = *(OSTATEP); \
} while (0)
-static __inline unsigned int kmem_flags_convert(unsigned int __nocast flags)
+static __inline unsigned int kmem_flags_convert(gfp_t flags)
{
unsigned int lflags = __GFP_NOWARN; /* we'll report problems, if need be */
......@@ -125,13 +125,12 @@ kmem_zone_destroy(kmem_zone_t *zone)
BUG();
}
-extern void *kmem_zone_zalloc(kmem_zone_t *, unsigned int __nocast);
-extern void *kmem_zone_alloc(kmem_zone_t *, unsigned int __nocast);
+extern void *kmem_zone_zalloc(kmem_zone_t *, gfp_t);
+extern void *kmem_zone_alloc(kmem_zone_t *, gfp_t);
-extern void *kmem_alloc(size_t, unsigned int __nocast);
-extern void *kmem_realloc(void *, size_t, size_t,
-unsigned int __nocast);
-extern void *kmem_zalloc(size_t, unsigned int __nocast);
+extern void *kmem_alloc(size_t, gfp_t);
+extern void *kmem_realloc(void *, size_t, size_t, gfp_t);
+extern void *kmem_zalloc(size_t, gfp_t);
extern void kmem_free(void *, size_t);
typedef struct shrinker *kmem_shaker_t;
......
......@@ -35,7 +35,7 @@ dma_set_mask(struct device *dev, u64 dma_mask)
static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-unsigned int __nocast flag)
+gfp_t flag)
{
BUG_ON(dev->bus != &pci_bus_type);
......@@ -168,7 +168,7 @@ dma_set_mask(struct device *dev, u64 dma_mask)
static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-unsigned int __nocast flag)
+gfp_t flag)
{
BUG();
return NULL;
......
......@@ -11,7 +11,7 @@
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
void *dma_alloc_coherent(struct device *dev, size_t size,
-dma_addr_t *dma_handle, unsigned int __nocast flag);
+dma_addr_t *dma_handle, gfp_t flag);
void dma_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle);
......
......@@ -61,7 +61,7 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t * dma_handle,
-unsigned int __nocast gfp)
+gfp_t gfp)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
return __dma_alloc_coherent(size, dma_handle, gfp);
......
......@@ -19,7 +19,7 @@
extern int dma_supported(struct device *dev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 dma_mask);
extern void *dma_alloc_coherent(struct device *dev, size_t size,
-dma_addr_t *dma_handle, unsigned int __nocast flag);
+dma_addr_t *dma_handle, gfp_t flag);
extern void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_handle);
extern dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
......@@ -118,7 +118,7 @@ dma_cache_sync(void *vaddr, size_t size,
*/
struct dma_mapping_ops {
void * (*alloc_coherent)(struct device *dev, size_t size,
-dma_addr_t *dma_handle, unsigned int __nocast flag);
+dma_addr_t *dma_handle, gfp_t flag);
void (*free_coherent)(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle);
dma_addr_t (*map_single)(struct device *dev, void *ptr,
......
......@@ -122,7 +122,7 @@ extern void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction);
extern void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
-dma_addr_t *dma_handle, unsigned int __nocast flag);
+dma_addr_t *dma_handle, gfp_t flag);
extern void iommu_free_coherent(struct iommu_table *tbl, size_t size,
void *vaddr, dma_addr_t dma_handle);
extern dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
......
......@@ -467,7 +467,7 @@ static inline void atm_dev_put(struct atm_dev *dev)
int atm_charge(struct atm_vcc *vcc,int truesize);
struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size,
-unsigned int __nocast gfp_flags);
+gfp_t gfp_flags);
int atm_pcr_goal(struct atm_trafprm *tp);
void vcc_release_async(struct atm_vcc *vcc, int reply);
......
......@@ -276,8 +276,8 @@ extern void bio_pair_release(struct bio_pair *dbio);
extern struct bio_set *bioset_create(int, int, int);
extern void bioset_free(struct bio_set *);
-extern struct bio *bio_alloc(unsigned int __nocast, int);
-extern struct bio *bio_alloc_bioset(unsigned int __nocast, int, struct bio_set *);
+extern struct bio *bio_alloc(gfp_t, int);
+extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
extern void bio_put(struct bio *);
extern void bio_free(struct bio *, struct bio_set *);
......@@ -287,7 +287,7 @@ extern int bio_phys_segments(struct request_queue *, struct bio *);
extern int bio_hw_segments(struct request_queue *, struct bio *);
extern void __bio_clone(struct bio *, struct bio *);
-extern struct bio *bio_clone(struct bio *, unsigned int __nocast);
+extern struct bio *bio_clone(struct bio *, gfp_t);
extern void bio_init(struct bio *);
......
......@@ -172,7 +172,7 @@ void __brelse(struct buffer_head *);
void __bforget(struct buffer_head *);
void __breadahead(struct block_device *, sector_t block, int size);
struct buffer_head *__bread(struct block_device *, sector_t block, int size);
-struct buffer_head *alloc_buffer_head(unsigned int __nocast gfp_flags);
+struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
void free_buffer_head(struct buffer_head * bh);
void FASTCALL(unlock_buffer(struct buffer_head *bh));
void FASTCALL(__lock_buffer(struct buffer_head *bh));
......
......@@ -149,7 +149,7 @@ struct cn_dev {
int cn_add_callback(struct cb_id *, char *, void (*callback) (void *));
void cn_del_callback(struct cb_id *);
-int cn_netlink_send(struct cn_msg *, u32, unsigned int __nocast);
+int cn_netlink_send(struct cn_msg *, u32, gfp_t);
int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id, void (*callback)(void *));
void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id);
......
......@@ -23,7 +23,7 @@ void cpuset_init_current_mems_allowed(void);
void cpuset_update_current_mems_allowed(void);
void cpuset_restrict_to_mems_allowed(unsigned long *nodes);
int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl);
-extern int cpuset_zone_allowed(struct zone *z, unsigned int __nocast gfp_mask);
+extern int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask);
extern int cpuset_excl_nodes_overlap(const struct task_struct *p);
extern struct file_operations proc_cpuset_operations;
extern char *cpuset_task_status_allowed(struct task_struct *task, char *buffer);
......@@ -49,8 +49,7 @@ static inline int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl)
return 1;
}
-static inline int cpuset_zone_allowed(struct zone *z,
-unsigned int __nocast gfp_mask)
+static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
return 1;
}
......
......@@ -19,7 +19,7 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev,
void dma_pool_destroy(struct dma_pool *pool);
-void *dma_pool_alloc(struct dma_pool *pool, unsigned int __nocast mem_flags,
+void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
dma_addr_t *handle);
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr);
......
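
Usage note (not part of the patch): the sparse warnings referred to above only appear when the tree is checked with sparse, which the kernel build invokes through the C= make variable - "make C=1" checks the files about to be recompiled, "make C=2" checks every source file. A plain gcc build is unaffected, exactly as the commit message says.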