Commit 5f713760 authored by Stephen Rothwell

Merge branch 'akpm-current/current'

parents 8a6290d8 1ead3e85
*.c diff=cpp
*.h diff=cpp
*.dtsi diff=dts
*.dts diff=dts
......@@ -45,6 +45,7 @@ show up in /proc/sys/kernel:
- hung_task_timeout_secs
- hung_task_check_interval_secs
- hung_task_warnings
- hung_task_interval_warnings
- hyperv_record_panic_msg
- kexec_load_disabled
- kptr_restrict
......@@ -383,14 +384,29 @@ Possible values to set are in range {0..LONG_MAX/HZ}.
hung_task_warnings:
===================
The maximum number of warnings to report. During a check interval
if a hung task is detected, this value is decreased by 1.
The maximum number of warnings to report. If a hung task is still
present after the timeout, this value is decreased by 1 every check
interval and a warning is produced.
When this value reaches 0, no more warnings will be reported.
This file shows up if CONFIG_DETECT_HUNG_TASK is enabled.
-1: report an infinite number of warnings.
hung_task_interval_warnings:
============================
The same as hung_task_warnings, but sets the number of warnings to
be issued about hung tasks detected during a check interval. These
warnings are produced *before* the timeout happens.
If a hung task is detected during a check interval, this value is
decreased by 1. When this value reaches 0, only timeout warnings
will be reported.
This file shows up if CONFIG_DETECT_HUNG_TASK is enabled.
-1: report an infinite number of check interval warnings.
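For illustration only, a minimal userspace sketch of tuning both knobs
through the /proc/sys/kernel files described above (the values chosen
here are arbitrary examples, and the program must run as root)::

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>

	static void set_knob(const char *path, const char *val)
	{
		int fd = open(path, O_WRONLY);

		if (fd < 0)
			return;
		if (write(fd, val, strlen(val)) < 0)
			; /* ignore errors in this sketch */
		close(fd);
	}

	int main(void)
	{
		/* Warn up to 10 times before the timeout fires ... */
		set_knob("/proc/sys/kernel/hung_task_interval_warnings", "10");
		/* ... and up to 5 times once it has expired. */
		set_knob("/proc/sys/kernel/hung_task_warnings", "5");
		return 0;
	}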
hyperv_record_panic_msg:
========================
......
#
# Feature name: debug-vm-pgtable
# Kconfig: ARCH_HAS_DEBUG_VM_PGTABLE
# description: arch supports pgtable tests for semantics compliance
#
-----------------------
| arch |status|
-----------------------
| alpha: | TODO |
| arc: | TODO |
| arm: | TODO |
| arm64: | ok |
| c6x: | TODO |
| csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
| m68k: | TODO |
| microblaze: | TODO |
| mips: | TODO |
| nds32: | TODO |
| nios2: | TODO |
| openrisc: | TODO |
| parisc: | TODO |
| powerpc: | TODO |
| riscv: | TODO |
| s390: | TODO |
| sh: | TODO |
| sparc: | TODO |
| um: | TODO |
| unicore32: | TODO |
| x86: | ok |
| xtensa: | TODO |
-----------------------
......@@ -84,7 +84,7 @@ buffer. This could result in linear overflows beyond the
end of the buffer, leading to all kinds of misbehaviors. While
`CONFIG_FORTIFY_SOURCE=y` and various compiler flags help reduce the
risk of using this function, there is no good reason to add new uses of
this function. The safe replacement is :c:func:`strscpy`.
this function. The safe replacement is stracpy() or strscpy().
strncpy() on NUL-terminated strings
-----------------------------------
......@@ -93,9 +93,9 @@ will be NUL terminated. This can lead to various linear read overflows
and other misbehavior due to the missing termination. It also NUL-pads the
destination buffer if the source contents are shorter than the destination
buffer size, which may be a needless performance penalty for callers using
only NUL-terminated strings. The safe replacement is :c:func:`strscpy`.
(Users of :c:func:`strscpy` still needing NUL-padding will need an
explicit :c:func:`memset` added.)
only NUL-terminated strings. In this case, the safe replacement is
stracpy() or strscpy(). If, however, the destination buffer still needs
NUL-padding, the safe replacement is stracpy_pad().
If a caller is using non-NUL-terminated strings, :c:func:`strncpy()` can
still be used, but destinations should be marked with the `__nonstring
......@@ -107,7 +107,7 @@ strlcpy()
:c:func:`strlcpy` reads the entire source buffer first, possibly exceeding
the given limit of bytes to copy. This is inefficient and can lead to
linear read overflows if a source string is not NUL-terminated. The
safe replacement is :c:func:`strscpy`.
safe replacement is stracpy() or strscpy().
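As a rough sketch of the conversions intended here (strscpy() is the
existing kernel API; stracpy()/stracpy_pad() are the helpers proposed
in this series, which derive the destination size from the array
type; the structure and function names below are made up for
illustration)::

	/* Assumes <linux/string.h> and <linux/printk.h>. */
	struct foo {
		char name[32];
	};

	static void set_name(struct foo *f, const char *src)
	{
		/*
		 * strscpy() always NUL-terminates and returns -E2BIG
		 * when the source had to be truncated.
		 */
		if (strscpy(f->name, src, sizeof(f->name)) < 0)
			pr_warn("foo name truncated\n");

		/*
		 * With the proposed helper the size comes from the
		 * array itself, so it cannot be mismatched:
		 *
		 *	stracpy(f->name, src);
		 *
		 * and stracpy_pad() additionally zero-fills the rest
		 * of the destination buffer.
		 */
	}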
Variable Length Arrays (VLAs)
-----------------------------
......
......@@ -11,6 +11,7 @@ config ARM64
select ACPI_PPTT if ACPI
select ARCH_CLOCKSOURCE_DATA
select ARCH_HAS_DEBUG_VIRTUAL
select ARCH_HAS_DEBUG_VM_PGTABLE
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_DMA_COHERENT_TO_PFN
select ARCH_HAS_DMA_PREP_COHERENT
......
......@@ -1069,7 +1069,6 @@ void arch_remove_memory(int nid, u64 start, u64 size,
{
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
struct zone *zone;
/*
* FIXME: Cleanup page tables (also in arch_add_memory() in case
......@@ -1078,7 +1077,6 @@ void arch_remove_memory(int nid, u64 start, u64 size,
* unplug. ARCH_ENABLE_MEMORY_HOTREMOVE must not be
* unlocked yet.
*/
zone = page_zone(pfn_to_page(start_pfn));
__remove_pages(zone, start_pfn, nr_pages, altmap);
__remove_pages(start_pfn, nr_pages, altmap);
}
#endif
......@@ -689,9 +689,7 @@ void arch_remove_memory(int nid, u64 start, u64 size,
{
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
struct zone *zone;
zone = page_zone(pfn_to_page(start_pfn));
__remove_pages(zone, start_pfn, nr_pages, altmap);
__remove_pages(start_pfn, nr_pages, altmap);
}
#endif
......@@ -130,10 +130,9 @@ void __ref arch_remove_memory(int nid, u64 start, u64 size,
{
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
struct page *page = pfn_to_page(start_pfn) + vmem_altmap_offset(altmap);
int ret;
__remove_pages(page_zone(page), start_pfn, nr_pages, altmap);
__remove_pages(start_pfn, nr_pages, altmap);
/* Remove htab bolted mappings for this section of memory */
start = (unsigned long)__va(start);
......
......@@ -291,10 +291,8 @@ void arch_remove_memory(int nid, u64 start, u64 size,
{
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
struct zone *zone;
zone = page_zone(pfn_to_page(start_pfn));
__remove_pages(zone, start_pfn, nr_pages, altmap);
__remove_pages(start_pfn, nr_pages, altmap);
vmem_remove_mapping(start, size);
}
#endif /* CONFIG_MEMORY_HOTPLUG */
......@@ -434,9 +434,7 @@ void arch_remove_memory(int nid, u64 start, u64 size,
{
unsigned long start_pfn = PFN_DOWN(start);
unsigned long nr_pages = size >> PAGE_SHIFT;
struct zone *zone;
zone = page_zone(pfn_to_page(start_pfn));
__remove_pages(zone, start_pfn, nr_pages, altmap);
__remove_pages(start_pfn, nr_pages, altmap);
}
#endif /* CONFIG_MEMORY_HOTPLUG */
......@@ -61,6 +61,7 @@ config X86
select ARCH_CLOCKSOURCE_INIT
select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
select ARCH_HAS_DEBUG_VIRTUAL
select ARCH_HAS_DEBUG_VM_PGTABLE
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_FAST_MULTIPLIER
......
......@@ -53,6 +53,12 @@ static inline void sync_initial_page_table(void) { }
struct mm_struct;
#define mm_p4d_folded mm_p4d_folded
static inline bool mm_p4d_folded(struct mm_struct *mm)
{
return !pgtable_l5_enabled();
}
void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte);
void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);
......
......@@ -865,10 +865,8 @@ void arch_remove_memory(int nid, u64 start, u64 size,
{
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
struct zone *zone;
zone = page_zone(pfn_to_page(start_pfn));
__remove_pages(zone, start_pfn, nr_pages, altmap);
__remove_pages(start_pfn, nr_pages, altmap);
}
#endif
......
......@@ -1212,10 +1212,8 @@ void __ref arch_remove_memory(int nid, u64 start, u64 size,
{
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
struct page *page = pfn_to_page(start_pfn) + vmem_altmap_offset(altmap);
struct zone *zone = page_zone(page);
__remove_pages(zone, start_pfn, nr_pages, altmap);
__remove_pages(start_pfn, nr_pages, altmap);
kernel_physical_mapping_remove(start, start + size);
}
#endif /* CONFIG_MEMORY_HOTPLUG */
......
......@@ -540,6 +540,9 @@ static ssize_t soft_offline_page_store(struct device *dev,
pfn >>= PAGE_SHIFT;
if (!pfn_valid(pfn))
return -ENXIO;
/* Only online pages can be soft-offlined (esp., not ZONE_DEVICE). */
if (!pfn_to_online_page(pfn))
return -EIO;
ret = soft_offline_page(pfn_to_page(pfn), 0);
return ret == 0 ? count : ret;
}
......
......@@ -679,9 +679,7 @@ static void hv_page_online_one(struct hv_hotadd_state *has, struct page *pg)
__ClearPageOffline(pg);
/* This frame is currently backed; online the page. */
__online_page_set_limits(pg);
__online_page_increment_counters(pg);
__online_page_free(pg);
generic_online_page(pg, 0);
lockdep_assert_held(&dm_device.ha_lock);
dm_device.num_pages_onlined++;
......
......@@ -374,7 +374,6 @@ static void xen_online_page(struct page *page, unsigned int order)
mutex_lock(&balloon_mutex);
for (i = 0; i < size; i++) {
p = pfn_to_page(start_pfn + i);
__online_page_set_limits(p);
balloon_append(p);
}
mutex_unlock(&balloon_mutex);
......
......@@ -1287,12 +1287,9 @@ static long read_events(struct kioctx *ctx, long min_nr, long nr,
* the ringbuffer empty. So in practice we should be ok, but it's
* something to be aware of when touching this code.
*/
if (until == 0)
aio_read_events(ctx, min_nr, nr, event, &ret);
else
wait_event_interruptible_hrtimeout(ctx->wait,
aio_read_events(ctx, min_nr, nr, event, &ret),
until);
wait_event_interruptible_hrtimeout(ctx->wait,
aio_read_events(ctx, min_nr, nr, event, &ret),
until);
return ret;
}
......
......@@ -404,6 +404,17 @@ static unsigned long total_mapping_size(const struct elf_phdr *cmds, int nr)
ELF_PAGESTART(cmds[first_idx].p_vaddr);
}
static int elf_read(struct file *file, void *buf, size_t len, loff_t pos)
{
ssize_t rv;
rv = kernel_read(file, buf, len, &pos);
if (unlikely(rv != len)) {
return (rv < 0) ? rv : -EIO;
}
return 0;
}
/**
* load_elf_phdrs() - load ELF program headers
* @elf_ex: ELF header of the binary whose program headers should be loaded
......@@ -418,7 +429,6 @@ static struct elf_phdr *load_elf_phdrs(const struct elfhdr *elf_ex,
{
struct elf_phdr *elf_phdata = NULL;
int retval, err = -1;
loff_t pos = elf_ex->e_phoff;
unsigned int size;
/*
......@@ -439,9 +449,9 @@ static struct elf_phdr *load_elf_phdrs(const struct elfhdr *elf_ex,
goto out;
/* Read in the program headers */
retval = kernel_read(elf_file, elf_phdata, size, &pos);
if (retval != size) {
err = (retval < 0) ? retval : -EIO;
retval = elf_read(elf_file, elf_phdata, size, elf_ex->e_phoff);
if (retval < 0) {
err = retval;
goto out;
}
......@@ -544,7 +554,7 @@ static inline int make_prot(u32 p_flags)
an ELF header */
static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
struct file *interpreter, unsigned long *interp_map_addr,
struct file *interpreter,
unsigned long no_base, struct elf_phdr *interp_elf_phdata)
{
struct elf_phdr *eppnt;
......@@ -590,8 +600,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
map_addr = elf_map(interpreter, load_addr + vaddr,
eppnt, elf_prot, elf_type, total_size);
total_size = 0;
if (!*interp_map_addr)
*interp_map_addr = map_addr;
error = map_addr;
if (BAD_ADDR(map_addr))
goto out;
......@@ -722,7 +730,6 @@ static int load_elf_binary(struct linux_binprm *bprm)
elf_ppnt = elf_phdata;
for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
char *elf_interpreter;
loff_t pos;
if (elf_ppnt->p_type != PT_INTERP)
continue;
......@@ -740,14 +747,10 @@ static int load_elf_binary(struct linux_binprm *bprm)
if (!elf_interpreter)
goto out_free_ph;
pos = elf_ppnt->p_offset;
retval = kernel_read(bprm->file, elf_interpreter,
elf_ppnt->p_filesz, &pos);
if (retval != elf_ppnt->p_filesz) {
if (retval >= 0)
retval = -EIO;
retval = elf_read(bprm->file, elf_interpreter, elf_ppnt->p_filesz,
elf_ppnt->p_offset);
if (retval < 0)
goto out_free_interp;
}
/* make sure path is NULL terminated */
retval = -ENOEXEC;
if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
......@@ -766,14 +769,10 @@ static int load_elf_binary(struct linux_binprm *bprm)
would_dump(bprm, interpreter);
/* Get the exec headers */
pos = 0;
retval = kernel_read(interpreter, &loc->interp_elf_ex,
sizeof(loc->interp_elf_ex), &pos);
if (retval != sizeof(loc->interp_elf_ex)) {
if (retval >= 0)
retval = -EIO;
retval = elf_read(interpreter, &loc->interp_elf_ex,
sizeof(loc->interp_elf_ex), 0);
if (retval < 0)
goto out_free_dentry;
}
break;
......@@ -1054,11 +1053,8 @@ static int load_elf_binary(struct linux_binprm *bprm)
}
if (interpreter) {
unsigned long interp_map_addr = 0;
elf_entry = load_elf_interp(&loc->interp_elf_ex,
interpreter,
&interp_map_addr,
load_bias, interp_elf_phdata);
if (!IS_ERR((void *)elf_entry)) {
/*
......@@ -1179,11 +1175,10 @@ static int load_elf_library(struct file *file)
unsigned long elf_bss, bss, len;
int retval, error, i, j;
struct elfhdr elf_ex;
loff_t pos = 0;
error = -ENOEXEC;
retval = kernel_read(file, &elf_ex, sizeof(elf_ex), &pos);
if (retval != sizeof(elf_ex))
retval = elf_read(file, &elf_ex, sizeof(elf_ex), 0);
if (retval < 0)
goto out;
if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
......@@ -1208,9 +1203,8 @@ static int load_elf_library(struct file *file)
eppnt = elf_phdata;
error = -ENOEXEC;
pos = elf_ex.e_phoff;
retval = kernel_read(file, eppnt, j, &pos);
if (retval != j)
retval = elf_read(file, eppnt, j, elf_ex.e_phoff);
if (retval < 0)
goto out_free_ph;
for (j = 0, i = 0; i<elf_ex.e_phnum; i++)
......
......@@ -956,10 +956,20 @@ grow_dev_page(struct block_device *bdev, sector_t block,
end_block = init_page_buffers(page, bdev,
(sector_t)index << sizebits,
size);
#ifdef CONFIG_DEBUG_AID_FOR_SYZBOT
current->getblk_executed |= 0x01;
#endif
goto done;
}
if (!try_to_free_buffers(page))
if (!try_to_free_buffers(page)) {
#ifdef CONFIG_DEBUG_AID_FOR_SYZBOT
current->getblk_executed |= 0x02;
#endif
goto failed;
}
#ifdef CONFIG_DEBUG_AID_FOR_SYZBOT
current->getblk_executed |= 0x04;
#endif
}
/*
......@@ -979,6 +989,9 @@ grow_dev_page(struct block_device *bdev, sector_t block,
spin_unlock(&inode->i_mapping->private_lock);
done:
ret = (block < end_block) ? 1 : -ENXIO;
#ifdef CONFIG_DEBUG_AID_FOR_SYZBOT
current->getblk_executed |= 0x08;
#endif
failed:
unlock_page(page);
put_page(page);
......@@ -1034,6 +1047,12 @@ __getblk_slow(struct block_device *bdev, sector_t block,
return NULL;
}
#ifdef CONFIG_DEBUG_AID_FOR_SYZBOT
current->getblk_stamp = jiffies;
current->getblk_executed = 0;
current->getblk_bh_count = 0;
current->getblk_bh_state = 0;
#endif
for (;;) {
struct buffer_head *bh;
int ret;
......@@ -1045,6 +1064,24 @@ __getblk_slow(struct block_device *bdev, sector_t block,
ret = grow_buffers(bdev, block, size, gfp);
if (ret < 0)
return NULL;
#ifdef CONFIG_DEBUG_AID_FOR_SYZBOT
if (!time_after(jiffies, current->getblk_stamp + 3 * HZ))
continue;
printk(KERN_ERR "%s(%u): getblk(): executed=%x bh_count=%d bh_state=%lx bdev_super_blocksize=%ld size=%u bdev_super_blocksize_bits=%d bdev_inode_blkbits=%d\n",
current->comm, current->pid, current->getblk_executed,
current->getblk_bh_count, current->getblk_bh_state,
IS_ERR_OR_NULL(bdev->bd_super) ? -1L :
bdev->bd_super->s_blocksize, size,
IS_ERR_OR_NULL(bdev->bd_super) ? -1 :
bdev->bd_super->s_blocksize_bits,
IS_ERR_OR_NULL(bdev->bd_inode) ? -1 :
bdev->bd_inode->i_blkbits);
current->getblk_executed = 0;
current->getblk_bh_count = 0;
current->getblk_bh_state = 0;
current->getblk_stamp = jiffies;
#endif
}
}
......@@ -3223,6 +3260,11 @@ EXPORT_SYMBOL(sync_dirty_buffer);
*/
static inline int buffer_busy(struct buffer_head *bh)
{
#ifdef CONFIG_DEBUG_AID_FOR_SYZBOT
current->getblk_executed |= 0x80;
current->getblk_bh_count = atomic_read(&bh->b_count);
current->getblk_bh_state = bh->b_state;
#endif
return atomic_read(&bh->b_count) |
(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
}
......@@ -3261,11 +3303,18 @@ int try_to_free_buffers(struct page *page)
int ret = 0;
BUG_ON(!PageLocked(page));
if (PageWriteback(page))
if (PageWriteback(page)) {
#ifdef CONFIG_DEBUG_AID_FOR_SYZBOT
current->getblk_executed |= 0x10;
#endif
return 0;
}
if (mapping == NULL) { /* can this still happen? */
ret = drop_buffers(page, &buffers_to_free);
#ifdef CONFIG_DEBUG_AID_FOR_SYZBOT
current->getblk_executed |= 0x20;
#endif
goto out;
}
......@@ -3289,6 +3338,9 @@ int try_to_free_buffers(struct page *page)
if (ret)
cancel_dirty_page(page);
spin_unlock(&mapping->private_lock);
#ifdef CONFIG_DEBUG_AID_FOR_SYZBOT
current->getblk_executed |= 0x40;
#endif
out:
if (buffers_to_free) {
struct buffer_head *bh = buffers_to_free;
......
......@@ -551,28 +551,23 @@ static int ep_call_nested(struct nested_calls *ncalls,
*/
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct nested_calls poll_safewake_ncalls;
static int ep_poll_wakeup_proc(void *priv, void *cookie, int call_nests)
{
unsigned long flags;
wait_queue_head_t *wqueue = (wait_queue_head_t *)cookie;
spin_lock_irqsave_nested(&wqueue->lock, flags, call_nests + 1);
wake_up_locked_poll(wqueue, EPOLLIN);
spin_unlock_irqrestore(&wqueue->lock, flags);
return 0;
}
static DEFINE_PER_CPU(int, wakeup_nest);
static void ep_poll_safewake(wait_queue_head_t *wq)
{
int this_cpu = get_cpu();
ep_call_nested(&poll_safewake_ncalls,
ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu);
unsigned long flags;
int subclass;
put_cpu();
local_irq_save(flags);
preempt_disable();
subclass = __this_cpu_read(wakeup_nest);
spin_lock_nested(&wq->lock, subclass + 1);
__this_cpu_inc(wakeup_nest);
wake_up_locked_poll(wq, POLLIN);
__this_cpu_dec(wakeup_nest);
spin_unlock(&wq->lock);
local_irq_restore(flags);
preempt_enable();
}
#else
......@@ -671,7 +666,6 @@ static __poll_t ep_scan_ready_list(struct eventpoll *ep,
void *priv, int depth, bool ep_locked)
{
__poll_t res;
int pwake = 0;
struct epitem *epi, *nepi;
LIST_HEAD(txlist);
......@@ -738,26 +732,11 @@ static __poll_t ep_scan_ready_list(struct eventpoll *ep,
*/
list_splice(&txlist, &ep->rdllist);
__pm_relax(ep->ws);
if (!list_empty(&ep->rdllist)) {
/*
* Wake up (if active) both the eventpoll wait list and
* the ->poll() wait list (delayed after we release the lock).
*/
if (waitqueue_active(&ep->wq))
wake_up(&ep->wq);
if (waitqueue_active(&ep->poll_wait))
pwake++;
}
write_unlock_irq(&ep->lock);
if (!ep_locked)
mutex_unlock(&ep->mtx);
/* We have to call this outside the lock */
if (pwake)
ep_poll_safewake(&ep->poll_wait);
return res;
}
......@@ -2370,11 +2349,6 @@ static int __init eventpoll_init(void)
*/
ep_nested_calls_init(&poll_loop_ncalls);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/* Initialize the structure used to perform safe poll wait head wake ups */
ep_nested_calls_init(&poll_safewake_ncalls);
#endif
/*
* We can have many thousands of epitems, so prevent this from
* using an extra cache line on 64-bit (and smaller) CPUs
......
......@@ -440,7 +440,7 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
u32 hash;
index = page->index;
hash = hugetlb_fault_mutex_hash(h, mapping, index, 0);
hash = hugetlb_fault_mutex_hash(mapping, index);
mutex_lock(&hugetlb_fault_mutex_table[hash]);
/*
......@@ -644,7 +644,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
addr = index * hpage_size;
/* mutex taken here, fault path and hole punch */
hash = hugetlb_fault_mutex_hash(h, mapping, index, addr);
hash = hugetlb_fault_mutex_hash(mapping, index);
mutex_lock(&hugetlb_fault_mutex_table[hash]);
/* See if already present in mapping to avoid alloc/free */
......
......@@ -327,8 +327,8 @@ int ocfs2_acl_chmod(struct inode *inode, struct buffer_head *bh)
down_read(&OCFS2_I(inode)->ip_xattr_sem);
acl = ocfs2_get_acl_nolock(inode, ACL_TYPE_ACCESS, bh);
up_read(&OCFS2_I(inode)->ip_xattr_sem);
if (IS_ERR(acl) || !acl)
return PTR_ERR(acl);
if (IS_ERR_OR_NULL(acl))
return PTR_ERR_OR_ZERO(acl);
ret = __posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode);
if (ret)
return ret;
......
......@@ -1230,6 +1230,7 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
transfer_to[USRQUOTA] = dqget(sb, make_kqid_uid(attr->ia_uid));
if (IS_ERR(transfer_to[USRQUOTA])) {
status = PTR_ERR(transfer_to[USRQUOTA]);
transfer_to[USRQUOTA] = NULL;
goto bail_unlock;
}
}
......@@ -1239,6 +1240,7 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
transfer_to[GRPQUOTA] = dqget(sb, make_kqid_gid(attr->ia_gid));
if (IS_ERR(transfer_to[GRPQUOTA])) {
status = PTR_ERR(transfer_to[GRPQUOTA]);
transfer_to[GRPQUOTA] = NULL;
goto bail_unlock;
}
}
......@@ -2096,53 +2098,89 @@ static int ocfs2_is_io_unaligned(struct inode *inode, size_t count, loff_t pos)
return 0;
}
static int ocfs2_prepare_inode_for_refcount(struct inode *inode,
struct file *file,
loff_t pos, size_t count,
int *meta_level)
static int ocfs2_inode_lock_for_extent_tree(struct inode *inode,
struct buffer_head **di_bh,
int meta_level,
int overwrite_io,
int write_sem,
int wait)
{
int ret;
struct buffer_head *di_bh = NULL;
u32 cpos = pos >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
u32 clusters =
ocfs2_clusters_for_bytes(inode->i_sb, pos + count) - cpos;
int ret = 0;
ret = ocfs2_inode_lock(inode, &di_bh, 1);
if (ret) {
mlog_errno(ret);
if (wait)
ret = ocfs2_inode_lock(inode, NULL, meta_level);
else
ret = ocfs2_try_inode_lock(inode,
overwrite_io ? NULL : di_bh, meta_level);
if (ret < 0)
goto out;
if (wait) {
if (write_sem)
down_write(&OCFS2_I(inode)->ip_alloc_sem);
else
down_read(&OCFS2_I(inode)->ip_alloc_sem);
} else {
if (write_sem)
ret = down_write_trylock(&OCFS2_I(inode)->ip_alloc_sem);
else
ret = down_read_trylock(&OCFS2_I(inode)->ip_alloc_sem);