Commit 16af43fe authored by Russell King

Merge branches 'devel-stable', 'fixes' and 'mmci' into for-linus

File mode changed from 100755 to 100644
......@@ -2438,7 +2438,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
real-time workloads. It can also improve energy
efficiency for asymmetric multiprocessors.
rcu_nocbs_poll [KNL,BOOT]
rcu_nocb_poll [KNL,BOOT]
Rather than requiring that offloaded CPUs
(specified by rcu_nocbs= above) explicitly
awaken the corresponding "rcuoN" kthreads,
......
......@@ -57,7 +57,7 @@ Protocol 2.10: (Kernel 2.6.31) Added a protocol for relaxed alignment
Protocol 2.11: (Kernel 3.6) Added a field for offset of EFI handover
protocol entry point.
Protocol 2.12: (Kernel 3.9) Added the xloadflags field and extension fields
Protocol 2.12: (Kernel 3.8) Added the xloadflags field and extension fields
to struct boot_params for loading bzImage and ramdisk
above 4G in 64bit.
......
......@@ -1489,7 +1489,7 @@ AVR32 ARCHITECTURE
M: Haavard Skinnemoen <hskinnemoen@gmail.com>
M: Hans-Christian Egtvedt <egtvedt@samfundet.no>
W: http://www.atmel.com/products/AVR32/
W: http://avr32linux.org/
W: http://mirror.egtvedt.no/avr32linux.org/
W: http://avrfreaks.net/
S: Maintained
F: arch/avr32/
......
VERSION = 3
PATCHLEVEL = 8
SUBLEVEL = 0
EXTRAVERSION = -rc5
NAME = Terrified Chipmunk
EXTRAVERSION = -rc7
NAME = Unicycling Gorilla
# *DOCUMENTATION*
# To see a list of typical targets execute "make help"
......
......@@ -68,8 +68,8 @@ else
endif
check_for_multiple_loadaddr = \
if [ $(words $(UIMAGE_LOADADDR)) -gt 1 ]; then \
echo 'multiple load addresses: $(UIMAGE_LOADADDR)'; \
if [ $(words $(UIMAGE_LOADADDR)) -ne 1 ]; then \
echo 'multiple (or no) load addresses: $(UIMAGE_LOADADDR)'; \
echo 'This is incompatible with uImages'; \
echo 'Specify LOADADDR on the commandline to build an uImage'; \
false; \
......
......@@ -351,6 +351,25 @@ void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
irq_set_chained_handler(irq, gic_handle_cascade_irq);
}
static u8 gic_get_cpumask(struct gic_chip_data *gic)
{
void __iomem *base = gic_data_dist_base(gic);
u32 mask, i;
for (i = mask = 0; i < 32; i += 4) {
mask = readl_relaxed(base + GIC_DIST_TARGET + i);
mask |= mask >> 16;
mask |= mask >> 8;
if (mask)
break;
}
if (!mask)
pr_crit("GIC CPU mask not found - kernel will fail to boot.\n");
return mask;
}
static void __init gic_dist_init(struct gic_chip_data *gic)
{
unsigned int i;
......@@ -369,7 +388,9 @@ static void __init gic_dist_init(struct gic_chip_data *gic)
/*
* Set all global interrupts to this CPU only.
*/
cpumask = readl_relaxed(base + GIC_DIST_TARGET + 0);
cpumask = gic_get_cpumask(gic);
cpumask |= cpumask << 8;
cpumask |= cpumask << 16;
for (i = 32; i < gic_irqs; i += 4)
writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);
......@@ -400,7 +421,7 @@ static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
* Get what the GIC says our CPU mask is.
*/
BUG_ON(cpu >= NR_GIC_CPU_IF);
cpu_mask = readl_relaxed(dist_base + GIC_DIST_TARGET + 0);
cpu_mask = gic_get_cpumask(gic);
gic_cpu_map[cpu] = cpu_mask;
/*
......
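For context, gic_get_cpumask() relies on the banked GIC_DIST_TARGET registers: each 32-bit register covers four interrupts with one byte-wide target field each, and on the banked per-CPU registers an implemented byte reads back as the mask of the CPU interface doing the read (the loop scans until it finds a non-zero group, since some implementations return 0 for the first registers). The shift-and-OR fold collapses the four byte lanes into the low byte, and gic_dist_init() replicates that byte back across all four lanes before programming the SPI targets. A minimal stand-alone sketch of the two bit tricks; the register value is made up for illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Pretend GIC_DIST_TARGET reads back 0x02020202: every byte lane
         * reports that we are CPU interface 1 (mask 0x02). */
        uint32_t reg = 0x02020202;

        /* Fold the four byte lanes into the low byte (gic_get_cpumask()). */
        uint32_t mask = reg;
        mask |= mask >> 16;
        mask |= mask >> 8;
        mask &= 0xff;

        /* Replicate it back to all four lanes (gic_dist_init()). */
        uint32_t cpumask = mask;
        cpumask |= cpumask << 8;
        cpumask |= cpumask << 16;

        printf("per-cpu mask 0x%02x, SPI target word 0x%08x\n", mask, cpumask);
        return 0;
}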
......@@ -24,6 +24,7 @@ extern struct arm_delay_ops {
void (*delay)(unsigned long);
void (*const_udelay)(unsigned long);
void (*udelay)(unsigned long);
bool const_clock;
} arm_delay_ops;
#define __delay(n) arm_delay_ops.delay(n)
......
......@@ -37,7 +37,7 @@
*/
#define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET)
#define TASK_SIZE (UL(CONFIG_PAGE_OFFSET) - UL(0x01000000))
#define TASK_UNMAPPED_BASE (UL(CONFIG_PAGE_OFFSET) / 3)
#define TASK_UNMAPPED_BASE ALIGN(TASK_SIZE / 3, SZ_16M)
/*
* The maximum size of a 26-bit user space task.
......
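Deriving TASK_UNMAPPED_BASE from TASK_SIZE and rounding up to 16MiB keeps the mmap base 16MiB-aligned for every CONFIG_PAGE_OFFSET, instead of landing on whatever PAGE_OFFSET/3 happens to be. A small worked example as a stand-alone sketch; SZ_16M and ALIGN() are redefined here only for the demonstration:

#include <stdio.h>

#define SZ_16M        0x01000000UL
#define ALIGN(x, a)   (((x) + (a) - 1) & ~((a) - 1))

static void show(unsigned long page_offset)
{
        unsigned long task_size = page_offset - 0x01000000UL;
        unsigned long old = page_offset / 3;                  /* previous formula */
        unsigned long new = ALIGN(task_size / 3, SZ_16M);     /* new formula */

        printf("PAGE_OFFSET=0x%08lx old=0x%08lx new=0x%08lx\n",
               page_offset, old, new);
}

int main(void)
{
        show(0xC0000000UL); /* VMSPLIT_3G: both formulas give 0x40000000 */
        show(0x80000000UL); /* VMSPLIT_2G: 0x2AAAAAAA vs 16MiB-aligned 0x2B000000 */
        return 0;
}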
......@@ -247,7 +247,8 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER | L_PTE_NONE;
const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
L_PTE_NONE | L_PTE_VALID;
pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
return pte;
}
......
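The pte_modify() idiom keeps every bit outside 'mask' from the old PTE and takes every bit inside 'mask' from the new protection. Widening the mask with L_PTE_VALID therefore lets a protection that clears the valid bit (such as PROT_NONE) actually take effect instead of carrying the old valid bit over. A rough stand-alone illustration; the bit positions below are assumptions made for the sketch, not the exact ARM layout:

#include <stdio.h>
#include <stdint.h>

/* Assumed bit positions, for illustration only. */
#define L_PTE_VALID     (1u << 0)
#define L_PTE_RDONLY    (1u << 7)
#define L_PTE_USER      (1u << 8)
#define L_PTE_XN        (1u << 9)
#define L_PTE_NONE      (1u << 11)

static uint32_t modify(uint32_t pte, uint32_t newprot, uint32_t mask)
{
        return (pte & ~mask) | (newprot & mask);
}

int main(void)
{
        uint32_t pte = L_PTE_VALID | L_PTE_USER;                    /* present, user */
        uint32_t prot_none = L_PTE_NONE | L_PTE_RDONLY | L_PTE_XN;  /* valid bit clear */
        uint32_t old_mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER | L_PTE_NONE;
        uint32_t new_mask = old_mask | L_PTE_VALID;

        printf("old mask: pte=0x%03x (valid still set)\n", modify(pte, prot_none, old_mask));
        printf("new mask: pte=0x%03x (valid cleared)\n",   modify(pte, prot_none, new_mask));
        return 0;
}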
......@@ -93,11 +93,11 @@ static void notrace update_sched_clock(void)
* detectable in cyc_to_fixed_sched_clock().
*/
raw_local_irq_save(flags);
cd.epoch_cyc = cyc;
cd.epoch_cyc_copy = cyc;
smp_wmb();
cd.epoch_ns = ns;
smp_wmb();
cd.epoch_cyc_copy = cyc;
cd.epoch_cyc = cyc;
raw_local_irq_restore(flags);
}
......
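The reordering matters because the epoch is read lock-free: the reader samples epoch_cyc, then epoch_ns, and retries if epoch_cyc_copy does not match, so the writer must publish the copy first and epoch_cyc last. A sketch of the matching reader loop, modelled on this file's cyc_to_sched_clock(); conversion of the cycle delta to nanoseconds is omitted:

/* Sketch of the lock-free reader that the new write ordering protects. */
static u64 read_epoch(u32 *epoch_cyc_out)
{
        u64 epoch_ns;
        u32 epoch_cyc;

        do {
                epoch_cyc = cd.epoch_cyc;
                smp_rmb();
                epoch_ns = cd.epoch_ns;
                smp_rmb();
        } while (epoch_cyc != cd.epoch_cyc_copy);

        *epoch_cyc_out = epoch_cyc;
        return epoch_ns;
}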
......@@ -686,6 +686,9 @@ static int cpufreq_callback(struct notifier_block *nb,
if (freq->flags & CPUFREQ_CONST_LOOPS)
return NOTIFY_OK;
if (arm_delay_ops.const_clock)
return NOTIFY_OK;
if (!per_cpu(l_p_j_ref, cpu)) {
per_cpu(l_p_j_ref, cpu) =
per_cpu(cpu_data, cpu).loops_per_jiffy;
......
......@@ -77,6 +77,7 @@ void __init register_current_timer_delay(const struct delay_timer *timer)
arm_delay_ops.delay = __timer_delay;
arm_delay_ops.const_udelay = __timer_const_udelay;
arm_delay_ops.udelay = __timer_udelay;
arm_delay_ops.const_clock = true;
delay_calibrated = true;
} else {
pr_info("Ignoring duplicate/late registration of read_current_timer delay\n");
......
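The new const_clock flag records that udelay() is now backed by a fixed-rate timer, so the cpufreq notifier change above can skip rescaling loops_per_jiffy. A rough illustration of why the two delay styles differ; the helpers and variables here (read_counter, ticks_per_us, lpj_per_us) are invented for the sketch:

/* Loop-based delay: 'lpj_per_us' is calibrated against the CPU clock and must
 * be rescaled whenever cpufreq changes the core frequency. */
static void loop_udelay(unsigned long us)
{
        unsigned long loops = us * lpj_per_us;
        while (loops--)
                cpu_relax();
}

/* Timer-based delay: counts a fixed-rate counter, so CPU frequency scaling has
 * no effect and no notifier work is needed (hence const_clock = true). */
static void timer_udelay(unsigned long us)
{
        u64 end = read_counter() + us * ticks_per_us;
        while (read_counter() < end)
                cpu_relax();
}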
......@@ -414,7 +414,7 @@ config MACH_EXYNOS4_DT
select CPU_EXYNOS4210
select HAVE_SAMSUNG_KEYPAD if INPUT_KEYBOARD
select PINCTRL
select PINCTRL_EXYNOS4
select PINCTRL_EXYNOS
select USE_OF
help
Machine support for Samsung Exynos4 machine with device tree enabled.
......
......@@ -115,7 +115,7 @@
/*
* Only define NR_IRQS if less than NR_IRQS_EB
*/
#define NR_IRQS_EB (IRQ_EB_GIC_START + 96)
#define NR_IRQS_EB (IRQ_EB_GIC_START + 128)
#if defined(CONFIG_MACH_REALVIEW_EB) \
&& (!defined(NR_IRQS) || (NR_IRQS < NR_IRQS_EB))
......
......@@ -749,7 +749,6 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
unsigned long instr = 0, instrptr;
int (*handler)(unsigned long addr, unsigned long instr, struct pt_regs *regs);
unsigned int type;
mm_segment_t fs;
unsigned int fault;
u16 tinstr = 0;
int isize = 4;
......@@ -760,16 +759,15 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
instrptr = instruction_pointer(regs);
fs = get_fs();
set_fs(KERNEL_DS);
if (thumb_mode(regs)) {
fault = __get_user(tinstr, (u16 *)(instrptr & ~1));
u16 *ptr = (u16 *)(instrptr & ~1);
fault = probe_kernel_address(ptr, tinstr);
if (!fault) {
if (cpu_architecture() >= CPU_ARCH_ARMv7 &&
IS_T32(tinstr)) {
/* Thumb-2 32-bit */
u16 tinst2 = 0;
fault = __get_user(tinst2, (u16 *)(instrptr+2));
fault = probe_kernel_address(ptr + 1, tinst2);
instr = (tinstr << 16) | tinst2;
thumb2_32b = 1;
} else {
......@@ -778,8 +776,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
}
}
} else
fault = __get_user(instr, (u32 *)instrptr);
set_fs(fs);
fault = probe_kernel_address(instrptr, instr);
if (fault) {
type = TYPE_FAULT;
......
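Switching from get_fs()/set_fs() plus __get_user() to probe_kernel_address() avoids taking (and possibly sleeping in) a page fault while do_alignment() runs in atomic context: probe_kernel_address() performs the copy with page faults disabled and returns -EFAULT on failure, and the caller no longer has to juggle the address limit itself. A minimal sketch of the pattern, assuming a 32-bit instruction word at 'pc':

#include <linux/uaccess.h>

/* Sketch only: read one 32-bit instruction word without touching the
 * USER_DS/KERNEL_DS address limit. */
static int read_insn_word(unsigned long pc, u32 *insn)
{
        return probe_kernel_address((u32 *)pc, *insn);
}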
......@@ -640,7 +640,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
if (is_coherent || nommu())
addr = __alloc_simple_buffer(dev, size, gfp, &page);
else if (gfp & GFP_ATOMIC)
else if (!(gfp & __GFP_WAIT))
addr = __alloc_from_pool(size, &page);
else if (!IS_ENABLED(CONFIG_CMA))
addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
......
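In this kernel era GFP_ATOMIC is just __GFP_HIGH (permission to dip into emergency reserves), so 'gfp & GFP_ATOMIC' is not a reliable "caller may not sleep" test: GFP_NOWAIT, for example, has neither bit set and would have been sent down a blocking path. The definitive test is the absence of __GFP_WAIT. A stand-alone illustration; the flag values are quoted from the 3.x-era <linux/gfp.h> and redefined here only for the demo:

#include <stdio.h>

#define __GFP_WAIT      0x10u
#define __GFP_HIGH      0x20u
#define __GFP_IO        0x40u
#define __GFP_FS        0x80u
#define GFP_ATOMIC      (__GFP_HIGH)
#define GFP_NOWAIT      0u
#define GFP_KERNEL      (__GFP_WAIT | __GFP_IO | __GFP_FS)

static void check(const char *name, unsigned gfp)
{
        printf("%-10s old test (gfp & GFP_ATOMIC): %d   new test (!(gfp & __GFP_WAIT)): %d\n",
               name, !!(gfp & GFP_ATOMIC), !(gfp & __GFP_WAIT));
}

int main(void)
{
        check("GFP_KERNEL", GFP_KERNEL);  /* may sleep: both tests say 0 */
        check("GFP_ATOMIC", GFP_ATOMIC);  /* atomic: both tests say 1 */
        check("GFP_NOWAIT", GFP_NOWAIT);  /* atomic, but the old test misses it */
        return 0;
}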
......@@ -22,12 +22,14 @@
.macro DBGSTR, str
#ifdef DEBUG
stmfd sp!, {r0-r3, ip, lr}
add r0, pc, #4
ldr r0, =1f
bl printk
b 1f
.asciz KERN_DEBUG "VFP: \str\n"
.balign 4
1: ldmfd sp!, {r0-r3, ip, lr}
ldmfd sp!, {r0-r3, ip, lr}
.pushsection .rodata, "a"
1: .ascii KERN_DEBUG "VFP: \str\n"
.byte 0
.previous
#endif
.endm
......@@ -35,12 +37,14 @@
#ifdef DEBUG
stmfd sp!, {r0-r3, ip, lr}
mov r1, \arg
add r0, pc, #4
ldr r0, =1f
bl printk
b 1f
.asciz KERN_DEBUG "VFP: \str\n"
.balign 4
1: ldmfd sp!, {r0-r3, ip, lr}
ldmfd sp!, {r0-r3, ip, lr}
.pushsection .rodata, "a"
1: .ascii KERN_DEBUG "VFP: \str\n"
.byte 0
.previous
#endif
.endm
......@@ -50,12 +54,14 @@
mov r3, \arg3
mov r2, \arg2
mov r1, \arg1
add r0, pc, #4
ldr r0, =1f
bl printk
b 1f
.asciz KERN_DEBUG "VFP: \str\n"
.balign 4
1: ldmfd sp!, {r0-r3, ip, lr}
ldmfd sp!, {r0-r3, ip, lr}
.pushsection .rodata, "a"
1: .ascii KERN_DEBUG "VFP: \str\n"
.byte 0
.previous
#endif
.endm
......
......@@ -413,7 +413,7 @@ void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
* If there isn't a second FP instruction, exit now. Note that
* the FPEXC.FP2V bit is valid only if FPEXC.EX is 1.
*/
if (fpexc ^ (FPEXC_EX | FPEXC_FP2V))
if ((fpexc & (FPEXC_EX | FPEXC_FP2V)) != (FPEXC_EX | FPEXC_FP2V))
goto exit;
/*
......
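The old test 'fpexc ^ (FPEXC_EX | FPEXC_FP2V)' is non-zero (and therefore exits) unless FPEXC contains exactly those two bits and nothing else, which never happens in practice since at least FPEXC_EN is also set when the VFP traps. Masking first, as the new code does, checks only the two bits of interest. A stand-alone demonstration, with the bit positions taken from asm/vfp.h:

#include <stdio.h>
#include <stdint.h>

#define FPEXC_EX        (1u << 31)
#define FPEXC_EN        (1u << 30)
#define FPEXC_FP2V      (1u << 28)

int main(void)
{
        /* A realistic trap value: EX and FP2V set, but EN set as well. */
        uint32_t fpexc = FPEXC_EX | FPEXC_EN | FPEXC_FP2V;

        /* Old test: "exit" unless fpexc equals exactly EX|FP2V. */
        printf("old: %s\n", (fpexc ^ (FPEXC_EX | FPEXC_FP2V)) ? "exit" : "handle FP2");

        /* New test: exit only when EX and FP2V are not both set. */
        printf("new: %s\n",
               ((fpexc & (FPEXC_EX | FPEXC_FP2V)) != (FPEXC_EX | FPEXC_FP2V))
               ? "exit" : "handle FP2");
        return 0;
}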
......@@ -336,4 +336,14 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
/* drivers/base/dma-mapping.c */
extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size);
extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr,
size_t size);
#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
#endif /* __ASM_AVR32_DMA_MAPPING_H */
......@@ -154,4 +154,14 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size,
_dma_sync((dma_addr_t)vaddr, size, dir);
}
/* drivers/base/dma-mapping.c */
extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size);
extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr,
size_t size);
#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
#endif /* _BLACKFIN_DMA_MAPPING_H */
......@@ -89,4 +89,19 @@ extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent((d), (s), (h), (f))
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent((d), (s), (v), (h))
/* Not supported for now */
static inline int dma_mmap_coherent(struct device *dev,
struct vm_area_struct *vma, void *cpu_addr,
dma_addr_t dma_addr, size_t size)
{
return -EINVAL;
}
static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr,
size_t size)
{
return -EINVAL;
}
#endif /* _ASM_C6X_DMA_MAPPING_H */
......@@ -158,5 +158,15 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size,
{
}
/* drivers/base/dma-mapping.c */
extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size);
extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr,
size_t size);
#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
#endif
......@@ -132,4 +132,19 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
flush_write_buffers();
}
/* Not supported for now */
static inline int dma_mmap_coherent(struct device *dev,
struct vm_area_struct *vma, void *cpu_addr,
dma_addr_t dma_addr, size_t size)
{
return -EINVAL;
}
static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr,
size_t size)
{
return -EINVAL;
}
#endif /* _ASM_DMA_MAPPING_H */
......@@ -115,4 +115,14 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t handle)
#include <asm-generic/dma-mapping-broken.h>
#endif
/* drivers/base/dma-mapping.c */
extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size);
extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr,
size_t size);
#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
#endif /* _M68K_DMA_MAPPING_H */
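These hunks give every architecture a dma_mmap_coherent()/dma_get_sgtable() implementation, either the generic dma_common_mmap()/dma_common_get_sgtable() helpers or an explicit -EINVAL stub, so drivers can use the calls unconditionally. A hypothetical driver mmap handler showing the intended use; the mydrv_* names and the buffer bookkeeping are invented for the sketch:

/* Hypothetical driver state; assume cpu_addr/dma_addr came from
 * dma_alloc_coherent(dev, size, &dma_addr, GFP_KERNEL). */
struct mydrv_buffer {
        struct device   *dev;
        void            *cpu_addr;
        dma_addr_t      dma_addr;
        size_t          size;
};

static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct mydrv_buffer *buf = file->private_data;

        /* Returns -EINVAL on architectures that only provide the stub. */
        return dma_mmap_coherent(buf->dev, vma, buf->cpu_addr,
                                 buf->dma_addr, buf->size);
}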
......@@ -8,8 +8,10 @@ config BCM47XX_SSB
select SSB_DRIVER_EXTIF
select SSB_EMBEDDED
select SSB_B43_PCI_BRIDGE if PCI
select SSB_DRIVER_PCICORE if PCI
select SSB_PCICORE_HOSTMODE if PCI
select SSB_DRIVER_GPIO
select GPIOLIB
default y
help
Add support for old Broadcom BCM47xx boards with Sonics Silicon Backplane support.
......@@ -25,6 +27,7 @@ config BCM47XX_BCMA
select BCMA_HOST_PCI if PCI
select BCMA_DRIVER_PCI_HOSTMODE if PCI
select BCMA_DRIVER_GPIO
select GPIOLIB
default y
help
Add support for new Broadcom BCM47xx boards with Broadcom specific Advanced Microcontroller Bus.
......
......@@ -30,6 +30,7 @@
* measurement, and debugging facilities.
*/
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-l2c.h>
......@@ -285,22 +286,22 @@ uint64_t cvmx_l2c_read_perf(uint32_t counter)
*/
static void fault_in(uint64_t addr, int len)
{
volatile char *ptr;
volatile char dummy;
char *ptr;
/*
* Adjust addr and length so we get all cache lines even for
* small ranges spanning two cache lines.
*/
len += addr & CVMX_CACHE_LINE_MASK;
addr &= ~CVMX_CACHE_LINE_MASK;
ptr = (volatile char *)cvmx_phys_to_ptr(addr);
ptr = cvmx_phys_to_ptr(addr);
/*
* Invalidate L1 cache to make sure all loads result in data
* being in L2.
*/
CVMX_DCACHE_INVALIDATE;
while (len > 0) {
dummy += *ptr;
ACCESS_ONCE(*ptr);
len -= CVMX_CACHE_LINE_SIZE;
ptr += CVMX_CACHE_LINE_SIZE;
}
......
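ACCESS_ONCE() replaces the 'volatile char dummy' accumulator: it casts the object to a volatile lvalue so that (with gcc) a real load is emitted even though the value is discarded, which is all fault_in() needs to pull each cache line into L2. Its definition and the resulting pattern, for reference:

/* As defined in <linux/compiler.h>. */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

static void touch_cache_line(char *ptr)
{
        ACCESS_ONCE(*ptr);      /* one real load; value intentionally discarded */
}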
......@@ -16,7 +16,7 @@
#include <asm/mipsregs.h>
#define DSP_DEFAULT 0x00000000
#define DSP_MASK 0x3ff
#define DSP_MASK 0x3f
#define __enable_dsp_hazard() \
do { \
......
......@@ -353,6 +353,7 @@ union mips_instruction {
struct u_format u_format;
struct c_format c_format;
struct r_format r_format;
struct p_format p_format;
struct f_format f_format;
struct ma_format ma_format;
struct b_format b_format;
......
......@@ -21,4 +21,4 @@
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0
#endif /* __ASM_MIPS_MACH_PNX8550_WAR_H */
#endif /* __ASM_MIPS_MACH_PNX833X_WAR_H */
......@@ -230,6 +230,7 @@ static inline void pud_clear(pud_t *pudp)
#else
#define pte_pfn(x) ((unsigned long)((x).pte >> _PFN_SHIFT))
#define pfn_pte(pfn, prot) __pte(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot) __pmd(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
#endif
#define __pgd_offset(address) pgd_index(address)
......
......@@ -3,6 +3,7 @@ include include/uapi/asm-generic/Kbuild.asm
header-y += auxvec.h
header-y += bitsperlong.h
header-y += break.h
header-y += byteorder.h
header-y += cachectl.h
header-y += errno.h
......
......@@ -25,6 +25,12 @@
#define MCOUNT_OFFSET_INSNS 4
#endif
/* Arch override because MIPS doesn't need to run this from stop_machine() */
void arch_ftrace_update_code(int command)
{
ftrace_modify_all_code(command);
}
/*
* Check if the address is in kernel space
*
......@@ -89,6 +95,24 @@ static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
return 0;
}
#ifndef CONFIG_64BIT
static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
unsigned int new_code2)
{
int faulted;
safe_store_code(new_code1, ip, faulted);
if (unlikely(faulted))
return -EFAULT;
ip += 4;
safe_store_code(new_code2, ip, faulted);
if (unlikely(faulted))
return -EFAULT;
flush_icache_range(ip, ip + 8); /* original ip + 12 */
return 0;
}
#endif
/*
* The details about the calling site of mcount on MIPS
*
......@@ -131,8 +155,18 @@ int ftrace_make_nop(struct module *mod,
* needed.
*/
new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F;
#ifdef CONFIG_64BIT
return ftrace_modify_code(ip, new);
#else
/*
* On 32 bit MIPS platforms, gcc adds a stack adjust
* instruction in the delay slot after the branch to
* mcount and expects mcount to restore the sp on return.
* This is based on a legacy API and does nothing but
* waste instructions so it's being removed at runtime.
*/
return ftrace_modify_code_2(ip, new, INSN_NOP);
#endif
}
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
......
......@@ -46,9 +46,8 @@
PTR_L a5, PT_R9(sp)
PTR_L a6, PT_R10(sp)
PTR_L a7, PT_R11(sp)
PTR_ADDIU sp, PT_SIZE
#else
PTR_ADDIU sp, (PT_SIZE + 8)
PTR_ADDIU sp, PT_SIZE
#endif
.endm
......@@ -69,7 +68,9 @@ NESTED(ftrace_caller, PT_SIZE, ra)
.globl _mcount
_mcount:
b ftrace_stub
nop
addiu sp,sp,8
/* When tracing is activated, it calls ftrace_caller+8 (aka here) */
lw t1, function_trace_stop
bnez t1, ftrace_stub
nop
......
......@@ -705,7 +705,7 @@ static int vpe_run(struct vpe * v)
printk(KERN_WARNING
"VPE loader: TC %d is already in use.\n",
t->index);
v->tc->index);
return -ENOEXEC;
}
} else {
......
......@@ -408,7 +408,7 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)
#endif
/* tell oprofile which irq to use */
cp0_perfcount_irq = LTQ_PERF_IRQ;
cp0_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ);
/*
* if the timer irq is not one of the mips irqs we need to
......
......@@ -21,7 +21,7 @@ void __delay(unsigned long loops)
" .set noreorder \n"
" .align 3 \n"
"1: bnez %0, 1b \n"
#if __SIZEOF_LONG__ == 4
#if BITS_PER_LONG == 32
" subu %0, 1 \n"
#else
" dsubu %0, 1 \n"
......
......@@ -190,9 +190,3 @@ void __iounmap(const volatile void __iomem *addr)
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__iounmap);
int __virt_addr_valid(const volatile void *kaddr)
{
return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
}
EXPORT_SYMBOL_GPL(__virt_addr_valid);