during suspend.
- ti,no-reset-on-init: When present, the module should not be reset at init
- ti,no-idle-on-init: When present, the module should not be idled at init
+- ti,no-idle: When present, the module is never allowed to idle.
Example:
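A minimal sketch of the new property in use (the node, unit address and hwmod
name are illustrative, modeled on the DRA7 CPSW case from errata i877):

	mac: ethernet@48484000 {
		compatible = "ti,dra7-cpsw", "ti,cpsw";
		ti,hwmods = "gmac";
		ti,no-idle;
	};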
- if CR4.SMEP is enabled: since we've turned the page into a kernel page,
the kernel may now execute it. We handle this by also setting spte.nx.
If we get a user fetch or read fault, we'll change spte.u=1 and
- spte.nx=gpte.nx back.
+ spte.nx=gpte.nx back. For this to work, KVM forces EFER.NX to 1 when
+ shadow paging is in use.
- if CR4.SMAP is disabled: since the page has been changed to a kernel
  page, it cannot be reused when CR4.SMAP is enabled. We set
  CR4.SMAP && !CR0.WP into the shadow page's role to avoid this case. Note,
S: Supported
F: drivers/net/ethernet/myricom/myri10ge/
+NAND FLASH SUBSYSTEM
+M: Boris Brezillon <boris.brezillon@free-electrons.com>
+R: Richard Weinberger <richard@nod.at>
+L: linux-mtd@lists.infradead.org
+W: http://www.linux-mtd.infradead.org/
+Q: http://patchwork.ozlabs.org/project/linux-mtd/list/
+T: git git://github.com/linux-nand/linux.git
+S: Maintained
+F: drivers/mtd/nand/
+F: include/linux/mtd/nand*.h
+
NATSEMI ETHERNET DRIVER (DP8381x)
S: Orphan
F: drivers/net/ethernet/natsemi/natsemi.c
VERSION = 4
PATCHLEVEL = 5
SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION =
NAME = Blurry Fish Butt
# *DOCUMENTATION*
soc {
ranges = <MBUS_ID(0xf0, 0x01) 0 0 0xf1000000 0x100000
MBUS_ID(0x01, 0x1d) 0 0 0xfff00000 0x100000
- MBUS_ID(0x09, 0x09) 0 0 0xf8100000 0x10000
- MBUS_ID(0x09, 0x05) 0 0 0xf8110000 0x10000>;
+ MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000
+ MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>;
pcie-controller {
status = "okay";
ranges = <MBUS_ID(0xf0, 0x01) 0 0 0xf1000000 0x100000
MBUS_ID(0x01, 0x1d) 0 0 0xfff00000 0x100000
MBUS_ID(0x01, 0x2f) 0 0 0xf0000000 0x1000000
- MBUS_ID(0x09, 0x09) 0 0 0xf8100000 0x10000
- MBUS_ID(0x09, 0x05) 0 0 0xf8110000 0x10000>;
+ MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000
+ MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>;
devbus-bootcs {
status = "okay";
ranges = <MBUS_ID(0xf0, 0x01) 0 0 0xf1000000 0x100000
MBUS_ID(0x01, 0x1d) 0 0 0xfff00000 0x100000
MBUS_ID(0x01, 0x2f) 0 0 0xf0000000 0x1000000
- MBUS_ID(0x09, 0x09) 0 0 0xf8100000 0x10000
- MBUS_ID(0x09, 0x05) 0 0 0xf8110000 0x10000>;
+ MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000
+ MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>;
devbus-bootcs {
status = "okay";
soc {
ranges = <MBUS_ID(0xf0, 0x01) 0 0 0xd0000000 0x100000
MBUS_ID(0x01, 0x1d) 0 0 0xfff00000 0x100000
- MBUS_ID(0x09, 0x09) 0 0 0xf8100000 0x10000
- MBUS_ID(0x09, 0x05) 0 0 0xf8110000 0x10000>;
+ MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000
+ MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>;
pcie-controller {
status = "okay";
soc {
ranges = <MBUS_ID(0xf0, 0x01) 0 0 0xf1000000 0x100000
MBUS_ID(0x01, 0x1d) 0 0 0xfff00000 0x100000
- MBUS_ID(0x09, 0x09) 0 0 0xf8100000 0x10000
- MBUS_ID(0x09, 0x05) 0 0 0xf8110000 0x10000>;
+ MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000
+ MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>;
pcie-controller {
status = "okay";
soc {
ranges = <MBUS_ID(0xf0, 0x01) 0 0 0xf1000000 0x100000
MBUS_ID(0x01, 0x1d) 0 0 0xfff00000 0x100000
- MBUS_ID(0x09, 0x09) 0 0 0xf8100000 0x10000
- MBUS_ID(0x09, 0x05) 0 0 0xf8110000 0x10000>;
+ MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000
+ MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>;
internal-regs {
serial@12000 {
soc {
ranges = <MBUS_ID(0xf0, 0x01) 0 0 0xd0000000 0x100000
MBUS_ID(0x01, 0x1d) 0 0 0xfff00000 0x100000
- MBUS_ID(0x09, 0x09) 0 0 0xf8100000 0x10000
- MBUS_ID(0x09, 0x05) 0 0 0xf8110000 0x10000>;
+ MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000
+ MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>;
pcie-controller {
status = "okay";
soc {
ranges = <MBUS_ID(0xf0, 0x01) 0 0 0xd0000000 0x100000
MBUS_ID(0x01, 0x1d) 0 0 0xfff00000 0x100000
- MBUS_ID(0x01, 0x2f) 0 0 0xf0000000 0x8000000
- MBUS_ID(0x09, 0x09) 0 0 0xf8100000 0x10000
- MBUS_ID(0x09, 0x05) 0 0 0xf8110000 0x10000>;
+ MBUS_ID(0x01, 0x2f) 0 0 0xe8000000 0x8000000
+ MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000
+ MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>;
devbus-bootcs {
status = "okay";
soc {
ranges = <MBUS_ID(0xf0, 0x01) 0 0 0xf1000000 0x100000
MBUS_ID(0x01, 0x1d) 0 0 0xfff00000 0x100000
- MBUS_ID(0x09, 0x09) 0 0 0xf8100000 0x10000
- MBUS_ID(0x09, 0x05) 0 0 0xf8110000 0x10000>;
+ MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000
+ MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>;
pcie-controller {
status = "okay";
0x48485200 0x2E00>;
#address-cells = <1>;
#size-cells = <1>;
+
+ /*
+ * Do not allow gating of the cpsw clock as a workaround
+ * for errata i877. Keeping the internal clock disabled
+ * causes the device switching characteristics
+ * to degrade over time and eventually fail to meet
+ * the data manual delay time/skew specs.
+ */
+ ti,no-idle;
+
/*
* rx_thresh_pend
* rx_pend
.name = "Kernel code",
.start = 0,
.end = 0,
- .flags = IORESOURCE_MEM
+ .flags = IORESOURCE_SYSTEM_RAM
},
{
.name = "Kernel data",
.start = 0,
.end = 0,
- .flags = IORESOURCE_MEM
+ .flags = IORESOURCE_SYSTEM_RAM
}
};
res->name = "System RAM";
res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
- res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
request_resource(&iomem_resource, res);
*/
static int _idle(struct omap_hwmod *oh)
{
+ if (oh->flags & HWMOD_NO_IDLE) {
+ oh->_int_flags |= _HWMOD_SKIP_ENABLE;
+ return 0;
+ }
+
pr_debug("omap_hwmod: %s: idling\n", oh->name);
if (oh->_state != _HWMOD_STATE_ENABLED) {
oh->flags |= HWMOD_INIT_NO_RESET;
if (of_find_property(np, "ti,no-idle-on-init", NULL))
oh->flags |= HWMOD_INIT_NO_IDLE;
+ if (of_find_property(np, "ti,no-idle", NULL))
+ oh->flags |= HWMOD_NO_IDLE;
}
oh->_state = _HWMOD_STATE_INITIALIZED;
* XXX HWMOD_INIT_NO_IDLE does not belong in hwmod data -
* it should be set by the core code as a runtime flag during startup
*/
- if ((oh->flags & HWMOD_INIT_NO_IDLE) &&
+ if ((oh->flags & (HWMOD_INIT_NO_IDLE | HWMOD_NO_IDLE)) &&
(postsetup_state == _HWMOD_STATE_IDLE)) {
oh->_int_flags |= _HWMOD_SKIP_ENABLE;
postsetup_state = _HWMOD_STATE_ENABLED;
* or idled.
* HWMOD_OPT_CLKS_NEEDED: The optional clocks are needed for the module to
* operate and they need to be handled at the same time as the main_clk.
+ * HWMOD_NO_IDLE: Do not idle the hwmod at all. Useful to handle certain
+ * IPs like CPSW on DRA7, where clocks to this module cannot be disabled.
*/
#define HWMOD_SWSUP_SIDLE (1 << 0)
#define HWMOD_SWSUP_MSTANDBY (1 << 1)
#define HWMOD_SWSUP_SIDLE_ACT (1 << 12)
#define HWMOD_RECONFIG_IO_CHAIN (1 << 13)
#define HWMOD_OPT_CLKS_NEEDED (1 << 14)
+#define HWMOD_NO_IDLE (1 << 15)
/*
* omap_hwmod._int_flags definitions
if (ptr->child != NULL)
s3c_pm_run_res(ptr->child, fn, arg);
- if ((ptr->flags & IORESOURCE_MEM) &&
- strcmp(ptr->name, "System RAM") == 0) {
+ if ((ptr->flags & IORESOURCE_SYSTEM_RAM)
+ == IORESOURCE_SYSTEM_RAM) {
S3C_PMDBG("Found system RAM at %08lx..%08lx\n",
(unsigned long)ptr->start,
(unsigned long)ptr->end);
* VMALLOC_END: extends to the available space below vmemmap, PCI I/O space,
* fixed mappings and modules
*/
-#define VMEMMAP_SIZE ALIGN((1UL << (VA_BITS - PAGE_SHIFT - 1)) * sizeof(struct page), PUD_SIZE)
+#define VMEMMAP_SIZE ALIGN((1UL << (VA_BITS - PAGE_SHIFT)) * sizeof(struct page), PUD_SIZE)
#ifndef CONFIG_KASAN
#define VMALLOC_START (VA_START)
#define VMALLOC_END (PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
#define VMEMMAP_START (VMALLOC_END + SZ_64K)
-#define vmemmap ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
+#define vmemmap ((struct page *)VMEMMAP_START - \
+ SECTION_ALIGN_DOWN(memstart_addr >> PAGE_SHIFT))
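A worked example of the new offset (a sketch assuming 4K pages and arm64's
SECTION_SIZE_BITS of 30, i.e. 1GB sparsemem sections, so SECTION_ALIGN_DOWN
clears the low 18 bits of a PFN): with memstart_addr = 0x80400000 the first
PFN is 0x80400, which rounds down to 0x80000. Subtracting the section-aligned
PFN rather than the raw one keeps the struct page entries for the whole
first, partially populated section inside the vmemmap window instead of
letting them underflow below VMEMMAP_START.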
#define FIRST_USER_ADDRESS 0UL
.name = "Kernel code",
.start = 0,
.end = 0,
- .flags = IORESOURCE_MEM
+ .flags = IORESOURCE_SYSTEM_RAM
},
{
.name = "Kernel data",
.start = 0,
.end = 0,
- .flags = IORESOURCE_MEM
+ .flags = IORESOURCE_SYSTEM_RAM
}
};
res->name = "System RAM";
res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
- res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
request_resource(&iomem_resource, res);
hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
} else if (ps == PUD_SIZE) {
hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
- } else if (ps == (PAGE_SIZE * CONT_PTES)) {
- hugetlb_add_hstate(CONT_PTE_SHIFT);
- } else if (ps == (PMD_SIZE * CONT_PMDS)) {
- hugetlb_add_hstate((PMD_SHIFT + CONT_PMD_SHIFT) - PAGE_SHIFT);
} else {
pr_err("hugepagesz: Unsupported page size %lu K\n", ps >> 10);
return 0;
return 1;
}
__setup("hugepagesz=", setup_hugepagesz);
-
-#ifdef CONFIG_ARM64_64K_PAGES
-static __init int add_default_hugepagesz(void)
-{
- if (size_to_hstate(CONT_PTES * PAGE_SIZE) == NULL)
- hugetlb_add_hstate(CONT_PMD_SHIFT);
- return 0;
-}
-arch_initcall(add_default_hugepagesz);
-#endif
.name = "Kernel data",
.start = 0,
.end = 0,
- .flags = IORESOURCE_MEM,
+ .flags = IORESOURCE_SYSTEM_RAM,
};
static struct resource __initdata kernel_code = {
.name = "Kernel code",
.start = 0,
.end = 0,
- .flags = IORESOURCE_MEM,
+ .flags = IORESOURCE_SYSTEM_RAM,
.sibling = &kernel_data,
};
new->start = start;
new->end = end;
new->name = "System RAM";
- new->flags = IORESOURCE_MEM;
+ new->flags = IORESOURCE_SYSTEM_RAM;
*pprev = new;
}
efi_memory_desc_t *md;
u64 efi_desc_size;
char *name;
- unsigned long flags;
+ unsigned long flags, desc;
efi_map_start = __va(ia64_boot_param->efi_memmap);
efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
continue;
flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ desc = IORES_DESC_NONE;
+
switch (md->type) {
case EFI_MEMORY_MAPPED_IO:
if (md->attribute & EFI_MEMORY_WP) {
name = "System ROM";
flags |= IORESOURCE_READONLY;
- } else if (md->attribute == EFI_MEMORY_UC)
+ } else if (md->attribute == EFI_MEMORY_UC) {
name = "Uncached RAM";
- else
+ } else {
name = "System RAM";
+ flags |= IORESOURCE_SYSRAM;
+ }
break;
case EFI_ACPI_MEMORY_NVS:
name = "ACPI Non-volatile Storage";
+ desc = IORES_DESC_ACPI_NV_STORAGE;
break;
case EFI_UNUSABLE_MEMORY:
case EFI_PERSISTENT_MEMORY:
name = "Persistent Memory";
+ desc = IORES_DESC_PERSISTENT_MEMORY;
break;
case EFI_RESERVED_TYPE:
res->start = md->phys_addr;
res->end = md->phys_addr + efi_md_size(md) - 1;
res->flags = flags;
+ res->desc = desc;
if (insert_resource(&iomem_resource, res) < 0)
kfree(res);
static struct resource data_resource = {
.name = "Kernel data",
- .flags = IORESOURCE_BUSY | IORESOURCE_MEM
+ .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};
static struct resource code_resource = {
.name = "Kernel code",
- .flags = IORESOURCE_BUSY | IORESOURCE_MEM
+ .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};
static struct resource bss_resource = {
.name = "Kernel bss",
- .flags = IORESOURCE_BUSY | IORESOURCE_MEM
+ .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};
unsigned long ia64_max_cacheline_size;
.name = "Kernel data",
.start = 0,
.end = 0,
- .flags = IORESOURCE_BUSY | IORESOURCE_MEM
+ .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};
static struct resource code_resource = {
.name = "Kernel code",
.start = 0,
.end = 0,
- .flags = IORESOURCE_BUSY | IORESOURCE_MEM
+ .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};
unsigned long memory_start;
select CPU_MIPSR2_IRQ_VI
select CPU_MIPSR2_IRQ_EI
select SYNC_R4K
- select MIPS_GIC_IPI
+ select MIPS_GIC_IPI if MIPS_GIC
select MIPS_MT
select SMP
select SMP_UP
config MIPS_CMP
bool "MIPS CMP framework support (DEPRECATED)"
depends on SYS_SUPPORTS_MIPS_CMP && !CPU_MIPSR6
- select MIPS_GIC_IPI
+ select MIPS_GIC_IPI if MIPS_GIC
select SMP
select SYNC_R4K
select SYS_SUPPORTS_SMP
select MIPS_CM
select MIPS_CPC
select MIPS_CPS_PM if HOTPLUG_CPU
- select MIPS_GIC_IPI
+ select MIPS_GIC_IPI if MIPS_GIC
select SMP
select SYNC_R4K if (CEVT_R4K || CSRC_R4K)
select SYS_SUPPORTS_HOTPLUG_CPU
bool
config MIPS_GIC_IPI
+ depends on MIPS_GIC
bool
config MIPS_CM
#define PORT(offset) (CKSEG1ADDR(AR7_REGS_UART0) + (4 * offset))
#endif
-#ifdef CONFIG_MACH_JZ4740
+#if defined(CONFIG_MACH_JZ4740) || defined(CONFIG_MACH_JZ4780)
#include <asm/mach-jz4740/base.h>
#define PORT(offset) (CKSEG1ADDR(JZ4740_UART0_BASE_ADDR) + (4 * offset))
#endif
end = HIGHMEM_START - 1;
res = alloc_bootmem(sizeof(struct resource));
+
+ res->start = start;
+ res->end = end;
+ res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+
switch (boot_mem_map.map[i].type) {
case BOOT_MEM_RAM:
case BOOT_MEM_INIT_RAM:
case BOOT_MEM_ROM_DATA:
res->name = "System RAM";
+ res->flags |= IORESOURCE_SYSRAM;
break;
case BOOT_MEM_RESERVED:
default:
res->name = "reserved";
}
- res->start = start;
- res->end = end;
-
- res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
request_resource(&iomem_resource, res);
/*
cpumask_t temp_foreign_map;
/* Re-calculate the mask */
+ cpumask_clear(&temp_foreign_map);
for_each_online_cpu(i) {
core_present = 0;
for_each_cpu(k, &temp_foreign_map)
static struct resource data_resource = {
.name = "Kernel data",
- .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
+ .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};
static struct resource code_resource = {
.name = "Kernel code",
- .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
+ .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};
static struct resource pdcdata_resource = {
res->name = "System RAM";
res->start = pmem_ranges[i].start_pfn << PAGE_SHIFT;
res->end = res->start + (pmem_ranges[i].pages << PAGE_SHIFT)-1;
- res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
request_resource(&iomem_resource, res);
}
std r6, VCPU_ACOP(r9)
stw r7, VCPU_GUEST_PID(r9)
std r8, VCPU_WORT(r9)
+ /*
+ * Restore various registers to 0, where non-zero values
+ * set by the guest could disrupt the host.
+ */
+ li r0, 0
+ mtspr SPRN_IAMR, r0
+ mtspr SPRN_CIABR, r0
+ mtspr SPRN_DAWRX, r0
+ mtspr SPRN_TCSCR, r0
+ mtspr SPRN_WORT, r0
+ /* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
+ li r0, 1
+ sldi r0, r0, 31
+ mtspr SPRN_MMCRS, r0
8:
/* Save and reset AMR and UAMOR before turning on the MMU */
res->name = "System RAM";
res->start = base;
res->end = base + size - 1;
- res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
WARN_ON(request_resource(&iomem_resource, res) < 0);
}
}
static inline int init_new_context(struct task_struct *tsk,
struct mm_struct *mm)
{
+ spin_lock_init(&mm->context.list_lock);
+ INIT_LIST_HEAD(&mm->context.pgtable_list);
+ INIT_LIST_HEAD(&mm->context.gmap_list);
cpumask_clear(&mm->context.cpu_attach_mask);
atomic_set(&mm->context.attach_count, 0);
mm->context.flush_mm = 0;
- mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
- mm->context.asce_bits |= _ASCE_TYPE_REGION3;
#ifdef CONFIG_PGSTE
mm->context.alloc_pgste = page_table_allocate_pgste;
mm->context.has_pgste = 0;
mm->context.use_skey = 0;
#endif
- mm->context.asce_limit = STACK_TOP_MAX;
+ if (mm->context.asce_limit == 0) {
+ /* context created by exec, set asce limit to 4TB */
+ mm->context.asce_bits = _ASCE_TABLE_LENGTH |
+ _ASCE_USER_BITS | _ASCE_TYPE_REGION3;
+ mm->context.asce_limit = STACK_TOP_MAX;
+ } else if (mm->context.asce_limit == (1UL << 31)) {
+ mm_inc_nr_pmds(mm);
+ }
crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
return 0;
}
static inline void arch_dup_mmap(struct mm_struct *oldmm,
struct mm_struct *mm)
{
- if (oldmm->context.asce_limit < mm->context.asce_limit)
- crst_table_downgrade(mm, oldmm->context.asce_limit);
}
static inline void arch_exit_mmap(struct mm_struct *mm)
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
- spin_lock_init(&mm->context.list_lock);
- INIT_LIST_HEAD(&mm->context.pgtable_list);
- INIT_LIST_HEAD(&mm->context.gmap_list);
- return (pgd_t *) crst_table_alloc(mm);
+ unsigned long *table = crst_table_alloc(mm);
+
+ if (!table)
+ return NULL;
+ if (mm->context.asce_limit == (1UL << 31)) {
+ /* Forking a compat process with 2 page table levels */
+ if (!pgtable_pmd_page_ctor(virt_to_page(table))) {
+ crst_table_free(mm, table);
+ return NULL;
+ }
+ }
+ return (pgd_t *) table;
+}
+
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+ if (mm->context.asce_limit == (1UL << 31))
+ pgtable_pmd_page_dtor(virt_to_page(pgd));
+ crst_table_free(mm, (unsigned long *) pgd);
}
-#define pgd_free(mm, pgd) crst_table_free(mm, (unsigned long *) pgd)
static inline void pmd_populate(struct mm_struct *mm,
pmd_t *pmd, pgtable_t pte)
__HEAD
ENTRY(startup_continue)
- tm __LC_STFLE_FAC_LIST+6,0x80 # LPP available ?
+ tm __LC_STFLE_FAC_LIST+5,0x80 # LPP available ?
jz 0f
xc __LC_LPP+1(7,0),__LC_LPP+1 # clear lpp and current_pid
mvi __LC_LPP,0x80 # and set LPP_MAGIC
static struct resource code_resource = {
.name = "Kernel code",
- .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
+ .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};
static struct resource data_resource = {
.name = "Kernel data",
- .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
+ .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};
static struct resource bss_resource = {
.name = "Kernel bss",
- .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
+ .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};
static struct resource __initdata *standard_resources[] = {
for_each_memblock(memory, reg) {
res = alloc_bootmem_low(sizeof(*res));
- res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
+ res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
res->name = "System RAM";
res->start = reg->base;
/* manually convert vector registers if necessary */
if (MACHINE_HAS_VX) {
- convert_vx_to_fp(fprs, current->thread.fpu.vxrs);
+ convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
fprs, 128);
} else {
res->name = "System RAM";
res->start = MEMORY_START;
res->end = MEMORY_START + MEMORY_SIZE - 1;
- res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
request_resource(&iomem_resource, res);
request_resource(res, &code_resource);
static struct resource code_resource = {
.name = "Kernel code",
- .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
+ .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};
static struct resource data_resource = {
.name = "Kernel data",
- .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
+ .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};
static struct resource bss_resource = {
.name = "Kernel bss",
- .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
+ .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};
unsigned long memory_start;
res->name = "System RAM";
res->start = start;
res->end = end - 1;
- res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
if (request_resource(&iomem_resource, res)) {
pr_err("unable to request memory_resource 0x%lx 0x%lx\n",
static struct resource code_resource = {
.name = "Kernel code",
- .flags = IORESOURCE_BUSY | IORESOURCE_MEM
+ .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};
static struct resource data_resource = {
.name = "Kernel data",
- .flags = IORESOURCE_BUSY | IORESOURCE_MEM
+ .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};
static struct resource bss_resource = {
.name = "Kernel bss",
- .flags = IORESOURCE_BUSY | IORESOURCE_MEM
+ .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};
static inline resource_size_t compute_kern_paddr(void *addr)
res->name = "System RAM";
res->start = pavail[i].phys_addr;
res->end = pavail[i].phys_addr + pavail[i].reg_size - 1;
- res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
+ res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
if (insert_resource(&iomem_resource, res) < 0) {
pr_warn("Resource insertion failed.\n");
.name = "Kernel data",
.start = 0,
.end = 0,
- .flags = IORESOURCE_BUSY | IORESOURCE_MEM
+ .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};
static struct resource code_resource = {
.name = "Kernel code",
.start = 0,
.end = 0,
- .flags = IORESOURCE_BUSY | IORESOURCE_MEM
+ .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};
/*
kzalloc(sizeof(struct resource), GFP_ATOMIC);
if (!res)
return NULL;
- res->name = reserved ? "Reserved" : "System RAM";
res->start = start_pfn << PAGE_SHIFT;
res->end = (end_pfn << PAGE_SHIFT) - 1;
res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
+ if (reserved) {
+ res->name = "Reserved";
+ } else {
+ res->name = "System RAM";
+ res->flags |= IORESOURCE_SYSRAM;
+ }
if (insert_resource(&iomem_resource, res)) {
kfree(res);
return NULL;
.name = "Kernel code",
.start = 0,
.end = 0,
- .flags = IORESOURCE_MEM
+ .flags = IORESOURCE_SYSTEM_RAM
},
{
.name = "Kernel data",
.start = 0,
.end = 0,
- .flags = IORESOURCE_MEM
+ .flags = IORESOURCE_SYSTEM_RAM
}
};
res->name = "System RAM";
res->start = mi->bank[i].start;
res->end = mi->bank[i].start + mi->bank[i].size - 1;
- res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
request_resource(&iomem_resource, res);
/* Supported features which support lazy state saving */
#define XFEATURE_MASK_LAZY (XFEATURE_MASK_FP | \
- XFEATURE_MASK_SSE)
-
-/* Supported features which require eager state saving */
-#define XFEATURE_MASK_EAGER (XFEATURE_MASK_BNDREGS | \
- XFEATURE_MASK_BNDCSR | \
+ XFEATURE_MASK_SSE | \
XFEATURE_MASK_YMM | \
XFEATURE_MASK_OPMASK | \
XFEATURE_MASK_ZMM_Hi256 | \
XFEATURE_MASK_Hi16_ZMM)
+/* Supported features which require eager state saving */
+#define XFEATURE_MASK_EAGER (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR)
+
/* All currently supported features */
#define XCNTXT_MASK (XFEATURE_MASK_LAZY | XFEATURE_MASK_EAGER)
struct kimage *image;
/*
* Total number of ram ranges we have after various adjustments for
- * GART, crash reserved region etc.
+ * crash reserved region, etc.
*/
unsigned int max_nr_ranges;
- unsigned long gart_start, gart_end;
/* Pointer to elf header */
void *ehdr;
return 0;
}
-static int get_gart_ranges_callback(u64 start, u64 end, void *arg)
-{
- struct crash_elf_data *ced = arg;
-
- ced->gart_start = start;
- ced->gart_end = end;
-
- /* Not expecting more than 1 gart aperture */
- return 1;
-}
-
/* Gather all the required information to prepare elf headers for ram regions */
static void fill_up_crash_elf_data(struct crash_elf_data *ced,
ced->max_nr_ranges = nr_ranges;
- /*
- * We don't create ELF headers for GART aperture as an attempt
- * to dump this memory in second kernel leads to hang/crash.
- * If gart aperture is present, one needs to exclude that region
- * and that could lead to need of extra phdr.
- */
- walk_iomem_res("GART", IORESOURCE_MEM, 0, -1,
- ced, get_gart_ranges_callback);
-
- /*
- * If we have gart region, excluding that could potentially split
- * a memory range, resulting in extra header. Account for that.
- */
- if (ced->gart_end)
- ced->max_nr_ranges++;
-
/* Exclusion of crash region could split memory ranges */
ced->max_nr_ranges++;
return ret;
}
- /* Exclude GART region */
- if (ced->gart_end) {
- ret = exclude_mem_range(cmem, ced->gart_start, ced->gart_end);
- if (ret)
- return ret;
- }
-
return ret;
}
/* Add ACPI tables */
cmd.type = E820_ACPI;
flags = IORESOURCE_MEM | IORESOURCE_BUSY;
- walk_iomem_res("ACPI Tables", flags, 0, -1, &cmd,
+ walk_iomem_res_desc(IORES_DESC_ACPI_TABLES, flags, 0, -1, &cmd,
memmap_entry_callback);
/* Add ACPI Non-volatile Storage */
cmd.type = E820_NVS;
- walk_iomem_res("ACPI Non-volatile Storage", flags, 0, -1, &cmd,
+ walk_iomem_res_desc(IORES_DESC_ACPI_NV_STORAGE, flags, 0, -1, &cmd,
memmap_entry_callback);
/* Add crashk_low_res region */
}
}
+static unsigned long e820_type_to_iomem_type(int e820_type)
+{
+ switch (e820_type) {
+ case E820_RESERVED_KERN:
+ case E820_RAM:
+ return IORESOURCE_SYSTEM_RAM;
+ case E820_ACPI:
+ case E820_NVS:
+ case E820_UNUSABLE:
+ case E820_PRAM:
+ case E820_PMEM:
+ default:
+ return IORESOURCE_MEM;
+ }
+}
+
+static unsigned long e820_type_to_iores_desc(int e820_type)
+{
+ switch (e820_type) {
+ case E820_ACPI:
+ return IORES_DESC_ACPI_TABLES;
+ case E820_NVS:
+ return IORES_DESC_ACPI_NV_STORAGE;
+ case E820_PMEM:
+ return IORES_DESC_PERSISTENT_MEMORY;
+ case E820_PRAM:
+ return IORES_DESC_PERSISTENT_MEMORY_LEGACY;
+ case E820_RESERVED_KERN:
+ case E820_RAM:
+ case E820_UNUSABLE:
+ default:
+ return IORES_DESC_NONE;
+ }
+}
+
static bool do_mark_busy(u32 type, struct resource *res)
{
/* this is the legacy bios/dos rom-shadow + mmio region */
res->start = e820.map[i].addr;
res->end = end;
- res->flags = IORESOURCE_MEM;
+ res->flags = e820_type_to_iomem_type(e820.map[i].type);
+ res->desc = e820_type_to_iores_desc(e820.map[i].type);
/*
* don't register the region that could be conflicted with
{
if (use_xsave())
copy_kernel_to_xregs(&init_fpstate.xsave, -1);
- else
+ else if (static_cpu_has(X86_FEATURE_FXSR))
copy_kernel_to_fxregs(&init_fpstate.fxsave);
+ else
+ copy_kernel_to_fregs(&init_fpstate.fsave);
}
/*
cr0 &= ~(X86_CR0_TS | X86_CR0_EM);
write_cr0(cr0);
- asm volatile("fninit ; fnstsw %0 ; fnstcw %1"
- : "+m" (fsw), "+m" (fcw));
+ if (!test_bit(X86_FEATURE_FPU, (unsigned long *)cpu_caps_cleared)) {
+ asm volatile("fninit ; fnstsw %0 ; fnstcw %1"
+ : "+m" (fsw), "+m" (fcw));
- if (fsw == 0 && (fcw & 0x103f) == 0x003f)
- set_cpu_cap(c, X86_FEATURE_FPU);
- else
- clear_cpu_cap(c, X86_FEATURE_FPU);
+ if (fsw == 0 && (fcw & 0x103f) == 0x003f)
+ set_cpu_cap(c, X86_FEATURE_FPU);
+ else
+ clear_cpu_cap(c, X86_FEATURE_FPU);
+ }
#ifndef CONFIG_MATH_EMULATION
if (!cpu_has_fpu) {
* Set up the legacy init FPU context. (xstate init might overwrite this
* with a more modern format, if the CPU supports it.)
*/
- fpstate_init_fxstate(&init_fpstate.fxsave);
+ fpstate_init(&init_fpstate);
fpu__init_system_mxcsr();
}
static void __init fpu__clear_eager_fpu_features(void)
{
setup_clear_cpu_cap(X86_FEATURE_MPX);
- setup_clear_cpu_cap(X86_FEATURE_AVX);
- setup_clear_cpu_cap(X86_FEATURE_AVX2);
- setup_clear_cpu_cap(X86_FEATURE_AVX512F);
- setup_clear_cpu_cap(X86_FEATURE_AVX512PF);
- setup_clear_cpu_cap(X86_FEATURE_AVX512ER);
- setup_clear_cpu_cap(X86_FEATURE_AVX512CD);
}
/*
static __init int register_e820_pmem(void)
{
- char *pmem = "Persistent Memory (legacy)";
struct platform_device *pdev;
int rc;
- rc = walk_iomem_res(pmem, IORESOURCE_MEM, 0, -1, NULL, found);
+ rc = walk_iomem_res_desc(IORES_DESC_PERSISTENT_MEMORY_LEGACY,
+ IORESOURCE_MEM, 0, -1, NULL, found);
if (rc <= 0)
return 0;
.name = "Kernel data",
.start = 0,
.end = 0,
- .flags = IORESOURCE_BUSY | IORESOURCE_MEM
+ .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};
static struct resource code_resource = {
.name = "Kernel code",
.start = 0,
.end = 0,
- .flags = IORESOURCE_BUSY | IORESOURCE_MEM
+ .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};
static struct resource bss_resource = {
.name = "Kernel bss",
.start = 0,
.end = 0,
- .flags = IORESOURCE_BUSY | IORESOURCE_MEM
+ .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};
void
reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
{
+ bool uses_nx = context->nx || context->base_role.smep_andnot_wp;
+
/*
* Passing "true" to the last argument is okay; it adds a check
* on bit 8 of the SPTEs which KVM doesn't use anyway.
*/
__reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check,
boot_cpu_data.x86_phys_bits,
- context->shadow_root_level, context->nx,
+ context->shadow_root_level, uses_nx,
guest_cpuid_has_gbpages(vcpu), is_pse(vcpu),
true);
}
return;
}
break;
+ case MSR_IA32_PEBS_ENABLE:
+ /* PEBS needs a quiescent period after being disabled (to write
+ * a record). Disabling PEBS through VMX MSR swapping doesn't
+ * provide that period, so a CPU could write the host's record
+ * into the guest's memory.
+ */
+ wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
}
for (i = 0; i < m->nr; ++i)
static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
{
- u64 guest_efer;
- u64 ignore_bits;
+ u64 guest_efer = vmx->vcpu.arch.efer;
+ u64 ignore_bits = 0;
- guest_efer = vmx->vcpu.arch.efer;
+ if (!enable_ept) {
+ /*
+ * NX is needed to handle CR0.WP=1, CR4.SMEP=1. Testing
+ * host CPUID is more efficient than testing guest CPUID
+ * or CR4. Host SMEP is anyway a requirement for guest SMEP.
+ */
+ if (boot_cpu_has(X86_FEATURE_SMEP))
+ guest_efer |= EFER_NX;
+ else if (!(guest_efer & EFER_NX))
+ ignore_bits |= EFER_NX;
+ }
/*
- * NX is emulated; LMA and LME handled by hardware; SCE meaningless
- * outside long mode
+ * LMA and LME handled by hardware; SCE meaningless outside long mode.
*/
- ignore_bits = EFER_NX | EFER_SCE;
+ ignore_bits |= EFER_SCE;
#ifdef CONFIG_X86_64
ignore_bits |= EFER_LMA | EFER_LME;
/* SCE is meaningful only in long mode on Intel */
if (guest_efer & EFER_LMA)
ignore_bits &= ~(u64)EFER_SCE;
#endif
- guest_efer &= ~ignore_bits;
- guest_efer |= host_efer & ignore_bits;
- vmx->guest_msrs[efer_offset].data = guest_efer;
- vmx->guest_msrs[efer_offset].mask = ~ignore_bits;
clear_atomic_switch_msr(vmx, MSR_EFER);
*/
if (cpu_has_load_ia32_efer ||
(enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) {
- guest_efer = vmx->vcpu.arch.efer;
if (!(guest_efer & EFER_LMA))
guest_efer &= ~EFER_LME;
if (guest_efer != host_efer)
add_atomic_switch_msr(vmx, MSR_EFER,
guest_efer, host_efer);
return false;
- }
+ } else {
+ guest_efer &= ~ignore_bits;
+ guest_efer |= host_efer & ignore_bits;
- return true;
+ vmx->guest_msrs[efer_offset].data = guest_efer;
+ vmx->guest_msrs[efer_offset].mask = ~ignore_bits;
+
+ return true;
+ }
}
static unsigned long segment_base(u16 selector)
* Use cpu_tss as a cacheline-aligned, seldom-accessed
* per-cpu variable as the monitor target.
*/
- __monitorx(this_cpu_ptr(&cpu_tss), 0, 0);
+ __monitorx(raw_cpu_ptr(&cpu_tss), 0, 0);
/*
* AMD, like Intel, supports the EAX hint and EAX=0xf
EXPORT_SYMBOL_GPL(efi_query_variable_store);
/*
+ * Helper function for efi_reserve_boot_services() to figure out if we
+ * can free regions in efi_free_boot_services().
+ *
+ * Use this function to ensure we do not free regions owned by somebody
+ * else. We must only reserve (and then free) regions:
+ *
+ * - Not within any part of the kernel
+ * - Not the BIOS reserved area (E820_RESERVED, E820_NVS, etc)
+ */
+static bool can_free_region(u64 start, u64 size)
+{
+ if (start + size > __pa_symbol(_text) && start <= __pa_symbol(_end))
+ return false;
+
+ if (!e820_all_mapped(start, start+size, E820_RAM))
+ return false;
+
+ return true;
+}
+
+/*
* The UEFI specification makes it clear that the operating system is free to do
* whatever it wants with boot services code after ExitBootServices() has been
* called. Ignoring this recommendation, a significant number of EFI implementations
efi_memory_desc_t *md = p;
u64 start = md->phys_addr;
u64 size = md->num_pages << EFI_PAGE_SHIFT;
+ bool already_reserved;
if (md->type != EFI_BOOT_SERVICES_CODE &&
md->type != EFI_BOOT_SERVICES_DATA)
continue;
- /* Only reserve where possible:
- * - Not within any already allocated areas
- * - Not over any memory area (really needed, if above?)
- * - Not within any part of the kernel
- * - Not the bios reserved area
- */
- if ((start + size > __pa_symbol(_text)
- && start <= __pa_symbol(_end)) ||
- !e820_all_mapped(start, start+size, E820_RAM) ||
- memblock_is_region_reserved(start, size)) {
- /* Could not reserve, skip it */
- md->num_pages = 0;
- memblock_dbg("Could not reserve boot range [0x%010llx-0x%010llx]\n",
- start, start+size-1);
- } else
+
+ already_reserved = memblock_is_region_reserved(start, size);
+
+ /*
+ * Because the following memblock_reserve() is paired
+ * with free_bootmem_late() for this region in
+ * efi_free_boot_services(), we must be extremely
+ * careful not to reserve, and subsequently free,
+ * critical regions of memory (like the kernel image) or
+ * those regions that somebody else has already
+ * reserved.
+ *
+ * A good example of a critical region that must not be
+ * freed is page zero (the first 4KB of memory), which may
+ * contain boot services code/data but is marked
+ * E820_RESERVED by trim_bios_range().
+ */
+ if (!already_reserved) {
memblock_reserve(start, size);
+
+ /*
+ * If we are the first to reserve the region, no
+ * one else cares about it. We own it and can
+ * free it later.
+ */
+ if (can_free_region(start, size))
+ continue;
+ }
+
+ /*
+ * We don't own the region. We must not free it.
+ *
+ * Setting this bit for a boot services region really
+ * doesn't make sense as far as the firmware is
+ * concerned, but it does provide us with a way to tag
+ * those regions that must not be paired with
+ * free_bootmem_late().
+ */
+ md->attribute |= EFI_MEMORY_RUNTIME;
}
}
md->type != EFI_BOOT_SERVICES_DATA)
continue;
- /* Could not reserve boot area */
- if (!size)
+ /* Do not free, someone else owns it: */
+ if (md->attribute & EFI_MEMORY_RUNTIME)
continue;
free_bootmem_late(start, size);
if (count < 0) {
return NULL;
} else if (count > 0) {
- resources = kmalloc(count * sizeof(struct resource),
+ resources = kzalloc(count * sizeof(struct resource),
GFP_KERNEL);
if (!resources) {
dev_err(&adev->dev, "No memory for resources\n");
*/
if (ACPI_SUCCESS(status) &&
possible_method_call && (node->type == ACPI_TYPE_METHOD)) {
- if (GET_CURRENT_ARG_TYPE(walk_state->arg_types) ==
- ARGP_SUPERNAME) {
+ if (walk_state->opcode == AML_UNLOAD_OP) {
/*
* acpi_ps_get_next_namestring has increased the AML pointer,
* so we need to restore the saved AML pointer for method call.
*
* PARAMETERS: walk_state - Current state
* parser_state - Current parser state object
- * arg_type - The parser argument type (ARGP_*)
+ * arg_type - The argument type (AML_*_ARG)
* return_arg - Where the next arg is returned
*
* RETURN: Status, and an op object containing the next argument.
return_ACPI_STATUS(AE_NO_MEMORY);
}
- /* super_name allows argument to be a method call */
+ /* To support super_name arg of Unload */
- if (arg_type == ARGP_SUPERNAME) {
+ if (walk_state->opcode == AML_UNLOAD_OP) {
status =
acpi_ps_get_next_namepath(walk_state,
parser_state, arg,
u64 param3, u64 param4)
{
int rc;
- unsigned long pfn;
+ u64 base_addr, size;
/* If user manually set "flags", make sure it is legal */
if (flags && (flags &
/*
* Disallow crazy address masks that give BIOS leeway to pick
* injection address almost anywhere. Insist on page or
- * better granularity and that target address is normal RAM.
+ * better granularity and that the target address is normal RAM or
+ * NVDIMM.
*/
- pfn = PFN_DOWN(param1 & param2);
- if (!page_is_ram(pfn) || ((param2 & PAGE_MASK) != PAGE_MASK))
+ base_addr = param1 & param2;
+ size = ~param2 + 1;
+
+ if (((param2 & PAGE_MASK) != PAGE_MASK) ||
+ ((region_intersects(base_addr, size, IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE)
+ != REGION_INTERSECTS) &&
+ (region_intersects(base_addr, size, IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY)
+ != REGION_INTERSECTS)))
return -EINVAL;
inject:
bool ret;
ret = __fwnode_property_present(fwnode, propname);
- if (ret == false && fwnode && fwnode->secondary)
+ if (ret == false && fwnode && !IS_ERR_OR_NULL(fwnode->secondary))
ret = __fwnode_property_present(fwnode->secondary, propname);
return ret;
}
int _ret_; \
_ret_ = FWNODE_PROP_READ(_fwnode_, _propname_, _type_, _proptype_, \
_val_, _nval_); \
- if (_ret_ == -EINVAL && _fwnode_ && _fwnode_->secondary) \
+ if (_ret_ == -EINVAL && _fwnode_ && !IS_ERR_OR_NULL(_fwnode_->secondary)) \
_ret_ = FWNODE_PROP_READ(_fwnode_->secondary, _propname_, _type_, \
_proptype_, _val_, _nval_); \
_ret_; \
int ret;
ret = __fwnode_property_read_string_array(fwnode, propname, val, nval);
- if (ret == -EINVAL && fwnode && fwnode->secondary)
+ if (ret == -EINVAL && fwnode && !IS_ERR_OR_NULL(fwnode->secondary))
ret = __fwnode_property_read_string_array(fwnode->secondary,
propname, val, nval);
return ret;
int ret;
ret = __fwnode_property_read_string(fwnode, propname, val);
- if (ret == -EINVAL && fwnode && fwnode->secondary)
+ if (ret == -EINVAL && fwnode && !IS_ERR_OR_NULL(fwnode->secondary))
ret = __fwnode_property_read_string(fwnode->secondary,
propname, val);
return ret;
#define AT_XDMAC_MAX_CHAN 0x20
#define AT_XDMAC_MAX_CSIZE 16 /* 16 data */
#define AT_XDMAC_MAX_DWIDTH 8 /* 64 bits */
+#define AT_XDMAC_RESIDUE_MAX_RETRIES 5
#define AT_XDMAC_DMA_BUSWIDTHS\
(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
struct at_xdmac_desc *desc, *_desc;
struct list_head *descs_list;
enum dma_status ret;
- int residue;
- u32 cur_nda, mask, value;
+ int residue, retry;
+ u32 cur_nda, check_nda, cur_ubc, mask, value;
u8 dwidth = 0;
unsigned long flags;
cpu_relax();
}
+ /*
+ * When processing the residue, we need to read two registers but we
+ * can't do it in an atomic way. AT_XDMAC_CNDA is used to find where
+ * we stand in the descriptor list and AT_XDMAC_CUBC is used
+ * to know how much data remains for the current descriptor.
+ * Since the dma channel is not paused (so as not to lose data), the
+ * controller may have moved on to the next descriptor between the
+ * AT_XDMAC_CNDA and AT_XDMAC_CUBC reads.
+ * For that reason, after reading AT_XDMAC_CUBC, we check whether we
+ * are still on the same descriptor by reading AT_XDMAC_CNDA a second
+ * time. If AT_XDMAC_CNDA has changed, we read AT_XDMAC_CUBC again.
+ * Memory barriers are used to ensure the read order of the registers.
+ * A maximum number of retries is set because, when transferring a lot
+ * of data with small buffers, this loop could otherwise never end.
+ */
cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
+ rmb();
+ cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
+ for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
+ rmb();
+ check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
+
+ if (likely(cur_nda == check_nda))
+ break;
+
+ cur_nda = check_nda;
+ rmb();
+ cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
+ }
+
+ if (unlikely(retry >= AT_XDMAC_RESIDUE_MAX_RETRIES)) {
+ ret = DMA_ERROR;
+ goto spin_unlock;
+ }
+
/*
* Remove size of all microblocks already transferred and the current
* one. Then add the remaining size to transfer of the current
if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda)
break;
}
- residue += at_xdmac_chan_read(atchan, AT_XDMAC_CUBC) << dwidth;
+ residue += cur_ubc << dwidth;
dma_set_residue(txstate, residue);
chan_dbg(chan, "LD %p callback\n", desc);
txd->callback(txd->callback_param);
}
+
+ dma_descriptor_unmap(txd);
}
/* Run any dependencies */
mutex_lock(&dev_priv->av_mutex);
intel_dig_port->audio_connector = connector;
+ /* referred in audio callbacks */
+ dev_priv->dig_port_map[port] = intel_encoder;
mutex_unlock(&dev_priv->av_mutex);
if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
mutex_lock(&dev_priv->av_mutex);
intel_dig_port->audio_connector = NULL;
+ dev_priv->dig_port_map[port] = NULL;
mutex_unlock(&dev_priv->av_mutex);
if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
intel_encoder->get_config = intel_ddi_get_config;
intel_dig_port->port = port;
- dev_priv->dig_port_map[port] = intel_encoder;
intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
(DDI_BUF_PORT_REVERSAL |
DDI_A_4_LANES);
}
intel_dig_port->port = port;
- dev_priv->dig_port_map[port] = intel_encoder;
intel_dig_port->dp.output_reg = output_reg;
intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
void intel_hdmi_init(struct drm_device *dev,
i915_reg_t hdmi_reg, enum port port)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_digital_port *intel_dig_port;
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
intel_encoder->cloneable |= 1 << INTEL_OUTPUT_HDMI;
intel_dig_port->port = port;
- dev_priv->dig_port_map[port] = intel_encoder;
intel_dig_port->hdmi.hdmi_reg = hdmi_reg;
intel_dig_port->dp.output_reg = INVALID_MMIO_REG;
bus->adapter.algo = &gmbus_algorithm;
+ /*
+ * We wish to retry with bit banging
+ * after a timed-out GMBUS attempt.
+ */
+ bus->adapter.retries = 1;
+
/* By default use a conservative clock rate */
bus->reg0 = pin | GMBUS_RATE_100KHZ;
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+/* We need to access legacy defines from linux/media.h */
+#define __NEED_MEDIA_LEGACY_API
+
#include <linux/compat.h>
#include <linux/export.h>
#include <linux/idr.h>
u_ent.group_id = 0; /* Unused */
u_ent.pads = ent->num_pads;
u_ent.links = ent->num_links - ent->num_backlinks;
+
+ /*
+ * Workaround for a bug in media-ctl <= v1.10 that makes it
+ * do the wrong thing if the entity function doesn't belong to
+ * either the MEDIA_ENT_F_OLD_BASE or MEDIA_ENT_F_OLD_SUBDEV_BASE
+ * ranges.
+ *
+ * Non-subdevices are expected to be in the MEDIA_ENT_F_OLD_BASE
+ * range; otherwise they will be silently ignored by media-ctl when
+ * printing the graphviz diagram. So, map them into the old
+ * devnode range.
+ */
+ if (ent->function < MEDIA_ENT_F_OLD_BASE ||
+ ent->function > MEDIA_ENT_T_DEVNODE_UNKNOWN) {
+ if (is_media_entity_v4l2_subdev(ent))
+ u_ent.type = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN;
+ else if (ent->function != MEDIA_ENT_F_IO_V4L)
+ u_ent.type = MEDIA_ENT_T_DEVNODE_UNKNOWN;
+ }
+
memcpy(&u_ent.raw, &ent->info, sizeof(ent->info));
if (copy_to_user(uent, &u_ent, sizeof(u_ent)))
return -EFAULT;
__nand_calculate_ecc(error_data, size, calc_ecc);
ret = __nand_correct_data(error_data, error_ecc, calc_ecc, size);
- return (ret == -1) ? 0 : -EINVAL;
+ return (ret == -EBADMSG) ? 0 : -EINVAL;
}
static const struct nand_ecc_test nand_ecc_test[] = {
for (p = iomem_resource.child; p ; p = p->sibling) {
struct nd_region_desc ndr_desc;
- if (strncmp(p->name, "Persistent Memory (legacy)", 26) != 0)
+ if (p->desc != IORES_DESC_PERSISTENT_MEMORY_LEGACY)
continue;
memset(&ndr_desc, 0, sizeof(ndr_desc));
for (i=0;i<HPEE_MEMORY_MAX_ENT;i++) {
c = get_8(buf+len);
- if (NULL != (res = kmalloc(sizeof(struct resource), GFP_KERNEL))) {
+ if (NULL != (res = kzalloc(sizeof(struct resource), GFP_KERNEL))) {
int result;
res->name = name;
for (i=0;i<HPEE_PORT_MAX_ENT;i++) {
c = get_8(buf+len);
- if (NULL != (res = kmalloc(sizeof(struct resource), GFP_KERNEL))) {
+ if (NULL != (res = kzalloc(sizeof(struct resource), GFP_KERNEL))) {
res->name = board;
res->start = get_16(buf+len+1);
res->end = get_16(buf+len+1)+(c&HPEE_PORT_SIZE_MASK)+1;
if (mport->ops->open_inb_mbox == NULL)
goto out;
- res = kmalloc(sizeof(struct resource), GFP_KERNEL);
+ res = kzalloc(sizeof(struct resource), GFP_KERNEL);
if (res) {
rio_init_mbox_res(res, mbox, mbox);
if (mport->ops->open_outb_mbox == NULL)
goto out;
- res = kmalloc(sizeof(struct resource), GFP_KERNEL);
+ res = kzalloc(sizeof(struct resource), GFP_KERNEL);
if (res) {
rio_init_mbox_res(res, mbox, mbox);
{
int rc = 0;
- struct resource *res = kmalloc(sizeof(struct resource), GFP_KERNEL);
+ struct resource *res = kzalloc(sizeof(struct resource), GFP_KERNEL);
if (res) {
rio_init_dbell_res(res, start, end);
struct resource *rio_request_outb_dbell(struct rio_dev *rdev, u16 start,
u16 end)
{
- struct resource *res = kmalloc(sizeof(struct resource), GFP_KERNEL);
+ struct resource *res = kzalloc(sizeof(struct resource), GFP_KERNEL);
if (res) {
rio_init_dbell_res(res, start, end);
* and function code cmd.
* In case of an exception return 3. Otherwise return the bitwise OR of the
* resulting condition code and the DIAG return code. */
-static inline int dia250(void *iob, int cmd)
+static inline int __dia250(void *iob, int cmd)
{
register unsigned long reg2 asm ("2") = (unsigned long) iob;
typedef union {
int rc;
rc = 3;
- diag_stat_inc(DIAG_STAT_X250);
asm volatile(
" diag 2,%2,0x250\n"
"0: ipm %0\n"
return rc;
}
+static inline int dia250(void *iob, int cmd)
+{
+ diag_stat_inc(DIAG_STAT_X250);
+ return __dia250(iob, cmd);
+}
+
/* Initialize block I/O to DIAG device using the specified blocksize and
* block offset. On success, return zero and set end_block to contain the
* number of blocks on the device minus the specified offset. Return non-zero
superhyway_read_vcr(dev, base, &dev->vcr);
if (!dev->resource) {
- dev->resource = kmalloc(sizeof(struct resource), GFP_KERNEL);
+ dev->resource = kzalloc(sizeof(struct resource), GFP_KERNEL);
if (!dev->resource) {
kfree(dev);
return -ENOMEM;
if (!__target_check_io_state(se_cmd, se_sess, 0)) {
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
- target_put_sess_cmd(se_cmd);
goto out;
}
list_del_init(&se_cmd->se_cmd_list);
return NULL;
res->name = "System RAM";
- res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
ret = allocate_resource(&iomem_resource, res,
size, 0, -1,
pr_notice("%s(): Link succeeded, unlink failed (err %d). You now have a hard link\n",
__func__, ret);
- /* Might as well let the VFS know */
- d_instantiate(new_dentry, d_inode(old_dentry));
- ihold(d_inode(old_dentry));
+ /*
+ * We can't keep the target in dcache after that.
+ * For one thing, we can't afford dentry aliases for directories.
+ * For another, if there was a victim, we _can't_ set new inode
+ * for that sucker and we have to trigger mount eviction - the
+ * caller won't do it on its own since we are returning an error.
+ */
+ d_invalidate(new_dentry);
new_dir_i->i_mtime = new_dir_i->i_ctime = ITIME(now);
return ret;
}
d_rehash(newdent);
} else {
spin_lock(&dentry->d_lock);
- NCP_FINFO(inode)->flags &= ~NCPI_DIR_CACHE;
+ NCP_FINFO(dir)->flags &= ~NCPI_DIR_CACHE;
spin_unlock(&dentry->d_lock);
}
} else {
bool tmp_wrapped;
/*
- * Search backwards through the log looking for the log record header
- * block. This wraps all the way back around to the head so something is
- * seriously wrong if we can't find it.
- */
- found = xlog_rseek_logrec_hdr(log, *head_blk, *head_blk, 1, bp, rhead_blk,
- rhead, wrapped);
- if (found < 0)
- return found;
- if (!found) {
- xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
- return -EIO;
- }
-
- *tail_blk = BLOCK_LSN(be64_to_cpu((*rhead)->h_tail_lsn));
-
- /*
- * Now that we have a tail block, check the head of the log for torn
- * writes. Search again until we hit the tail or the maximum number of
- * log record I/Os that could have been in flight at one time. Use a
- * temporary buffer so we don't trash the rhead/bp pointer from the
- * call above.
+ * Check the head of the log for torn writes. Search backwards from the
+ * head until we hit the tail or the maximum number of log record I/Os
+ * that could have been in flight at one time. Use a temporary buffer so
+ * we don't trash the rhead/bp pointers from the caller.
*/
tmp_bp = xlog_get_bp(log, 1);
if (!tmp_bp)
}
/*
+ * Check whether the head of the log points to an unmount record. In other
+ * words, determine whether the log is clean. If so, update the in-core state
+ * appropriately.
+ */
+static int
+xlog_check_unmount_rec(
+ struct xlog *log,
+ xfs_daddr_t *head_blk,
+ xfs_daddr_t *tail_blk,
+ struct xlog_rec_header *rhead,
+ xfs_daddr_t rhead_blk,
+ struct xfs_buf *bp,
+ bool *clean)
+{
+ struct xlog_op_header *op_head;
+ xfs_daddr_t umount_data_blk;
+ xfs_daddr_t after_umount_blk;
+ int hblks;
+ int error;
+ char *offset;
+
+ *clean = false;
+
+ /*
+ * Look for an unmount record. If we find one, then we know there was
+ * a clean unmount. Since this record could be the last block in the
+ * physical log, we convert to a log block before comparing to head_blk.
+ *
+ * Save the current tail lsn to pass to xlog_clear_stale_blocks()
+ * below. We won't want to clear the unmount record if there is one, so
+ * we pass the lsn of the unmount record rather than the block after it.
+ */
+ if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
+ int h_size = be32_to_cpu(rhead->h_size);
+ int h_version = be32_to_cpu(rhead->h_version);
+
+ if ((h_version & XLOG_VERSION_2) &&
+ (h_size > XLOG_HEADER_CYCLE_SIZE)) {
+ hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
+ if (h_size % XLOG_HEADER_CYCLE_SIZE)
+ hblks++;
+ } else {
+ hblks = 1;
+ }
+ } else {
+ hblks = 1;
+ }
+ after_umount_blk = rhead_blk + hblks + BTOBB(be32_to_cpu(rhead->h_len));
+ after_umount_blk = do_mod(after_umount_blk, log->l_logBBsize);
+ if (*head_blk == after_umount_blk &&
+ be32_to_cpu(rhead->h_num_logops) == 1) {
+ umount_data_blk = rhead_blk + hblks;
+ umount_data_blk = do_mod(umount_data_blk, log->l_logBBsize);
+ error = xlog_bread(log, umount_data_blk, 1, bp, &offset);
+ if (error)
+ return error;
+
+ op_head = (struct xlog_op_header *)offset;
+ if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
+ /*
+ * Set tail and last sync so that newly written log
+ * records will point recovery to after the current
+ * unmount record.
+ */
+ xlog_assign_atomic_lsn(&log->l_tail_lsn,
+ log->l_curr_cycle, after_umount_blk);
+ xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
+ log->l_curr_cycle, after_umount_blk);
+ *tail_blk = after_umount_blk;
+
+ *clean = true;
+ }
+ }
+
+ return 0;
+}
+
+static void
+xlog_set_state(
+ struct xlog *log,
+ xfs_daddr_t head_blk,
+ struct xlog_rec_header *rhead,
+ xfs_daddr_t rhead_blk,
+ bool bump_cycle)
+{
+ /*
+ * Reset log values according to the state of the log when we
+ * crashed. In the case where head_blk == 0, we bump curr_cycle
+ * one because the next write starts a new cycle rather than
+ * continuing the cycle of the last good log record. At this
+ * point we have guaranteed that all partial log records have been
+ * accounted for. Therefore, we know that the last good log record
+ * written was complete and ended exactly on the end boundary
+ * of the physical log.
+ */
+ log->l_prev_block = rhead_blk;
+ log->l_curr_block = (int)head_blk;
+ log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
+ if (bump_cycle)
+ log->l_curr_cycle++;
+ atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
+ atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
+ xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle,
+ BBTOB(log->l_curr_block));
+ xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle,
+ BBTOB(log->l_curr_block));
+}
+
+/*
* Find the sync block number or the tail of the log.
*
* This will be the block number of the last record to have its
xfs_daddr_t *tail_blk)
{
xlog_rec_header_t *rhead;
- xlog_op_header_t *op_head;
char *offset = NULL;
xfs_buf_t *bp;
int error;
- xfs_daddr_t umount_data_blk;
- xfs_daddr_t after_umount_blk;
xfs_daddr_t rhead_blk;
xfs_lsn_t tail_lsn;
- int hblks;
bool wrapped = false;
+ bool clean = false;
/*
* Find previous log record
*/
if ((error = xlog_find_head(log, head_blk)))
return error;
+ ASSERT(*head_blk < INT_MAX);
bp = xlog_get_bp(log, 1);
if (!bp)
}
/*
- * Trim the head block back to skip over torn records. We can have
- * multiple log I/Os in flight at any time, so we assume CRC failures
- * back through the previous several records are torn writes and skip
- * them.
+ * Search backwards through the log looking for the log record header
+ * block. This wraps all the way back around to the head so something is
+ * seriously wrong if we can't find it.
*/
- ASSERT(*head_blk < INT_MAX);
- error = xlog_verify_head(log, head_blk, tail_blk, bp, &rhead_blk,
- &rhead, &wrapped);
- if (error)
- goto done;
+ error = xlog_rseek_logrec_hdr(log, *head_blk, *head_blk, 1, bp,
+ &rhead_blk, &rhead, &wrapped);
+ if (error < 0)
+ return error;
+ if (!error) {
+ xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
+ return -EIO;
+ }
+ *tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));
/*
- * Reset log values according to the state of the log when we
- * crashed. In the case where head_blk == 0, we bump curr_cycle
- * one because the next write starts a new cycle rather than
- * continuing the cycle of the last good log record. At this
- * point we have guaranteed that all partial log records have been
- * accounted for. Therefore, we know that the last good log record
- * written was complete and ended exactly on the end boundary
- * of the physical log.
+ * Set the log state based on the current head record.
*/
- log->l_prev_block = rhead_blk;
- log->l_curr_block = (int)*head_blk;
- log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
- if (wrapped)
- log->l_curr_cycle++;
- atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
- atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
- xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle,
- BBTOB(log->l_curr_block));
- xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle,
- BBTOB(log->l_curr_block));
+ xlog_set_state(log, *head_blk, rhead, rhead_blk, wrapped);
+ tail_lsn = atomic64_read(&log->l_tail_lsn);
/*
- * Look for unmount record. If we find it, then we know there
- * was a clean unmount. Since 'i' could be the last block in
- * the physical log, we convert to a log block before comparing
- * to the head_blk.
+ * Look for an unmount record at the head of the log. This sets the log
+ * state to determine whether recovery is necessary.
+ */
+ error = xlog_check_unmount_rec(log, head_blk, tail_blk, rhead,
+ rhead_blk, bp, &clean);
+ if (error)
+ goto done;
+
+ /*
+ * Verify the log head if the log is not clean (e.g., we have anything
+ * but an unmount record at the head). This uses CRC verification to
+ * detect and trim torn writes. If discovered, CRC failures are
+ * considered torn writes and the log head is trimmed accordingly.
*
- * Save the current tail lsn to use to pass to
- * xlog_clear_stale_blocks() below. We won't want to clear the
- * unmount record if there is one, so we pass the lsn of the
- * unmount record rather than the block after it.
+ * Note that we can only run CRC verification when the log is dirty
+ * because there's no guarantee that the log data behind an unmount
+ * record is compatible with the current architecture.
*/
- if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
- int h_size = be32_to_cpu(rhead->h_size);
- int h_version = be32_to_cpu(rhead->h_version);
+ if (!clean) {
+ xfs_daddr_t orig_head = *head_blk;
- if ((h_version & XLOG_VERSION_2) &&
- (h_size > XLOG_HEADER_CYCLE_SIZE)) {
- hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
- if (h_size % XLOG_HEADER_CYCLE_SIZE)
- hblks++;
- } else {
- hblks = 1;
- }
- } else {
- hblks = 1;
- }
- after_umount_blk = rhead_blk + hblks + BTOBB(be32_to_cpu(rhead->h_len));
- after_umount_blk = do_mod(after_umount_blk, log->l_logBBsize);
- tail_lsn = atomic64_read(&log->l_tail_lsn);
- if (*head_blk == after_umount_blk &&
- be32_to_cpu(rhead->h_num_logops) == 1) {
- umount_data_blk = rhead_blk + hblks;
- umount_data_blk = do_mod(umount_data_blk, log->l_logBBsize);
- error = xlog_bread(log, umount_data_blk, 1, bp, &offset);
+ error = xlog_verify_head(log, head_blk, tail_blk, bp,
+ &rhead_blk, &rhead, &wrapped);
if (error)
goto done;
- op_head = (xlog_op_header_t *)offset;
- if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
- /*
- * Set tail and last sync so that newly written
- * log records will point recovery to after the
- * current unmount record.
- */
- xlog_assign_atomic_lsn(&log->l_tail_lsn,
- log->l_curr_cycle, after_umount_blk);
- xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
- log->l_curr_cycle, after_umount_blk);
- *tail_blk = after_umount_blk;
-
- /*
- * Note that the unmount was clean. If the unmount
- * was not clean, we need to know this to rebuild the
- * superblock counters from the perag headers if we
- * have a filesystem using non-persistent counters.
- */
- log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;
+ /* update in-core state again if the head changed */
+ if (*head_blk != orig_head) {
+ xlog_set_state(log, *head_blk, rhead, rhead_blk,
+ wrapped);
+ tail_lsn = atomic64_read(&log->l_tail_lsn);
+ error = xlog_check_unmount_rec(log, head_blk, tail_blk,
+ rhead, rhead_blk, bp,
+ &clean);
+ if (error)
+ goto done;
}
}
/*
+ * Note that the unmount was clean. If the unmount was not clean, we
+ * need to know this to rebuild the superblock counters from the perag
+ * headers if we have a filesystem using non-persistent counters.
+ */
+ if (clean)
+ log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;
+
+ /*
* Make sure that there are no blocks in front of the head
* with the same cycle number as the head. This can happen
* because we allow multiple outstanding log writes concurrently,
struct bvec_iter iter = bio->bi_iter;
int idx;
- if (!bio_flagged(bio, BIO_CLONED)) {
- *bv = bio->bi_io_vec[bio->bi_vcnt - 1];
- return;
- }
-
if (unlikely(!bio_multiple_segments(bio))) {
*bv = bio_iovec(bio);
return;
resource_size_t end;
const char *name;
unsigned long flags;
+ unsigned long desc;
struct resource *parent, *sibling, *child;
};
#define IORESOURCE_WINDOW 0x00200000 /* forwarded by bridge */
#define IORESOURCE_MUXED 0x00400000 /* Resource is software muxed */
+#define IORESOURCE_EXT_TYPE_BITS 0x01000000 /* Resource extended types */
+#define IORESOURCE_SYSRAM 0x01000000 /* System RAM (modifier) */
+
#define IORESOURCE_EXCLUSIVE 0x08000000 /* Userland may not map this resource */
+
#define IORESOURCE_DISABLED 0x10000000
#define IORESOURCE_UNSET 0x20000000 /* No address assigned yet */
#define IORESOURCE_AUTO 0x40000000
#define IORESOURCE_BUSY 0x80000000 /* Driver has marked this resource busy */
+/* I/O resource extended types */
+#define IORESOURCE_SYSTEM_RAM (IORESOURCE_MEM|IORESOURCE_SYSRAM)
+
/* PnP IRQ specific bits (IORESOURCE_BITS) */
#define IORESOURCE_IRQ_HIGHEDGE (1<<0)
#define IORESOURCE_IRQ_LOWEDGE (1<<1)
/* PCI control bits. Shares IORESOURCE_BITS with above PCI ROM. */
#define IORESOURCE_PCI_FIXED (1<<4) /* Do not move resource */
+/*
+ * I/O Resource Descriptors
+ *
+ * Descriptors are used by walk_iomem_res_desc() and region_intersects()
+ * for searching a specific resource range in the iomem table. Assign
+ * a new descriptor when a resource range supports the search interfaces.
+ * Otherwise, resource.desc must be set to IORES_DESC_NONE (0).
+ */
+enum {
+ IORES_DESC_NONE = 0,
+ IORES_DESC_CRASH_KERNEL = 1,
+ IORES_DESC_ACPI_TABLES = 2,
+ IORES_DESC_ACPI_NV_STORAGE = 3,
+ IORES_DESC_PERSISTENT_MEMORY = 4,
+ IORES_DESC_PERSISTENT_MEMORY_LEGACY = 5,
+};
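
As a rough sketch of how the new descriptor plumbing is meant to be used (not part of the patch; the resource, range, and callback below are hypothetical, only the <linux/ioport.h> symbols are real): a resource owner tags its entry with a descriptor, and a consumer enumerates matching ranges with walk_iomem_res_desc().

	/* Hypothetical sketch: tag a resource, then walk ranges by descriptor. */
	#include <linux/ioport.h>

	static struct resource example_res = {
		.name  = "Example NV storage",		/* hypothetical */
		.start = 0x80000000,
		.end   = 0x8000ffff,
		.flags = IORESOURCE_MEM | IORESOURCE_BUSY,
		.desc  = IORES_DESC_ACPI_NV_STORAGE,
	};

	static int example_cb(u64 start, u64 end, void *arg)
	{
		pr_info("matched range %#llx-%#llx\n",
			(unsigned long long)start, (unsigned long long)end);
		return 0;	/* returning non-zero stops the walk */
	}

	static int example_walk(void)
	{
		return walk_iomem_res_desc(IORES_DESC_ACPI_NV_STORAGE,
					   IORESOURCE_MEM | IORESOURCE_BUSY,
					   0, -1, NULL, example_cb);
	}
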
/* helpers to define resources */
#define DEFINE_RES_NAMED(_start, _size, _name, _flags) \
.end = (_start) + (_size) - 1, \
.name = (_name), \
.flags = (_flags), \
+ .desc = IORES_DESC_NONE, \
}
#define DEFINE_RES_IO_NAMED(_start, _size, _name) \
{
return res->flags & IORESOURCE_TYPE_BITS;
}
+static inline unsigned long resource_ext_type(const struct resource *res)
+{
+ return res->flags & IORESOURCE_EXT_TYPE_BITS;
+}
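
A minimal sketch of how the new accessor composes with resource_type(); the predicate below is illustrative, not part of the patch:

	/* Sketch: System RAM is IORESOURCE_MEM plus the IORESOURCE_SYSRAM modifier. */
	static bool example_is_system_ram(const struct resource *res)
	{
		return resource_type(res) == IORESOURCE_MEM &&
		       resource_ext_type(res) == IORESOURCE_SYSRAM;
	}
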
/* True iff r1 completely contains r2 */
static inline bool resource_contains(struct resource *r1, struct resource *r2)
{
walk_system_ram_res(u64 start, u64 end, void *arg,
int (*func)(u64, u64, void *));
extern int
-walk_iomem_res(char *name, unsigned long flags, u64 start, u64 end, void *arg,
- int (*func)(u64, u64, void *));
+walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start, u64 end,
+ void *arg, int (*func)(u64, u64, void *));
/* True if any part of r1 overlaps r2 */
static inline bool resource_overlaps(struct resource *r1, struct resource *r2)
REGION_MIXED,
};
-int region_intersects(resource_size_t offset, size_t size, const char *type);
+int region_intersects(resource_size_t offset, size_t size, unsigned long flags,
+ unsigned long desc);
/* Support for virtually mapped pages */
struct page *vmalloc_to_page(const void *addr);
#define MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN MEDIA_ENT_F_OLD_SUBDEV_BASE
-#ifndef __KERNEL__
+#if !defined(__KERNEL__) || defined(__NEED_MEDIA_LEGACY_API)
/*
* Legacy symbols used to avoid userspace compilation breakages
#define MEDIA_ENT_TYPE_MASK 0x00ff0000
#define MEDIA_ENT_SUBTYPE_MASK 0x0000ffff
+/* End of the old subdev reserved numberspace */
+#define MEDIA_ENT_T_DEVNODE_UNKNOWN (MEDIA_ENT_T_DEVNODE | \
+ MEDIA_ENT_SUBTYPE_MASK)
+
#define MEDIA_ENT_T_DEVNODE MEDIA_ENT_F_OLD_BASE
#define MEDIA_ENT_T_DEVNODE_V4L MEDIA_ENT_F_IO_V4L
#define MEDIA_ENT_T_DEVNODE_FB (MEDIA_ENT_T_DEVNODE + 2)
.name = "Crash kernel",
.start = 0,
.end = 0,
- .flags = IORESOURCE_BUSY | IORESOURCE_MEM
+ .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
+ .desc = IORES_DESC_CRASH_KERNEL
};
struct resource crashk_low_res = {
.name = "Crash kernel",
.start = 0,
.end = 0,
- .flags = IORESOURCE_BUSY | IORESOURCE_MEM
+ .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
+ .desc = IORES_DESC_CRASH_KERNEL
};
int kexec_should_crash(struct task_struct *p)
ram_res->start = end;
ram_res->end = crashk_res.end;
- ram_res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
+ ram_res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
ram_res->name = "System RAM";
crashk_res.end = end - 1;
/* Walk the RAM ranges and allocate a suitable range for the buffer */
if (image->type == KEXEC_TYPE_CRASH)
- ret = walk_iomem_res("Crash kernel",
- IORESOURCE_MEM | IORESOURCE_BUSY,
- crashk_res.start, crashk_res.end, kbuf,
- locate_mem_hole_callback);
+ ret = walk_iomem_res_desc(crashk_res.desc,
+ IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY,
+ crashk_res.start, crashk_res.end, kbuf,
+ locate_mem_hole_callback);
else
ret = walk_system_ram_res(0, -1, kbuf,
locate_mem_hole_callback);
* being mapped does not have i/o side effects and the __iomem
* annotation is not applicable.
*
- * MEMREMAP_WB - matches the default mapping for "System RAM" on
+ * MEMREMAP_WB - matches the default mapping for System RAM on
* the architecture. This is usually a read-allocate write-back cache.
* Moreover, if MEMREMAP_WB is specified and the requested remap region is RAM,
* memremap() will bypass establishing a new mapping and instead return
* MEMREMAP_WT - establish a mapping whereby writes either bypass the
* cache or are written through to memory and never exist in a
* cache-dirty state with respect to program visibility. Attempts to
- * map "System RAM" with this mapping type will fail.
+ * map System RAM with this mapping type will fail.
*/
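
In usage terms, a hedged sketch of the two cache modes (phys_addr and len are hypothetical):

	/* Sketch: MEMREMAP_WB succeeds on System RAM (and may return the
	 * direct-map address); MEMREMAP_WT is refused for System RAM. */
	static void example_remap(resource_size_t phys_addr, size_t len)
	{
		void *va = memremap(phys_addr, len, MEMREMAP_WB);

		if (!va)
			return;
		/* ... treat va as ordinary cacheable memory ... */
		memunmap(va);
	}
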
void *memremap(resource_size_t offset, size_t size, unsigned long flags)
{
- int is_ram = region_intersects(offset, size, "System RAM");
+ int is_ram = region_intersects(offset, size,
+ IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
void *addr = NULL;
if (is_ram == REGION_MIXED) {
* MEMREMAP_WB is special in that it can be satisfied
* from the direct map. Some archs depend on the
* capability of memremap() to autodetect cases where
- * the requested range is potentially in "System RAM"
+ * the requested range is potentially in System RAM.
*/
if (is_ram == REGION_INTERSECTS)
addr = try_ram_remap(offset, size);
* If we don't have a mapping yet and more request flags are
* pending then we will be attempting to establish a new virtual
* address mapping. Enforce that this mapping is not aliasing
- * "System RAM"
+ * System RAM.
*/
if (!addr && is_ram == REGION_INTERSECTS && flags) {
WARN_ONCE(1, "memremap attempted on ram %pa size: %#lx\n",
align_start = res->start & ~(SECTION_SIZE - 1);
align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
- align_start;
- is_ram = region_intersects(align_start, align_size, "System RAM");
+ is_ram = region_intersects(align_start, align_size,
+ IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
if (is_ram == REGION_MIXED) {
WARN_ONCE(1, "%s attempted on mixed region %pr\n",
EXPORT_SYMBOL(release_resource);
/*
- * Finds the lowest iomem reosurce exists with-in [res->start.res->end)
- * the caller must specify res->start, res->end, res->flags and "name".
- * If found, returns 0, res is overwritten, if not found, returns -1.
- * This walks through whole tree and not just first level children
- * until and unless first_level_children_only is true.
+ * Finds the lowest iomem resource existing within [res->start..res->end).
+ * The caller must specify res->start, res->end, res->flags, and optionally
+ * desc. If found, returns 0 and res is overwritten; if not found, returns -1.
+ * This function walks the whole tree, not just the first-level children,
+ * unless first_level_children_only is true.
*/
-static int find_next_iomem_res(struct resource *res, char *name,
+static int find_next_iomem_res(struct resource *res, unsigned long desc,
bool first_level_children_only)
{
resource_size_t start, end;
read_lock(&resource_lock);
for (p = iomem_resource.child; p; p = next_resource(p, sibling_only)) {
- if (p->flags != res->flags)
+ if ((p->flags & res->flags) != res->flags)
continue;
- if (name && strcmp(p->name, name))
+ if ((desc != IORES_DESC_NONE) && (desc != p->desc))
continue;
if (p->start > end) {
p = NULL;
* Walks through iomem resources and calls func() with matching resource
* ranges. This walks through whole tree and not just first level children.
* All the memory ranges which overlap start,end and also match flags and
- * name are valid candidates.
+ * desc are valid candidates.
*
- * @name: name of resource
- * @flags: resource flags
+ * @desc: I/O resource descriptor. Use IORES_DESC_NONE to skip @desc check.
+ * @flags: I/O resource flags
* @start: start addr
* @end: end addr
+ *
+ * NOTE: For a new descriptor search, define a new IORES_DESC in
+ * <linux/ioport.h> and set it in 'desc' of a target resource entry.
*/
-int walk_iomem_res(char *name, unsigned long flags, u64 start, u64 end,
- void *arg, int (*func)(u64, u64, void *))
+int walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start,
+ u64 end, void *arg, int (*func)(u64, u64, void *))
{
struct resource res;
u64 orig_end;
res.end = end;
res.flags = flags;
orig_end = res.end;
+
while ((res.start < res.end) &&
- (!find_next_iomem_res(&res, name, false))) {
+ (!find_next_iomem_res(&res, desc, false))) {
+
ret = (*func)(res.start, res.end, arg);
if (ret)
break;
+
res.start = res.end + 1;
res.end = orig_end;
}
+
return ret;
}
/*
- * This function calls callback against all memory range of "System RAM"
- * which are marked as IORESOURCE_MEM and IORESOUCE_BUSY.
- * Now, this function is only for "System RAM". This function deals with
- * full ranges and not pfn. If resources are not pfn aligned, dealing
- * with pfn can truncate ranges.
+ * This function calls the @func callback against all memory ranges of type
+ * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY.
+ * Now, this function is only for System RAM; it deals with full ranges and
+ * not PFNs. If resources are not PFN-aligned, dealing with PFNs can truncate
+ * ranges.
*/
int walk_system_ram_res(u64 start, u64 end, void *arg,
int (*func)(u64, u64, void *))
res.start = start;
res.end = end;
- res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
orig_end = res.end;
while ((res.start < res.end) &&
- (!find_next_iomem_res(&res, "System RAM", true))) {
+ (!find_next_iomem_res(&res, IORES_DESC_NONE, true))) {
ret = (*func)(res.start, res.end, arg);
if (ret)
break;
#if !defined(CONFIG_ARCH_HAS_WALK_MEMORY)
/*
- * This function calls callback against all memory range of "System RAM"
- * which are marked as IORESOURCE_MEM and IORESOUCE_BUSY.
- * Now, this function is only for "System RAM".
+ * This function calls the @func callback against all memory ranges of type
+ * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY.
+ * It is to be used only for System RAM.
*/
int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
void *arg, int (*func)(unsigned long, unsigned long, void *))
res.start = (u64) start_pfn << PAGE_SHIFT;
res.end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
- res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
orig_end = res.end;
while ((res.start < res.end) &&
- (find_next_iomem_res(&res, "System RAM", true) >= 0)) {
+ (find_next_iomem_res(&res, IORES_DESC_NONE, true) >= 0)) {
pfn = (res.start + PAGE_SIZE - 1) >> PAGE_SHIFT;
end_pfn = (res.end + 1) >> PAGE_SHIFT;
if (end_pfn > pfn)
}
/*
* This generic page_is_ram() returns true if specified address is
- * registered as "System RAM" in iomem_resource list.
+ * registered as System RAM in the iomem_resource list.
*/
int __weak page_is_ram(unsigned long pfn)
{
* region_intersects() - determine intersection of region with known resources
* @start: region start address
* @size: size of region
- * @name: name of resource (in iomem_resource)
+ * @flags: flags of resource (in iomem_resource)
+ * @desc: descriptor of resource (in iomem_resource) or IORES_DESC_NONE
*
* Check if the specified region partially overlaps or fully eclipses a
- * resource identified by @name. Return REGION_DISJOINT if the region
- * does not overlap @name, return REGION_MIXED if the region overlaps
- * @type and another resource, and return REGION_INTERSECTS if the
- * region overlaps @type and no other defined resource. Note, that
- * REGION_INTERSECTS is also returned in the case when the specified
- * region overlaps RAM and undefined memory holes.
+ * resource identified by @flags and @desc (optional with IORES_DESC_NONE).
+ * Return REGION_DISJOINT if the region does not overlap @flags/@desc,
+ * return REGION_MIXED if the region overlaps @flags/@desc and another
+ * resource, and return REGION_INTERSECTS if the region overlaps @flags/@desc
+ * and no other defined resource. Note that REGION_INTERSECTS is also
+ * returned in the case when the specified region overlaps RAM and undefined
+ * memory holes.
*
* region_intersects() is used by memory remapping functions to ensure
* the user is not remapping RAM and is a vast speed up over walking
* through the resource table page by page.
*/
-int region_intersects(resource_size_t start, size_t size, const char *name)
+int region_intersects(resource_size_t start, size_t size, unsigned long flags,
+ unsigned long desc)
{
- unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY;
resource_size_t end = start + size - 1;
int type = 0; int other = 0;
struct resource *p;
read_lock(&resource_lock);
for (p = iomem_resource.child; p ; p = p->sibling) {
- bool is_type = strcmp(p->name, name) == 0 && p->flags == flags;
+ bool is_type = (((p->flags & flags) == flags) &&
+ ((desc == IORES_DESC_NONE) ||
+ (desc == p->desc)));
if (start >= p->start && start <= p->end)
is_type ? type++ : other++;
return REGION_DISJOINT;
}
+EXPORT_SYMBOL_GPL(region_intersects);
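
With region_intersects() now exported, modular code can perform the same RAM-overlap check that memremap() does internally. A hedged sketch, with hypothetical phys/size:

	/* Sketch: only remap the range if no part of it is System RAM. */
	static void *example_map_if_not_ram(resource_size_t phys, size_t size)
	{
		if (region_intersects(phys, size, IORESOURCE_SYSTEM_RAM,
				      IORES_DESC_NONE) != REGION_DISJOINT)
			return NULL;

		return memremap(phys, size, MEMREMAP_WT);
	}
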
void __weak arch_remove_reservations(struct resource *avail)
{
res->start = start;
res->end = end;
res->flags = IORESOURCE_BUSY;
+ res->desc = IORES_DESC_NONE;
while (1) {
next_res->start = conflict->end + 1;
next_res->end = end;
next_res->flags = IORESOURCE_BUSY;
+ next_res->desc = IORES_DESC_NONE;
}
} else {
res->start = conflict->end + 1;
res->name = name;
res->start = start;
res->end = start + n - 1;
- res->flags = resource_type(parent);
+ res->flags = resource_type(parent) | resource_ext_type(parent);
res->flags |= IORESOURCE_BUSY | flags;
+ res->desc = IORES_DESC_NONE;
write_lock(&resource_lock);
new_res->start = end + 1;
new_res->end = res->end;
new_res->flags = res->flags;
+ new_res->desc = res->desc;
new_res->parent = res->parent;
new_res->sibling = res->sibling;
new_res->child = NULL;
res->start = io_start;
res->end = io_start + io_num - 1;
res->flags = IORESOURCE_BUSY;
+ res->desc = IORES_DESC_NONE;
res->child = NULL;
if (request_resource(res->start >= 0x10000 ? &iomem_resource : &ioport_resource, res) == 0)
reserved = x+1;
res->name = "System RAM";
res->start = start;
res->end = start + size - 1;
- res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
if (request_resource(&iomem_resource, res) < 0) {
pr_debug("System RAM resource %pR cannot be added\n", res);
kfree(res);
void *element = pool->elements[--pool->curr_nr];
BUG_ON(pool->curr_nr < 0);
- check_element(pool, element);
kasan_unpoison_element(pool, element);
+ check_element(pool, element);
return element;
}
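
The reordering matters under KASAN: elements resting in the pool are poisoned, and check_element() reads the element's memory, so checking before unpoisoning raises a false KASAN report. Annotated, the fixed helper reads roughly as follows (a paraphrase with explanatory comments, not new code):

	static void *remove_element(mempool_t *pool)
	{
		void *element = pool->elements[--pool->curr_nr];

		BUG_ON(pool->curr_nr < 0);
		kasan_unpoison_element(pool, element);	/* make the memory accessible */
		check_element(pool, element);		/* only now safe to read it   */
		return element;
	}
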
#!/usr/bin/awk -f
# extract linker version number from stdin and turn into single number
{
- gsub(".*)", "");
+ gsub(".*\\)", "");
gsub(".*version ", "");
gsub("-.*", "");
split($1,a, ".");
else
val *= halt_poll_ns_grow;
+ if (val > halt_poll_ns)
+ val = halt_poll_ns;
+
vcpu->halt_poll_ns = val;
trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old);
}
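
The new clamp keeps adaptive halt polling bounded by the halt_poll_ns module parameter. A sketch of the capped grow step, using the parameter names from the hunk above with illustrative numbers:

	/* Sketch: the grow step with the new cap (names follow the hunk;
	 * the numeric values are illustrative, not defaults we assert). */
	static unsigned int example_grow(unsigned int cur)
	{
		unsigned int val = cur * halt_poll_ns_grow;	/* e.g. 400000 * 2 */

		if (val > halt_poll_ns)		/* e.g. module cap 500000 */
			val = halt_poll_ns;
		return val;			/* clamped to 500000, not 800000 */
	}
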