Diffstat (limited to 'arch/x86/platform')
-rw-r--r--  arch/x86/platform/efi/Makefile                          |   1
-rw-r--r--  arch/x86/platform/efi/efi-bgrt.c                        |  84
-rw-r--r--  arch/x86/platform/efi/efi.c                             |  19
-rw-r--r--  arch/x86/platform/efi/efi_32.c                          |   4
-rw-r--r--  arch/x86/platform/efi/efi_64.c                          |  45
-rw-r--r--  arch/x86/platform/efi/quirks.c                          |  12
-rw-r--r--  arch/x86/platform/intel-mid/device_libs/Makefile        |   3
-rw-r--r--  arch/x86/platform/intel-mid/device_libs/platform_bt.c   | 108
-rw-r--r--  arch/x86/platform/uv/tlb_uv.c                           | 195
-rw-r--r--  arch/x86/platform/uv/uv_time.c                          |   2
10 files changed, 298 insertions, 175 deletions
diff --git a/arch/x86/platform/efi/Makefile b/arch/x86/platform/efi/Makefile index 066619b0700c..f1d83b34c329 100644 --- a/arch/x86/platform/efi/Makefile +++ b/arch/x86/platform/efi/Makefile @@ -1,6 +1,5 @@ OBJECT_FILES_NON_STANDARD_efi_thunk_$(BITS).o := y obj-$(CONFIG_EFI) += quirks.o efi.o efi_$(BITS).o efi_stub_$(BITS).o -obj-$(CONFIG_ACPI_BGRT) += efi-bgrt.o obj-$(CONFIG_EARLY_PRINTK_EFI) += early_printk.o obj-$(CONFIG_EFI_MIXED) += efi_thunk_$(BITS).o diff --git a/arch/x86/platform/efi/efi-bgrt.c b/arch/x86/platform/efi/efi-bgrt.c deleted file mode 100644 index 04ca8764f0c0..000000000000 --- a/arch/x86/platform/efi/efi-bgrt.c +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Copyright 2012 Intel Corporation - * Author: Josh Triplett <josh@joshtriplett.org> - * - * Based on the bgrt driver: - * Copyright 2012 Red Hat, Inc <mjg@redhat.com> - * Author: Matthew Garrett - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/kernel.h> -#include <linux/init.h> -#include <linux/acpi.h> -#include <linux/efi.h> -#include <linux/efi-bgrt.h> - -struct acpi_table_bgrt bgrt_tab; -size_t __initdata bgrt_image_size; - -struct bmp_header { - u16 id; - u32 size; -} __packed; - -void __init efi_bgrt_init(struct acpi_table_header *table) -{ - void *image; - struct bmp_header bmp_header; - struct acpi_table_bgrt *bgrt = &bgrt_tab; - - if (acpi_disabled) - return; - - if (table->length < sizeof(bgrt_tab)) { - pr_notice("Ignoring BGRT: invalid length %u (expected %zu)\n", - table->length, sizeof(bgrt_tab)); - return; - } - *bgrt = *(struct acpi_table_bgrt *)table; - if (bgrt->version != 1) { - pr_notice("Ignoring BGRT: invalid version %u (expected 1)\n", - bgrt->version); - goto out; - } - if (bgrt->status & 0xfe) { - pr_notice("Ignoring BGRT: reserved status bits are non-zero %u\n", - bgrt->status); - goto out; - } - if (bgrt->image_type != 0) { - pr_notice("Ignoring BGRT: invalid image type %u (expected 0)\n", - bgrt->image_type); - goto out; - } - if (!bgrt->image_address) { - pr_notice("Ignoring BGRT: null image address\n"); - goto out; - } - - image = early_memremap(bgrt->image_address, sizeof(bmp_header)); - if (!image) { - pr_notice("Ignoring BGRT: failed to map image header memory\n"); - goto out; - } - - memcpy(&bmp_header, image, sizeof(bmp_header)); - early_memunmap(image, sizeof(bmp_header)); - if (bmp_header.id != 0x4d42) { - pr_notice("Ignoring BGRT: Incorrect BMP magic number 0x%x (expected 0x4d42)\n", - bmp_header.id); - goto out; - } - bgrt_image_size = bmp_header.size; - efi_mem_reserve(bgrt->image_address, bgrt_image_size); - - return; -out: - memset(bgrt, 0, sizeof(bgrt_tab)); -} diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index 565dff3c9a12..a15cf815ac4e 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -47,6 +47,7 @@ #include <asm/setup.h> #include <asm/efi.h> +#include <asm/e820/api.h> #include <asm/time.h> #include <asm/cacheflush.h> #include <asm/tlbflush.h> @@ -139,21 +140,21 @@ static void __init do_add_efi_memmap(void) case EFI_BOOT_SERVICES_DATA: case EFI_CONVENTIONAL_MEMORY: if (md->attribute & EFI_MEMORY_WB) - e820_type = E820_RAM; + e820_type = E820_TYPE_RAM; else - e820_type = E820_RESERVED; + e820_type = E820_TYPE_RESERVED; break; case EFI_ACPI_RECLAIM_MEMORY: - e820_type = E820_ACPI; + e820_type = E820_TYPE_ACPI; break; 
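As a side note, the efi.c hunk above (the switch statement continues below) renames the E820 type constants and moves callers from e820_add_region()/sanitize_e820_map() to e820__range_add()/e820__update_table(). The type mapping itself can be sketched in plain userspace C; the enum values, the EFI_MEMORY_WB bit, and the efi_to_e820() helper here are simplified stand-ins, not the kernel definitions from <asm/e820/types.h> or <linux/efi.h>:

/*
 * Hedged sketch (not the kernel code): the EFI-to-E820 type mapping that
 * do_add_efi_memmap() performs, with local stand-in enums and attribute bit.
 */
#include <stdio.h>

enum e820_type { E820_TYPE_RAM, E820_TYPE_RESERVED, E820_TYPE_ACPI,
                 E820_TYPE_NVS, E820_TYPE_UNUSABLE, E820_TYPE_PMEM };

enum efi_mem_type { EFI_CONVENTIONAL_MEMORY, EFI_ACPI_RECLAIM_MEMORY,
                    EFI_ACPI_MEMORY_NVS, EFI_UNUSABLE_MEMORY,
                    EFI_PERSISTENT_MEMORY, EFI_MMIO };

#define EFI_MEMORY_WB (1u << 3)    /* stand-in for the real attribute bit */

static enum e820_type efi_to_e820(enum efi_mem_type type, unsigned int attr)
{
    switch (type) {
    case EFI_CONVENTIONAL_MEMORY:
        /* usable RAM only if the firmware marked it write-back cacheable */
        return (attr & EFI_MEMORY_WB) ? E820_TYPE_RAM : E820_TYPE_RESERVED;
    case EFI_ACPI_RECLAIM_MEMORY:  return E820_TYPE_ACPI;
    case EFI_ACPI_MEMORY_NVS:      return E820_TYPE_NVS;
    case EFI_UNUSABLE_MEMORY:      return E820_TYPE_UNUSABLE;
    case EFI_PERSISTENT_MEMORY:    return E820_TYPE_PMEM;
    default:                       return E820_TYPE_RESERVED;
    }
}

int main(void)
{
    printf("%d\n", efi_to_e820(EFI_CONVENTIONAL_MEMORY, EFI_MEMORY_WB));
    return 0;
}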
case EFI_ACPI_MEMORY_NVS: - e820_type = E820_NVS; + e820_type = E820_TYPE_NVS; break; case EFI_UNUSABLE_MEMORY: - e820_type = E820_UNUSABLE; + e820_type = E820_TYPE_UNUSABLE; break; case EFI_PERSISTENT_MEMORY: - e820_type = E820_PMEM; + e820_type = E820_TYPE_PMEM; break; default: /* @@ -161,12 +162,12 @@ static void __init do_add_efi_memmap(void) * EFI_RUNTIME_SERVICES_DATA EFI_MEMORY_MAPPED_IO * EFI_MEMORY_MAPPED_IO_PORT_SPACE EFI_PAL_CODE */ - e820_type = E820_RESERVED; + e820_type = E820_TYPE_RESERVED; break; } - e820_add_region(start, size, e820_type); + e820__range_add(start, size, e820_type); } - sanitize_e820_map(e820->map, ARRAY_SIZE(e820->map), &e820->nr_map); + e820__update_table(e820_table); } int __init efi_memblock_x86_reserve_range(void) diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c index cef39b097649..3481268da3d0 100644 --- a/arch/x86/platform/efi/efi_32.c +++ b/arch/x86/platform/efi/efi_32.c @@ -68,7 +68,7 @@ pgd_t * __init efi_call_phys_prolog(void) load_cr3(initial_page_table); __flush_tlb_all(); - gdt_descr.address = __pa(get_cpu_gdt_table(0)); + gdt_descr.address = get_cpu_gdt_paddr(0); gdt_descr.size = GDT_SIZE - 1; load_gdt(&gdt_descr); @@ -79,7 +79,7 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd) { struct desc_ptr gdt_descr; - gdt_descr.address = (unsigned long)get_cpu_gdt_table(0); + gdt_descr.address = (unsigned long)get_cpu_gdt_rw(0); gdt_descr.size = GDT_SIZE - 1; load_gdt(&gdt_descr); diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index a4695da42d77..c488625c9712 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -35,7 +35,7 @@ #include <asm/setup.h> #include <asm/page.h> -#include <asm/e820.h> +#include <asm/e820/api.h> #include <asm/pgtable.h> #include <asm/tlbflush.h> #include <asm/proto.h> @@ -47,7 +47,7 @@ #include <asm/pgalloc.h> /* - * We allocate runtime services regions bottom-up, starting from -4G, i.e. + * We allocate runtime services regions top-down, starting from -4G, i.e. * 0xffff_ffff_0000_0000 and limit EFI VA mapping space to 64G. */ static u64 efi_va = EFI_VA_START; @@ -135,6 +135,7 @@ static pgd_t *efi_pgd; int __init efi_alloc_page_tables(void) { pgd_t *pgd; + p4d_t *p4d; pud_t *pud; gfp_t gfp_mask; @@ -147,15 +148,20 @@ int __init efi_alloc_page_tables(void) return -ENOMEM; pgd = efi_pgd + pgd_index(EFI_VA_END); + p4d = p4d_alloc(&init_mm, pgd, EFI_VA_END); + if (!p4d) { + free_page((unsigned long)efi_pgd); + return -ENOMEM; + } - pud = pud_alloc_one(NULL, 0); + pud = pud_alloc(&init_mm, p4d, EFI_VA_END); if (!pud) { + if (CONFIG_PGTABLE_LEVELS > 4) + free_page((unsigned long) pgd_page_vaddr(*pgd)); free_page((unsigned long)efi_pgd); return -ENOMEM; } - pgd_populate(NULL, pgd, pud); - return 0; } @@ -166,6 +172,7 @@ void efi_sync_low_kernel_mappings(void) { unsigned num_entries; pgd_t *pgd_k, *pgd_efi; + p4d_t *p4d_k, *p4d_efi; pud_t *pud_k, *pud_efi; if (efi_enabled(EFI_OLD_MEMMAP)) @@ -190,23 +197,37 @@ void efi_sync_low_kernel_mappings(void) memcpy(pgd_efi, pgd_k, sizeof(pgd_t) * num_entries); /* + * As with PGDs, we share all P4D entries apart from the one entry + * that covers the EFI runtime mapping space. 
+ */ + BUILD_BUG_ON(p4d_index(EFI_VA_END) != p4d_index(MODULES_END)); + BUILD_BUG_ON((EFI_VA_START & P4D_MASK) != (EFI_VA_END & P4D_MASK)); + + pgd_efi = efi_pgd + pgd_index(EFI_VA_END); + pgd_k = pgd_offset_k(EFI_VA_END); + p4d_efi = p4d_offset(pgd_efi, 0); + p4d_k = p4d_offset(pgd_k, 0); + + num_entries = p4d_index(EFI_VA_END); + memcpy(p4d_efi, p4d_k, sizeof(p4d_t) * num_entries); + + /* * We share all the PUD entries apart from those that map the * EFI regions. Copy around them. */ BUILD_BUG_ON((EFI_VA_START & ~PUD_MASK) != 0); BUILD_BUG_ON((EFI_VA_END & ~PUD_MASK) != 0); - pgd_efi = efi_pgd + pgd_index(EFI_VA_END); - pud_efi = pud_offset(pgd_efi, 0); - - pgd_k = pgd_offset_k(EFI_VA_END); - pud_k = pud_offset(pgd_k, 0); + p4d_efi = p4d_offset(pgd_efi, EFI_VA_END); + p4d_k = p4d_offset(pgd_k, EFI_VA_END); + pud_efi = pud_offset(p4d_efi, 0); + pud_k = pud_offset(p4d_k, 0); num_entries = pud_index(EFI_VA_END); memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries); - pud_efi = pud_offset(pgd_efi, EFI_VA_START); - pud_k = pud_offset(pgd_k, EFI_VA_START); + pud_efi = pud_offset(p4d_efi, EFI_VA_START); + pud_k = pud_offset(p4d_k, EFI_VA_START); num_entries = PTRS_PER_PUD - pud_index(EFI_VA_START); memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries); diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c index 30031d5293c4..26615991d69c 100644 --- a/arch/x86/platform/efi/quirks.c +++ b/arch/x86/platform/efi/quirks.c @@ -11,6 +11,8 @@ #include <linux/bootmem.h> #include <linux/acpi.h> #include <linux/dmi.h> + +#include <asm/e820/api.h> #include <asm/efi.h> #include <asm/uv/uv.h> @@ -201,6 +203,10 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size) return; } + /* No need to reserve regions that will never be freed. */ + if (md.attribute & EFI_MEMORY_RUNTIME) + return; + size += addr % EFI_PAGE_SIZE; size = round_up(size, EFI_PAGE_SIZE); addr = round_down(addr, EFI_PAGE_SIZE); @@ -240,14 +246,14 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size) * else. We must only reserve (and then free) regions: * * - Not within any part of the kernel - * - Not the BIOS reserved area (E820_RESERVED, E820_NVS, etc) + * - Not the BIOS reserved area (E820_TYPE_RESERVED, E820_TYPE_NVS, etc) */ static bool can_free_region(u64 start, u64 size) { if (start + size > __pa_symbol(_text) && start <= __pa_symbol(_end)) return false; - if (!e820_all_mapped(start, start+size, E820_RAM)) + if (!e820__mapped_all(start, start+size, E820_TYPE_RAM)) return false; return true; @@ -280,7 +286,7 @@ void __init efi_reserve_boot_services(void) * A good example of a critical region that must not be * freed is page zero (first 4Kb of memory), which may * contain boot services code/data but is marked - * E820_RESERVED by trim_bios_range(). + * E820_TYPE_RESERVED by trim_bios_range(). 
*/ if (!already_reserved) { memblock_reserve(start, size); diff --git a/arch/x86/platform/intel-mid/device_libs/Makefile b/arch/x86/platform/intel-mid/device_libs/Makefile index 3dbde04febdc..53e0235e308f 100644 --- a/arch/x86/platform/intel-mid/device_libs/Makefile +++ b/arch/x86/platform/intel-mid/device_libs/Makefile @@ -2,8 +2,9 @@ obj-$(subst m,y,$(CONFIG_PINCTRL_MERRIFIELD)) += platform_mrfld_pinctrl.o # SDHCI Devices obj-$(subst m,y,$(CONFIG_MMC_SDHCI_PCI)) += platform_mrfld_sd.o -# WiFi +# WiFi + BT obj-$(subst m,y,$(CONFIG_BRCMFMAC_SDIO)) += platform_bcm43xx.o +obj-$(subst m,y,$(CONFIG_BT_HCIUART_BCM)) += platform_bt.o # IPC Devices obj-$(subst m,y,$(CONFIG_MFD_INTEL_MSIC)) += platform_msic.o obj-$(subst m,y,$(CONFIG_SND_MFLD_MACHINE)) += platform_msic_audio.o diff --git a/arch/x86/platform/intel-mid/device_libs/platform_bt.c b/arch/x86/platform/intel-mid/device_libs/platform_bt.c new file mode 100644 index 000000000000..5a0483e7bf66 --- /dev/null +++ b/arch/x86/platform/intel-mid/device_libs/platform_bt.c @@ -0,0 +1,108 @@ +/* + * Bluetooth platform data initialization file + * + * (C) Copyright 2017 Intel Corporation + * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 + * of the License. + */ + +#include <linux/gpio/machine.h> +#include <linux/pci.h> +#include <linux/platform_device.h> + +#include <asm/cpu_device_id.h> +#include <asm/intel-family.h> +#include <asm/intel-mid.h> + +struct bt_sfi_data { + struct device *dev; + const char *name; + int (*setup)(struct bt_sfi_data *ddata); +}; + +static struct gpiod_lookup_table tng_bt_sfi_gpio_table = { + .dev_id = "hci_bcm", + .table = { + GPIO_LOOKUP("0000:00:0c.0", -1, "device-wakeup", GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("0000:00:0c.0", -1, "shutdown", GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("0000:00:0c.0", -1, "host-wakeup", GPIO_ACTIVE_HIGH), + { }, + }, +}; + +#define TNG_BT_SFI_GPIO_DEVICE_WAKEUP "bt_wakeup" +#define TNG_BT_SFI_GPIO_SHUTDOWN "BT-reset" +#define TNG_BT_SFI_GPIO_HOST_WAKEUP "bt_uart_enable" + +static int __init tng_bt_sfi_setup(struct bt_sfi_data *ddata) +{ + struct gpiod_lookup_table *table = &tng_bt_sfi_gpio_table; + struct gpiod_lookup *lookup = table->table; + struct pci_dev *pdev; + + /* Connected to /dev/ttyS0 */ + pdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(4, 1)); + if (!pdev) + return -ENODEV; + + ddata->dev = &pdev->dev; + ddata->name = table->dev_id; + + lookup[0].chip_hwnum = get_gpio_by_name(TNG_BT_SFI_GPIO_DEVICE_WAKEUP); + lookup[1].chip_hwnum = get_gpio_by_name(TNG_BT_SFI_GPIO_SHUTDOWN); + lookup[2].chip_hwnum = get_gpio_by_name(TNG_BT_SFI_GPIO_HOST_WAKEUP); + + gpiod_add_lookup_table(table); + return 0; +} + +static struct bt_sfi_data tng_bt_sfi_data __initdata = { + .setup = tng_bt_sfi_setup, +}; + +#define ICPU(model, ddata) \ + { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (kernel_ulong_t)&ddata } + +static const struct x86_cpu_id bt_sfi_cpu_ids[] = { + ICPU(INTEL_FAM6_ATOM_MERRIFIELD, tng_bt_sfi_data), + {} +}; + +static int __init bt_sfi_init(void) +{ + struct platform_device_info info; + struct platform_device *pdev; + const struct x86_cpu_id *id; + struct bt_sfi_data *ddata; + int ret; + + id = x86_match_cpu(bt_sfi_cpu_ids); + if (!id) + return -ENODEV; + + ddata = (struct bt_sfi_data *)id->driver_data; + if (!ddata) + return -ENODEV; + + ret = ddata->setup(ddata); + if (ret) + return 
ret; + + memset(&info, 0, sizeof(info)); + info.fwnode = ddata->dev->fwnode; + info.parent = ddata->dev; + info.name = ddata->name, + info.id = PLATFORM_DEVID_NONE, + + pdev = platform_device_register_full(&info); + if (IS_ERR(pdev)) + return PTR_ERR(pdev); + + dev_info(ddata->dev, "Registered Bluetooth device: %s\n", ddata->name); + return 0; +} +device_initcall(bt_sfi_init); diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c index f25982cdff90..42e65fee5673 100644 --- a/arch/x86/platform/uv/tlb_uv.c +++ b/arch/x86/platform/uv/tlb_uv.c @@ -23,28 +23,7 @@ #include <asm/irq_vectors.h> #include <asm/timer.h> -static struct bau_operations ops; - -static struct bau_operations uv123_bau_ops = { - .bau_gpa_to_offset = uv_gpa_to_offset, - .read_l_sw_ack = read_mmr_sw_ack, - .read_g_sw_ack = read_gmmr_sw_ack, - .write_l_sw_ack = write_mmr_sw_ack, - .write_g_sw_ack = write_gmmr_sw_ack, - .write_payload_first = write_mmr_payload_first, - .write_payload_last = write_mmr_payload_last, -}; - -static struct bau_operations uv4_bau_ops = { - .bau_gpa_to_offset = uv_gpa_to_soc_phys_ram, - .read_l_sw_ack = read_mmr_proc_sw_ack, - .read_g_sw_ack = read_gmmr_proc_sw_ack, - .write_l_sw_ack = write_mmr_proc_sw_ack, - .write_g_sw_ack = write_gmmr_proc_sw_ack, - .write_payload_first = write_mmr_proc_payload_first, - .write_payload_last = write_mmr_proc_payload_last, -}; - +static struct bau_operations ops __ro_after_init; /* timeouts in nanoseconds (indexed by UVH_AGING_PRESCALE_SEL urgency7 30:28) */ static int timeout_base_ns[] = { @@ -548,11 +527,12 @@ static unsigned long uv1_read_status(unsigned long mmr_offset, int right_shift) * return COMPLETE, RETRY(PLUGGED or TIMEOUT) or GIVEUP */ static int uv1_wait_completion(struct bau_desc *bau_desc, - unsigned long mmr_offset, int right_shift, struct bau_control *bcp, long try) { unsigned long descriptor_status; cycles_t ttm; + u64 mmr_offset = bcp->status_mmr; + int right_shift = bcp->status_index; struct ptc_stats *stat = bcp->statp; descriptor_status = uv1_read_status(mmr_offset, right_shift); @@ -640,11 +620,12 @@ int handle_uv2_busy(struct bau_control *bcp) } static int uv2_3_wait_completion(struct bau_desc *bau_desc, - unsigned long mmr_offset, int right_shift, struct bau_control *bcp, long try) { unsigned long descriptor_stat; cycles_t ttm; + u64 mmr_offset = bcp->status_mmr; + int right_shift = bcp->status_index; int desc = bcp->uvhub_cpu; long busy_reps = 0; struct ptc_stats *stat = bcp->statp; @@ -706,28 +687,59 @@ static int uv2_3_wait_completion(struct bau_desc *bau_desc, } /* - * There are 2 status registers; each and array[32] of 2 bits. Set up for - * which register to read and position in that register based on cpu in - * current hub. 
+ * Returns the status of current BAU message for cpu desc as a bit field + * [Error][Busy][Aux] */ -static int wait_completion(struct bau_desc *bau_desc, struct bau_control *bcp, long try) +static u64 read_status(u64 status_mmr, int index, int desc) { - int right_shift; - unsigned long mmr_offset; + u64 stat; + + stat = ((read_lmmr(status_mmr) >> index) & UV_ACT_STATUS_MASK) << 1; + stat |= (read_lmmr(UVH_LB_BAU_SB_ACTIVATION_STATUS_2) >> desc) & 0x1; + + return stat; +} + +static int uv4_wait_completion(struct bau_desc *bau_desc, + struct bau_control *bcp, long try) +{ + struct ptc_stats *stat = bcp->statp; + u64 descriptor_stat; + u64 mmr = bcp->status_mmr; + int index = bcp->status_index; int desc = bcp->uvhub_cpu; - if (desc < UV_CPUS_PER_AS) { - mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0; - right_shift = desc * UV_ACT_STATUS_SIZE; - } else { - mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1; - right_shift = ((desc - UV_CPUS_PER_AS) * UV_ACT_STATUS_SIZE); - } + descriptor_stat = read_status(mmr, index, desc); - if (bcp->uvhub_version == 1) - return uv1_wait_completion(bau_desc, mmr_offset, right_shift, bcp, try); - else - return uv2_3_wait_completion(bau_desc, mmr_offset, right_shift, bcp, try); + /* spin on the status MMR, waiting for it to go idle */ + while (descriptor_stat != UV2H_DESC_IDLE) { + switch (descriptor_stat) { + case UV2H_DESC_SOURCE_TIMEOUT: + stat->s_stimeout++; + return FLUSH_GIVEUP; + + case UV2H_DESC_DEST_TIMEOUT: + stat->s_dtimeout++; + bcp->conseccompletes = 0; + return FLUSH_RETRY_TIMEOUT; + + case UV2H_DESC_DEST_STRONG_NACK: + stat->s_plugged++; + bcp->conseccompletes = 0; + return FLUSH_RETRY_PLUGGED; + + case UV2H_DESC_DEST_PUT_ERR: + bcp->conseccompletes = 0; + return FLUSH_GIVEUP; + + default: + /* descriptor_stat is still BUSY */ + cpu_relax(); + } + descriptor_stat = read_status(mmr, index, desc); + } + bcp->conseccompletes++; + return FLUSH_COMPLETE; } /* @@ -918,7 +930,7 @@ int uv_flush_send_and_wait(struct cpumask *flush_mask, struct bau_control *bcp, struct uv1_bau_msg_header *uv1_hdr = NULL; struct uv2_3_bau_msg_header *uv2_3_hdr = NULL; - if (bcp->uvhub_version == 1) { + if (bcp->uvhub_version == UV_BAU_V1) { uv1 = 1; uv1_throttle(hmaster, stat); } @@ -958,7 +970,7 @@ int uv_flush_send_and_wait(struct cpumask *flush_mask, struct bau_control *bcp, write_mmr_activation(index); try++; - completion_stat = wait_completion(bau_desc, bcp, try); + completion_stat = ops.wait_completion(bau_desc, bcp, try); handle_cmplt(completion_stat, bau_desc, bcp, hmaster, stat); @@ -1114,15 +1126,12 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, unsigned long end, unsigned int cpu) { - int locals = 0; - int remotes = 0; - int hubs = 0; + int locals = 0, remotes = 0, hubs = 0; struct bau_desc *bau_desc; struct cpumask *flush_mask; struct ptc_stats *stat; struct bau_control *bcp; - unsigned long descriptor_status; - unsigned long status; + unsigned long descriptor_status, status, address; bcp = &per_cpu(bau_control, cpu); @@ -1171,10 +1180,24 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, record_send_statistics(stat, locals, hubs, remotes, bau_desc); if (!end || (end - start) <= PAGE_SIZE) - bau_desc->payload.address = start; + address = start; else - bau_desc->payload.address = TLB_FLUSH_ALL; - bau_desc->payload.sending_cpu = cpu; + address = TLB_FLUSH_ALL; + + switch (bcp->uvhub_version) { + case UV_BAU_V1: + case UV_BAU_V2: + case UV_BAU_V3: + bau_desc->payload.uv1_2_3.address = address; + 
bau_desc->payload.uv1_2_3.sending_cpu = cpu; + break; + case UV_BAU_V4: + bau_desc->payload.uv4.address = address; + bau_desc->payload.uv4.sending_cpu = cpu; + bau_desc->payload.uv4.qualifier = BAU_DESC_QUALIFIER; + break; + } + /* * uv_flush_send_and_wait returns 0 if all cpu's were messaged, * or 1 if it gave up and the original cpumask should be returned. @@ -1296,7 +1319,7 @@ void uv_bau_message_interrupt(struct pt_regs *regs) msgdesc.msg_slot = msg - msgdesc.queue_first; msgdesc.msg = msg; - if (bcp->uvhub_version == 2) + if (bcp->uvhub_version == UV_BAU_V2) process_uv2_message(&msgdesc, bcp); else /* no error workaround for uv1 or uv3 */ @@ -1838,7 +1861,7 @@ static void pq_init(int node, int pnode) * and the payload queue tail must be maintained by the kernel. */ bcp = &per_cpu(bau_control, smp_processor_id()); - if (bcp->uvhub_version <= 3) { + if (bcp->uvhub_version <= UV_BAU_V3) { tail = first; gnode = uv_gpa_to_gnode(uv_gpa(pqp)); first = (gnode << UV_PAYLOADQ_GNODE_SHIFT) | tail; @@ -2034,8 +2057,7 @@ static int scan_sock(struct socket_desc *sdp, struct uvhub_desc *bdp, struct bau_control **smasterp, struct bau_control **hmasterp) { - int i; - int cpu; + int i, cpu, uvhub_cpu; struct bau_control *bcp; for (i = 0; i < sdp->num_cpus; i++) { @@ -2052,19 +2074,33 @@ static int scan_sock(struct socket_desc *sdp, struct uvhub_desc *bdp, bcp->socket_master = *smasterp; bcp->uvhub = bdp->uvhub; if (is_uv1_hub()) - bcp->uvhub_version = 1; + bcp->uvhub_version = UV_BAU_V1; else if (is_uv2_hub()) - bcp->uvhub_version = 2; + bcp->uvhub_version = UV_BAU_V2; else if (is_uv3_hub()) - bcp->uvhub_version = 3; + bcp->uvhub_version = UV_BAU_V3; else if (is_uv4_hub()) - bcp->uvhub_version = 4; + bcp->uvhub_version = UV_BAU_V4; else { pr_emerg("uvhub version not 1, 2, 3, or 4\n"); return 1; } bcp->uvhub_master = *hmasterp; - bcp->uvhub_cpu = uv_cpu_blade_processor_id(cpu); + uvhub_cpu = uv_cpu_blade_processor_id(cpu); + bcp->uvhub_cpu = uvhub_cpu; + + /* + * The ERROR and BUSY status registers are located pairwise over + * the STATUS_0 and STATUS_1 mmrs; each an array[32] of 2 bits. 
+ */ + if (uvhub_cpu < UV_CPUS_PER_AS) { + bcp->status_mmr = UVH_LB_BAU_SB_ACTIVATION_STATUS_0; + bcp->status_index = uvhub_cpu * UV_ACT_STATUS_SIZE; + } else { + bcp->status_mmr = UVH_LB_BAU_SB_ACTIVATION_STATUS_1; + bcp->status_index = (uvhub_cpu - UV_CPUS_PER_AS) + * UV_ACT_STATUS_SIZE; + } if (bcp->uvhub_cpu >= MAX_CPUS_PER_UVHUB) { pr_emerg("%d cpus per uvhub invalid\n", @@ -2147,6 +2183,39 @@ fail: return 1; } +static const struct bau_operations uv1_bau_ops __initconst = { + .bau_gpa_to_offset = uv_gpa_to_offset, + .read_l_sw_ack = read_mmr_sw_ack, + .read_g_sw_ack = read_gmmr_sw_ack, + .write_l_sw_ack = write_mmr_sw_ack, + .write_g_sw_ack = write_gmmr_sw_ack, + .write_payload_first = write_mmr_payload_first, + .write_payload_last = write_mmr_payload_last, + .wait_completion = uv1_wait_completion, +}; + +static const struct bau_operations uv2_3_bau_ops __initconst = { + .bau_gpa_to_offset = uv_gpa_to_offset, + .read_l_sw_ack = read_mmr_sw_ack, + .read_g_sw_ack = read_gmmr_sw_ack, + .write_l_sw_ack = write_mmr_sw_ack, + .write_g_sw_ack = write_gmmr_sw_ack, + .write_payload_first = write_mmr_payload_first, + .write_payload_last = write_mmr_payload_last, + .wait_completion = uv2_3_wait_completion, +}; + +static const struct bau_operations uv4_bau_ops __initconst = { + .bau_gpa_to_offset = uv_gpa_to_soc_phys_ram, + .read_l_sw_ack = read_mmr_proc_sw_ack, + .read_g_sw_ack = read_gmmr_proc_sw_ack, + .write_l_sw_ack = write_mmr_proc_sw_ack, + .write_g_sw_ack = write_gmmr_proc_sw_ack, + .write_payload_first = write_mmr_proc_payload_first, + .write_payload_last = write_mmr_proc_payload_last, + .wait_completion = uv4_wait_completion, +}; + /* * Initialization of BAU-related structures */ @@ -2166,11 +2235,11 @@ static int __init uv_bau_init(void) if (is_uv4_hub()) ops = uv4_bau_ops; else if (is_uv3_hub()) - ops = uv123_bau_ops; + ops = uv2_3_bau_ops; else if (is_uv2_hub()) - ops = uv123_bau_ops; + ops = uv2_3_bau_ops; else if (is_uv1_hub()) - ops = uv123_bau_ops; + ops = uv1_bau_ops; for_each_possible_cpu(cur_cpu) { mask = &per_cpu(uv_flush_tlb_mask, cur_cpu); diff --git a/arch/x86/platform/uv/uv_time.c b/arch/x86/platform/uv/uv_time.c index 2ee7632d4916..b082d71b08ee 100644 --- a/arch/x86/platform/uv/uv_time.c +++ b/arch/x86/platform/uv/uv_time.c @@ -390,9 +390,11 @@ static __init int uv_rtc_setup_clock(void) clock_event_device_uv.min_delta_ns = NSEC_PER_SEC / sn_rtc_cycles_per_second; + clock_event_device_uv.min_delta_ticks = 1; clock_event_device_uv.max_delta_ns = clocksource_uv.mask * (NSEC_PER_SEC / sn_rtc_cycles_per_second); + clock_event_device_uv.max_delta_ticks = clocksource_uv.mask; rc = schedule_on_each_cpu(uv_rtc_register_clockevents); if (rc) { |
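The tlb_uv.c changes converge on a single dispatch pattern: a per-hub-version operations struct is chosen once in uv_bau_init() and stored in the file-scope 'ops' instance (now __ro_after_init), so the hot path calls ops.wait_completion() instead of branching on uvhub_version. A minimal userspace sketch of that pattern follows; the struct, the two wait functions, and the is_uv4 flag are illustrative stand-ins, not the kernel symbols:

/*
 * Hedged sketch (plain userspace C): select an ops struct once at init,
 * then dispatch through it on the hot path.
 */
#include <stdio.h>

struct bau_ops {
    int (*wait_completion)(int try);
};

static int uv2_3_wait(int try) { return 0; }    /* FLUSH_COMPLETE stand-in */
static int uv4_wait(int try)   { return 0; }

static const struct bau_ops uv2_3_ops = { .wait_completion = uv2_3_wait };
static const struct bau_ops uv4_ops   = { .wait_completion = uv4_wait };

static struct bau_ops ops;    /* written once at init, read-only afterwards */

int main(void)
{
    int is_uv4 = 1;    /* stand-in for is_uv4_hub() */

    ops = is_uv4 ? uv4_ops : uv2_3_ops;    /* mirrors uv_bau_init() */
    printf("completion status: %d\n", ops.wait_completion(1));
    return 0;
}

Selecting the ops struct once at boot is also why the old shared uv123_bau_ops is split above into uv1, uv2_3 and uv4 variants, each carrying its own wait_completion implementation.

The new read_status() helper packs the per-cpu status into a small bit field: two bits ([Error][Busy]) come from STATUS_0 or STATUS_1, selected by bcp->status_mmr and shifted by bcp->status_index, and an auxiliary bit comes from STATUS_2, indexed by the descriptor number. A hedged sketch of that packing, with the MMR reads replaced by plain variables and the mask width assumed from the 2-bits-per-cpu layout:

/*
 * Hedged sketch of the read_status() bit packing: not the kernel code,
 * just the same shifts and masks applied to ordinary integers.
 */
#include <stdio.h>
#include <stdint.h>

#define UV_ACT_STATUS_MASK 0x3ULL    /* two status bits per cpu in STATUS_0/1 */

static uint64_t read_status(uint64_t status01, int index, uint64_t status2, int desc)
{
    uint64_t stat;

    stat  = ((status01 >> index) & UV_ACT_STATUS_MASK) << 1;    /* [Error][Busy] */
    stat |= (status2 >> desc) & 0x1;                            /* [Aux] */
    return stat;
}

int main(void)
{
    /* cpu 3 of the hub: bits 7:6 in STATUS_0, bit 3 in STATUS_2 */
    uint64_t s01 = 0x2ULL << 6, s2 = 1ULL << 3;

    printf("stat = %#llx\n", (unsigned long long)read_status(s01, 6, s2, 3));
    return 0;
}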