Diffstat (limited to 'arch/sparc64')
-rw-r--r--  arch/sparc64/Kconfig                  324
-rw-r--r--  arch/sparc64/kernel/Makefile            2
-rw-r--r--  arch/sparc64/kernel/entry.S           619
-rw-r--r--  arch/sparc64/kernel/head.S              3
-rw-r--r--  arch/sparc64/kernel/irq.c              20
-rw-r--r--  arch/sparc64/kernel/pci.c               4
-rw-r--r--  arch/sparc64/kernel/pci_iommu.c         2
-rw-r--r--  arch/sparc64/kernel/power.c             2
-rw-r--r--  arch/sparc64/kernel/process.c           8
-rw-r--r--  arch/sparc64/kernel/rtrap.S            13
-rw-r--r--  arch/sparc64/kernel/sbus.c              2
-rw-r--r--  arch/sparc64/kernel/setup.c            13
-rw-r--r--  arch/sparc64/kernel/signal.c           11
-rw-r--r--  arch/sparc64/kernel/signal32.c         33
-rw-r--r--  arch/sparc64/kernel/smp.c              36
-rw-r--r--  arch/sparc64/kernel/sparc64_ksyms.c    35
-rw-r--r--  arch/sparc64/kernel/systbls.S           8
-rw-r--r--  arch/sparc64/kernel/traps.c           274
-rw-r--r--  arch/sparc64/kernel/ttable.S           27
-rw-r--r--  arch/sparc64/kernel/una_asm.S         153
-rw-r--r--  arch/sparc64/kernel/unaligned.c       279
-rw-r--r--  arch/sparc64/kernel/us2e_cpufreq.c     36
-rw-r--r--  arch/sparc64/kernel/us3_cpufreq.c      29
-rw-r--r--  arch/sparc64/kernel/winfixup.S          6
-rw-r--r--  arch/sparc64/lib/Makefile               2
-rw-r--r--  arch/sparc64/lib/PeeCeeI.c             77
-rw-r--r--  arch/sparc64/lib/copy_page.S           13
-rw-r--r--  arch/sparc64/lib/debuglocks.c          56
-rw-r--r--  arch/sparc64/lib/mb.S                  73
-rw-r--r--  arch/sparc64/mm/generic.c              31
-rw-r--r--  arch/sparc64/mm/init.c                 23
-rw-r--r--  arch/sparc64/mm/ultra.S                39
-rw-r--r--  arch/sparc64/solaris/misc.c             6
-rw-r--r--  arch/sparc64/solaris/socket.c         193
34 files changed, 1225 insertions, 1227 deletions
diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig
index 6a4733683f0f..73ec6aec5ed5 100644
--- a/arch/sparc64/Kconfig
+++ b/arch/sparc64/Kconfig
@@ -5,6 +5,16 @@
mainmenu "Linux/UltraSPARC Kernel Configuration"
+config SPARC64
+ bool
+ default y
+ help
+ SPARC is a family of RISC microprocessors designed and marketed by
+ Sun Microsystems, incorporated. This port covers the newer 64-bit
+ UltraSPARC. The UltraLinux project maintains both the SPARC32 and
+ SPARC64 ports; its web page is available at
+ <http://www.ultralinux.org/>.
+
config 64BIT
def_bool y
@@ -71,75 +81,6 @@ config SYSVIPC_COMPAT
menu "General machine setup"
-config BBC_I2C
- tristate "UltraSPARC-III bootbus i2c controller driver"
- depends on PCI
- help
- The BBC devices on the UltraSPARC III have two I2C controllers. The
- first I2C controller connects mainly to configuration PROMs (NVRAM,
- CPU configuration, DIMM types, etc.). The second I2C controller
- connects to environmental control devices such as fans and
- temperature sensors. The second controller also connects to the
- smartcard reader, if present. Say Y to enable support for these.
-
-config VT
- bool "Virtual terminal" if EMBEDDED
- select INPUT
- default y
- ---help---
- If you say Y here, you will get support for terminal devices with
- display and keyboard devices. These are called "virtual" because you
- can run several virtual terminals (also called virtual consoles) on
- one physical terminal. This is rather useful, for example one
- virtual terminal can collect system messages and warnings, another
- one can be used for a text-mode user session, and a third could run
- an X session, all in parallel. Switching between virtual terminals
- is done with certain key combinations, usually Alt-<function key>.
-
- The setterm command ("man setterm") can be used to change the
- properties (such as colors or beeping) of a virtual terminal. The
- man page console_codes(4) ("man console_codes") contains the special
- character sequences that can be used to change those properties
- directly. The fonts used on virtual terminals can be changed with
- the setfont ("man setfont") command and the key bindings are defined
- with the loadkeys ("man loadkeys") command.
-
- You need at least one virtual terminal device in order to make use
- of your keyboard and monitor. Therefore, only people configuring an
- embedded system would want to say N here in order to save some
- memory; the only way to log into such a system is then via a serial
- or network connection.
-
- If unsure, say Y, or else you won't be able to do much with your new
- shiny Linux system :-)
-
-config VT_CONSOLE
- bool "Support for console on virtual terminal" if EMBEDDED
- depends on VT
- default y
- ---help---
- The system console is the device which receives all kernel messages
- and warnings and which allows logins in single user mode. If you
- answer Y here, a virtual terminal (the device used to interact with
- a physical terminal) can be used as system console. This is the most
- common mode of operations, so you should say Y here unless you want
- the kernel messages be output only to a serial port (in which case
- you should say Y to "Console on serial port", below).
-
- If you do say Y here, by default the currently visible virtual
- terminal (/dev/tty0) will be used as system console. You can change
- that with a kernel command line option such as "console=tty3" which
- would use the third virtual terminal as system console. (Try "man
- bootparam" or see the documentation of your boot loader (lilo or
- loadlin) about how to pass options to the kernel at boot time.)
-
- If unsure, say Y.
-
-config HW_CONSOLE
- bool
- depends on VT
- default y
-
config SMP
bool "Symmetric multi-processing support"
---help---
@@ -205,17 +146,6 @@ config US2E_FREQ
If in doubt, say N.
-# Identify this as a Sparc64 build
-config SPARC64
- bool
- default y
- help
- SPARC is a family of RISC microprocessors designed and marketed by
- Sun Microsystems, incorporated. This port covers the newer 64-bit
- UltraSPARC. The UltraLinux project maintains both the SPARC32 and
- SPARC64 ports; its web page is available at
- <http://www.ultralinux.org/>.
-
# Global things across all Sun machines.
config RWSEM_GENERIC_SPINLOCK
bool
@@ -246,6 +176,10 @@ config HUGETLB_PAGE_SIZE_64K
endchoice
+endmenu
+
+source "mm/Kconfig"
+
config GENERIC_ISA_DMA
bool
default y
@@ -344,33 +278,6 @@ config PCI_DOMAINS
bool
default PCI
-config RTC
- tristate
- depends on PCI
- default y
- ---help---
- If you say Y here and create a character special file /dev/rtc with
- major number 10 and minor number 135 using mknod ("man mknod"), you
- will get access to the real time clock (or hardware clock) built
- into your computer.
-
- Every PC has such a clock built in. It can be used to generate
- signals from as low as 1Hz up to 8192Hz, and can also be used
- as a 24 hour alarm. It reports status information via the file
- /proc/driver/rtc and its behaviour is set by various ioctls on
- /dev/rtc.
-
- If you run Linux on a multiprocessor machine and said Y to
- "Symmetric Multi Processing" above, you should say Y here to read
- and set the RTC in an SMP compatible fashion.
-
- If you think you have a use for such a device (such as periodic data
- sampling), then say Y here, and read <file:Documentation/rtc.txt>
- for details.
-
- To compile this driver as a module, choose M here: the
- module will be called rtc.
-
source "drivers/pci/Kconfig"
config SUN_OPENPROMFS
@@ -414,6 +321,8 @@ config BINFMT_AOUT32
If you want to run SunOS binaries (see SunOS binary emulation below)
or other a.out binaries, say Y. If unsure, say N.
+menu "Executable file formats"
+
source "fs/Kconfig.binfmt"
config SUNOS_EMUL
@@ -436,74 +345,7 @@ config SOLARIS_EMUL
To compile this code as a module, choose M here: the
module will be called solaris.
-source "drivers/parport/Kconfig"
-
-config PRINTER
- tristate "Parallel printer support"
- depends on PARPORT
- ---help---
- If you intend to attach a printer to the parallel port of your Linux
- box (as opposed to using a serial printer; if the connector at the
- printer has 9 or 25 holes ["female"], then it's serial), say Y.
- Also read the Printing-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- It is possible to share one parallel port among several devices
- (e.g. printer and ZIP drive) and it is safe to compile the
- corresponding drivers into the kernel.
- To compile this driver as a module, choose M here and read
- <file:Documentation/parport.txt>. The module will be called lp.
-
- If you have several parallel ports, you can specify which ports to
- use with the "lp" kernel command line option. (Try "man bootparam"
- or see the documentation of your boot loader (lilo or loadlin) about
- how to pass options to the kernel at boot time.) The syntax of the
- "lp" command line option can be found in <file:drivers/char/lp.c>.
-
- If you have more than 8 printers, you need to increase the LP_NO
- macro in lp.c and the PARPORT_MAX macro in parport.h.
-
-config PPDEV
- tristate "Support for user-space parallel port device drivers"
- depends on PARPORT
- ---help---
- Saying Y to this adds support for /dev/parport device nodes. This
- is needed for programs that want portable access to the parallel
- port, for instance deviceid (which displays Plug-and-Play device
- IDs).
-
- This is the parallel port equivalent of SCSI generic support (sg).
- It is safe to say N to this -- it is not needed for normal printing
- or parallel port CD-ROM/disk support.
-
- To compile this driver as a module, choose M here: the
- module will be called ppdev.
-
- If unsure, say N.
-
-config ENVCTRL
- tristate "SUNW, envctrl support"
- depends on PCI
- help
- Kernel support for temperature and fan monitoring on Sun SME
- machines.
-
- To compile this driver as a module, choose M here: the
- module will be called envctrl.
-
-config DISPLAY7SEG
- tristate "7-Segment Display support"
- depends on PCI
- ---help---
- This is the driver for the 7-segment display and LED present on
- Sun Microsystems CompactPCI models CP1400 and CP1500.
-
- To compile this driver as a module, choose M here: the
- module will be called display7seg.
-
- If you do not have a CompactPCI model CP1400 or CP1500, or
- another UltraSPARC-IIi-cEngine boardset with a 7-segment display,
- you should say N to this option.
+endmenu
config CMDLINE_BOOL
bool "Default bootloader kernel arguments"
@@ -521,144 +363,16 @@ config CMDLINE
NOTE: This option WILL override the PROM bootargs setting!
-source "mm/Kconfig"
-
-endmenu
-
-source "drivers/base/Kconfig"
-
-source "drivers/video/Kconfig"
+source "net/Kconfig"
-source "drivers/serial/Kconfig"
+source "drivers/Kconfig"
source "drivers/sbus/char/Kconfig"
-source "drivers/mtd/Kconfig"
-
-source "drivers/block/Kconfig"
-
-source "drivers/ide/Kconfig"
-
-source "drivers/scsi/Kconfig"
-
source "drivers/fc4/Kconfig"
-source "drivers/md/Kconfig"
-
-if PCI
-source "drivers/message/fusion/Kconfig"
-endif
-
-source "drivers/ieee1394/Kconfig"
-
-source "net/Kconfig"
-
-source "drivers/isdn/Kconfig"
-
-source "drivers/telephony/Kconfig"
-
-# This one must be before the filesystem configs. -DaveM
-
-menu "Unix98 PTY support"
-
-config UNIX98_PTYS
- bool "Unix98 PTY support"
- ---help---
- A pseudo terminal (PTY) is a software device consisting of two
- halves: a master and a slave. The slave device behaves identical to
- a physical terminal; the master device is used by a process to
- read data from and write data to the slave, thereby emulating a
- terminal. Typical programs for the master side are telnet servers
- and xterms.
-
- Linux has traditionally used the BSD-like names /dev/ptyxx for
- masters and /dev/ttyxx for slaves of pseudo terminals. This scheme
- has a number of problems. The GNU C library glibc 2.1 and later,
- however, supports the Unix98 naming standard: in order to acquire a
- pseudo terminal, a process opens /dev/ptmx; the number of the pseudo
- terminal is then made available to the process and the pseudo
- terminal slave can be accessed as /dev/pts/<number>. What was
- traditionally /dev/ttyp2 will then be /dev/pts/2, for example.
-
- The entries in /dev/pts/ are created on the fly by a virtual
- file system; therefore, if you say Y here you should say Y to
- "/dev/pts file system for Unix98 PTYs" as well.
-
- If you want to say Y here, you need to have the C library glibc 2.1
- or later (equal to libc-6.1, check with "ls -l /lib/libc.so.*").
- Read the instructions in <file:Documentation/Changes> pertaining to
- pseudo terminals. It's safe to say N.
-
-config UNIX98_PTY_COUNT
- int "Maximum number of Unix98 PTYs in use (0-2048)"
- depends on UNIX98_PTYS
- default "256"
- help
- The maximum number of Unix98 PTYs that can be used at any one time.
- The default is 256, and should be enough for desktop systems. Server
- machines which support incoming telnet/rlogin/ssh connections and/or
- serve several X terminals may want to increase this: every incoming
- connection and every xterm uses up one PTY.
-
- When not in use, each additional set of 256 PTYs occupy
- approximately 8 KB of kernel memory on 32-bit architectures.
-
-endmenu
-
-menu "XFree86 DRI support"
-
-config DRM
- bool "Direct Rendering Manager (XFree86 DRI support)"
- help
- Kernel-level support for the Direct Rendering Infrastructure (DRI)
- introduced in XFree86 4.0. If you say Y here, you need to select
- the module that's right for your graphics card from the list below.
- These modules provide support for synchronization, security, and
- DMA transfers. Please see <http://dri.sourceforge.net/> for more
- details. You should also select and configure AGP
- (/dev/agpgart) support.
-
-config DRM_FFB
- tristate "Creator/Creator3D"
- depends on DRM && BROKEN
- help
- Choose this option if you have one of Sun's Creator3D-based graphics
- and frame buffer cards. Product page at
- <http://www.sun.com/desktop/products/Graphics/creator3d.html>.
-
-config DRM_TDFX
- tristate "3dfx Banshee/Voodoo3+"
- depends on DRM
- help
- Choose this option if you have a 3dfx Banshee or Voodoo3 (or later),
- graphics card. If M is selected, the module will be called tdfx.
-
-config DRM_R128
- tristate "ATI Rage 128"
- depends on DRM
- help
- Choose this option if you have an ATI Rage 128 graphics card. If M
- is selected, the module will be called r128. AGP support for
- this card is strongly suggested (unless you have a PCI version).
-
-endmenu
-
-source "drivers/input/Kconfig"
-
-source "drivers/i2c/Kconfig"
-
source "fs/Kconfig"
-source "drivers/media/Kconfig"
-
-source "sound/Kconfig"
-
-source "drivers/usb/Kconfig"
-
-source "drivers/infiniband/Kconfig"
-
-source "drivers/char/watchdog/Kconfig"
-
source "arch/sparc64/oprofile/Kconfig"
source "arch/sparc64/Kconfig.debug"
diff --git a/arch/sparc64/kernel/Makefile b/arch/sparc64/kernel/Makefile
index 093281bdf85f..6f00ab8b9d23 100644
--- a/arch/sparc64/kernel/Makefile
+++ b/arch/sparc64/kernel/Makefile
@@ -8,7 +8,7 @@ EXTRA_CFLAGS := -Werror
extra-y := head.o init_task.o vmlinux.lds
obj-y := process.o setup.o cpu.o idprom.o \
- traps.o devices.o auxio.o \
+ traps.o devices.o auxio.o una_asm.o \
irq.o ptrace.o time.o sys_sparc.o signal.o \
unaligned.o central.o pci.o starfire.o semaphore.o \
power.o sbus.o iommu_common.o sparc64_ksyms.o chmc.o
diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index d781f10adc52..3e0badb820c5 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -21,6 +21,7 @@
#include <asm/visasm.h>
#include <asm/estate.h>
#include <asm/auxio.h>
+#include <asm/sfafsr.h>
#define curptr g6
@@ -690,14 +691,159 @@ netbsd_syscall:
retl
nop
- /* These next few routines must be sure to clear the
- * SFSR FaultValid bit so that the fast tlb data protection
- * handler does not flush the wrong context and lock up the
- * box.
+ /* We need to carefully read the error status, ACK
+ * the errors, prevent recursive traps, and pass the
+ * information on to C code for logging.
+ *
+ * We pass the AFAR in as-is, and we encode the status
+ * information as described in asm-sparc64/sfafsr.h
+ */
+ .globl __spitfire_access_error
+__spitfire_access_error:
+ /* Disable ESTATE error reporting so that we do not
+ * take recursive traps and RED state the processor.
+ */
+ stxa %g0, [%g0] ASI_ESTATE_ERROR_EN
+ membar #Sync
+
+ mov UDBE_UE, %g1
+ ldxa [%g0] ASI_AFSR, %g4 ! Get AFSR
+
+ /* __spitfire_cee_trap branches here with AFSR in %g4 and
+ * UDBE_CE in %g1. It only clears ESTATE_ERR_CE in the
+ * ESTATE Error Enable register.
+ */
+__spitfire_cee_trap_continue:
+ ldxa [%g0] ASI_AFAR, %g5 ! Get AFAR
+
+ rdpr %tt, %g3
+ and %g3, 0x1ff, %g3 ! Paranoia
+ sllx %g3, SFSTAT_TRAP_TYPE_SHIFT, %g3
+ or %g4, %g3, %g4
+ rdpr %tl, %g3
+ cmp %g3, 1
+ mov 1, %g3
+ bleu %xcc, 1f
+ sllx %g3, SFSTAT_TL_GT_ONE_SHIFT, %g3
+
+ or %g4, %g3, %g4
+
+ /* Read in the UDB error register state, clearing the
+ * sticky error bits as-needed. We only clear them if
+ * the UE bit is set. Likewise, __spitfire_cee_trap
+ * below will only do so if the CE bit is set.
+ *
+ * NOTE: UltraSparc-I/II have high and low UDB error
+ * registers, corresponding to the two UDB units
+ * present on those chips. UltraSparc-IIi only
+ * has a single UDB, called "SDB" in the manual.
+ * For IIi the upper UDB register always reads
+ * as zero so for our purposes things will just
+ * work with the checks below.
+ */
+1: ldxa [%g0] ASI_UDBH_ERROR_R, %g3
+ and %g3, 0x3ff, %g7 ! Paranoia
+ sllx %g7, SFSTAT_UDBH_SHIFT, %g7
+ or %g4, %g7, %g4
+ andcc %g3, %g1, %g3 ! UDBE_UE or UDBE_CE
+ be,pn %xcc, 1f
+ nop
+ stxa %g3, [%g0] ASI_UDB_ERROR_W
+ membar #Sync
+
+1: mov 0x18, %g3
+ ldxa [%g3] ASI_UDBL_ERROR_R, %g3
+ and %g3, 0x3ff, %g7 ! Paranoia
+ sllx %g7, SFSTAT_UDBL_SHIFT, %g7
+ or %g4, %g7, %g4
+ andcc %g3, %g1, %g3 ! UDBE_UE or UDBE_CE
+ be,pn %xcc, 1f
+ nop
+ mov 0x18, %g7
+ stxa %g3, [%g7] ASI_UDB_ERROR_W
+ membar #Sync
+
+1: /* Ok, now that we've latched the error state,
+ * clear the sticky bits in the AFSR.
+ */
+ stxa %g4, [%g0] ASI_AFSR
+ membar #Sync
+
+ rdpr %tl, %g2
+ cmp %g2, 1
+ rdpr %pil, %g2
+ bleu,pt %xcc, 1f
+ wrpr %g0, 15, %pil
+
+ ba,pt %xcc, etraptl1
+ rd %pc, %g7
+
+ ba,pt %xcc, 2f
+ nop
+
+1: ba,pt %xcc, etrap_irq
+ rd %pc, %g7
+
+2: mov %l4, %o1
+ mov %l5, %o2
+ call spitfire_access_error
+ add %sp, PTREGS_OFF, %o0
+ ba,pt %xcc, rtrap
+ clr %l6
+
+ /* This is the trap handler entry point for ECC correctable
+ * errors. They are corrected, but we listen for the trap
+ * so that the event can be logged.
+ *
+ * Disrupting errors are either:
+ * 1) single-bit ECC errors during UDB reads to system
+ * memory
+ * 2) data parity errors during write-back events
+ *
+ * As far as I can make out from the manual, the CEE trap
+ * is only for correctable errors during memory read
+ * accesses by the front-end of the processor.
+ *
+ * The code below is only for trap level 1 CEE events,
+ * as it is the only situation where we can safely record
+ * and log. For trap level >1 we just clear the CE bit
+ * in the AFSR and return.
+ *
+	 * This is just like __spitfire_access_error above, but it
+ * specifically handles correctable errors. If an
+ * uncorrectable error is indicated in the AFSR we
+ * will branch directly above to __spitfire_access_error
+ * to handle it instead. Uncorrectable therefore takes
+ * priority over correctable, and the error logging
+ * C code will notice this case by inspecting the
+ * trap type.
+ */
+ .globl __spitfire_cee_trap
+__spitfire_cee_trap:
+ ldxa [%g0] ASI_AFSR, %g4 ! Get AFSR
+ mov 1, %g3
+ sllx %g3, SFAFSR_UE_SHIFT, %g3
+ andcc %g4, %g3, %g0 ! Check for UE
+ bne,pn %xcc, __spitfire_access_error
+ nop
+
+ /* Ok, in this case we only have a correctable error.
+ * Indicate we only wish to capture that state in register
+ * %g1, and we only disable CE error reporting unlike UE
+ * handling which disables all errors.
*/
- .globl __do_data_access_exception
- .globl __do_data_access_exception_tl1
-__do_data_access_exception_tl1:
+ ldxa [%g0] ASI_ESTATE_ERROR_EN, %g3
+ andn %g3, ESTATE_ERR_CE, %g3
+ stxa %g3, [%g0] ASI_ESTATE_ERROR_EN
+ membar #Sync
+
+ /* Preserve AFSR in %g4, indicate UDB state to capture in %g1 */
+ ba,pt %xcc, __spitfire_cee_trap_continue
+ mov UDBE_CE, %g1
+
+ .globl __spitfire_data_access_exception
+ .globl __spitfire_data_access_exception_tl1
+__spitfire_data_access_exception_tl1:
rdpr %pstate, %g4
wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate
mov TLB_SFSR, %g3
@@ -706,9 +852,25 @@ __do_data_access_exception_tl1:
ldxa [%g5] ASI_DMMU, %g5 ! Get SFAR
stxa %g0, [%g3] ASI_DMMU ! Clear SFSR.FaultValid bit
membar #Sync
+ rdpr %tt, %g3
+ cmp %g3, 0x80 ! first win spill/fill trap
+ blu,pn %xcc, 1f
+ cmp %g3, 0xff ! last win spill/fill trap
+ bgu,pn %xcc, 1f
+ nop
ba,pt %xcc, winfix_dax
rdpr %tpc, %g3
-__do_data_access_exception:
+1: sethi %hi(109f), %g7
+ ba,pt %xcc, etraptl1
+109: or %g7, %lo(109b), %g7
+ mov %l4, %o1
+ mov %l5, %o2
+ call spitfire_data_access_exception_tl1
+ add %sp, PTREGS_OFF, %o0
+ ba,pt %xcc, rtrap
+ clr %l6
+
+__spitfire_data_access_exception:
rdpr %pstate, %g4
wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate
mov TLB_SFSR, %g3
@@ -722,20 +884,19 @@ __do_data_access_exception:
109: or %g7, %lo(109b), %g7
mov %l4, %o1
mov %l5, %o2
- call data_access_exception
+ call spitfire_data_access_exception
add %sp, PTREGS_OFF, %o0
ba,pt %xcc, rtrap
clr %l6
- .globl __do_instruction_access_exception
- .globl __do_instruction_access_exception_tl1
-__do_instruction_access_exception_tl1:
+ .globl __spitfire_insn_access_exception
+ .globl __spitfire_insn_access_exception_tl1
+__spitfire_insn_access_exception_tl1:
rdpr %pstate, %g4
wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate
mov TLB_SFSR, %g3
- mov DMMU_SFAR, %g5
- ldxa [%g3] ASI_DMMU, %g4 ! Get SFSR
- ldxa [%g5] ASI_DMMU, %g5 ! Get SFAR
+ ldxa [%g3] ASI_IMMU, %g4 ! Get SFSR
+ rdpr %tpc, %g5 ! IMMU has no SFAR, use TPC
stxa %g0, [%g3] ASI_IMMU ! Clear FaultValid bit
membar #Sync
sethi %hi(109f), %g7
@@ -743,18 +904,17 @@ __do_instruction_access_exception_tl1:
109: or %g7, %lo(109b), %g7
mov %l4, %o1
mov %l5, %o2
- call instruction_access_exception_tl1
+ call spitfire_insn_access_exception_tl1
add %sp, PTREGS_OFF, %o0
ba,pt %xcc, rtrap
clr %l6
-__do_instruction_access_exception:
+__spitfire_insn_access_exception:
rdpr %pstate, %g4
wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate
mov TLB_SFSR, %g3
- mov DMMU_SFAR, %g5
- ldxa [%g3] ASI_DMMU, %g4 ! Get SFSR
- ldxa [%g5] ASI_DMMU, %g5 ! Get SFAR
+ ldxa [%g3] ASI_IMMU, %g4 ! Get SFSR
+ rdpr %tpc, %g5 ! IMMU has no SFAR, use TPC
stxa %g0, [%g3] ASI_IMMU ! Clear FaultValid bit
membar #Sync
sethi %hi(109f), %g7
@@ -762,235 +922,11 @@ __do_instruction_access_exception:
109: or %g7, %lo(109b), %g7
mov %l4, %o1
mov %l5, %o2
- call instruction_access_exception
+ call spitfire_insn_access_exception
add %sp, PTREGS_OFF, %o0
ba,pt %xcc, rtrap
clr %l6
- /* This is the trap handler entry point for ECC correctable
- * errors. They are corrected, but we listen for the trap
- * so that the event can be logged.
- *
- * Disrupting errors are either:
- * 1) single-bit ECC errors during UDB reads to system
- * memory
- * 2) data parity errors during write-back events
- *
- * As far as I can make out from the manual, the CEE trap
- * is only for correctable errors during memory read
- * accesses by the front-end of the processor.
- *
- * The code below is only for trap level 1 CEE events,
- * as it is the only situation where we can safely record
- * and log. For trap level >1 we just clear the CE bit
- * in the AFSR and return.
- */
-
- /* Our trap handling infrastructure allows us to preserve
- * two 64-bit values during etrap for arguments to
- * subsequent C code. Therefore we encode the information
- * as follows:
- *
- * value 1) Full 64-bits of AFAR
- * value 2) Low 33-bits of AFSR, then bits 33-->42
- * are UDBL error status and bits 43-->52
- * are UDBH error status
- */
- .align 64
- .globl cee_trap
-cee_trap:
- ldxa [%g0] ASI_AFSR, %g1 ! Read AFSR
- ldxa [%g0] ASI_AFAR, %g2 ! Read AFAR
- sllx %g1, 31, %g1 ! Clear reserved bits
- srlx %g1, 31, %g1 ! in AFSR
-
- /* NOTE: UltraSparc-I/II have high and low UDB error
- * registers, corresponding to the two UDB units
- * present on those chips. UltraSparc-IIi only
- * has a single UDB, called "SDB" in the manual.
- * For IIi the upper UDB register always reads
- * as zero so for our purposes things will just
- * work with the checks below.
- */
- ldxa [%g0] ASI_UDBL_ERROR_R, %g3 ! Read UDB-Low error status
- andcc %g3, (1 << 8), %g4 ! Check CE bit
- sllx %g3, (64 - 10), %g3 ! Clear reserved bits
- srlx %g3, (64 - 10), %g3 ! in UDB-Low error status
-
- sllx %g3, (33 + 0), %g3 ! Shift up to encoding area
- or %g1, %g3, %g1 ! Or it in
- be,pn %xcc, 1f ! Branch if CE bit was clear
- nop
- stxa %g4, [%g0] ASI_UDB_ERROR_W ! Clear CE sticky bit in UDBL
- membar #Sync ! Synchronize ASI stores
-1: mov 0x18, %g5 ! Addr of UDB-High error status
- ldxa [%g5] ASI_UDBH_ERROR_R, %g3 ! Read it
-
- andcc %g3, (1 << 8), %g4 ! Check CE bit
- sllx %g3, (64 - 10), %g3 ! Clear reserved bits
- srlx %g3, (64 - 10), %g3 ! in UDB-High error status
- sllx %g3, (33 + 10), %g3 ! Shift up to encoding area
- or %g1, %g3, %g1 ! Or it in
- be,pn %xcc, 1f ! Branch if CE bit was clear
- nop
- nop
-
- stxa %g4, [%g5] ASI_UDB_ERROR_W ! Clear CE sticky bit in UDBH
- membar #Sync ! Synchronize ASI stores
-1: mov 1, %g5 ! AFSR CE bit is
- sllx %g5, 20, %g5 ! bit 20
- stxa %g5, [%g0] ASI_AFSR ! Clear CE sticky bit in AFSR
- membar #Sync ! Synchronize ASI stores
- sllx %g2, (64 - 41), %g2 ! Clear reserved bits
- srlx %g2, (64 - 41), %g2 ! in latched AFAR
-
- andn %g2, 0x0f, %g2 ! Finish resv bit clearing
- mov %g1, %g4 ! Move AFSR+UDB* into save reg
- mov %g2, %g5 ! Move AFAR into save reg
- rdpr %pil, %g2
- wrpr %g0, 15, %pil
- ba,pt %xcc, etrap_irq
- rd %pc, %g7
- mov %l4, %o0
-
- mov %l5, %o1
- call cee_log
- add %sp, PTREGS_OFF, %o2
- ba,a,pt %xcc, rtrap_irq
-
- /* Capture I/D/E-cache state into per-cpu error scoreboard.
- *
- * %g1: (TL>=0) ? 1 : 0
- * %g2: scratch
- * %g3: scratch
- * %g4: AFSR
- * %g5: AFAR
- * %g6: current thread ptr
- * %g7: scratch
- */
-#define CHEETAH_LOG_ERROR \
- /* Put "TL1" software bit into AFSR. */ \
- and %g1, 0x1, %g1; \
- sllx %g1, 63, %g2; \
- or %g4, %g2, %g4; \
- /* Get log entry pointer for this cpu at this trap level. */ \
- BRANCH_IF_JALAPENO(g2,g3,50f) \
- ldxa [%g0] ASI_SAFARI_CONFIG, %g2; \
- srlx %g2, 17, %g2; \
- ba,pt %xcc, 60f; \
- and %g2, 0x3ff, %g2; \
-50: ldxa [%g0] ASI_JBUS_CONFIG, %g2; \
- srlx %g2, 17, %g2; \
- and %g2, 0x1f, %g2; \
-60: sllx %g2, 9, %g2; \
- sethi %hi(cheetah_error_log), %g3; \
- ldx [%g3 + %lo(cheetah_error_log)], %g3; \
- brz,pn %g3, 80f; \
- nop; \
- add %g3, %g2, %g3; \
- sllx %g1, 8, %g1; \
- add %g3, %g1, %g1; \
- /* %g1 holds pointer to the top of the logging scoreboard */ \
- ldx [%g1 + 0x0], %g7; \
- cmp %g7, -1; \
- bne,pn %xcc, 80f; \
- nop; \
- stx %g4, [%g1 + 0x0]; \
- stx %g5, [%g1 + 0x8]; \
- add %g1, 0x10, %g1; \
- /* %g1 now points to D-cache logging area */ \
- set 0x3ff8, %g2; /* DC_addr mask */ \
- and %g5, %g2, %g2; /* DC_addr bits of AFAR */ \
- srlx %g5, 12, %g3; \
- or %g3, 1, %g3; /* PHYS tag + valid */ \
-10: ldxa [%g2] ASI_DCACHE_TAG, %g7; \
- cmp %g3, %g7; /* TAG match? */ \
- bne,pt %xcc, 13f; \
- nop; \
- /* Yep, what we want, capture state. */ \
- stx %g2, [%g1 + 0x20]; \
- stx %g7, [%g1 + 0x28]; \
- /* A membar Sync is required before and after utag access. */ \
- membar #Sync; \
- ldxa [%g2] ASI_DCACHE_UTAG, %g7; \
- membar #Sync; \
- stx %g7, [%g1 + 0x30]; \
- ldxa [%g2] ASI_DCACHE_SNOOP_TAG, %g7; \
- stx %g7, [%g1 + 0x38]; \
- clr %g3; \
-12: ldxa [%g2 + %g3] ASI_DCACHE_DATA, %g7; \
- stx %g7, [%g1]; \
- add %g3, (1 << 5), %g3; \
- cmp %g3, (4 << 5); \
- bl,pt %xcc, 12b; \
- add %g1, 0x8, %g1; \
- ba,pt %xcc, 20f; \
- add %g1, 0x20, %g1; \
-13: sethi %hi(1 << 14), %g7; \
- add %g2, %g7, %g2; \
- srlx %g2, 14, %g7; \
- cmp %g7, 4; \
- bl,pt %xcc, 10b; \
- nop; \
- add %g1, 0x40, %g1; \
-20: /* %g1 now points to I-cache logging area */ \
- set 0x1fe0, %g2; /* IC_addr mask */ \
- and %g5, %g2, %g2; /* IC_addr bits of AFAR */ \
- sllx %g2, 1, %g2; /* IC_addr[13:6]==VA[12:5] */ \
- srlx %g5, (13 - 8), %g3; /* Make PTAG */ \
- andn %g3, 0xff, %g3; /* Mask off undefined bits */ \
-21: ldxa [%g2] ASI_IC_TAG, %g7; \
- andn %g7, 0xff, %g7; \
- cmp %g3, %g7; \
- bne,pt %xcc, 23f; \
- nop; \
- /* Yep, what we want, capture state. */ \
- stx %g2, [%g1 + 0x40]; \
- stx %g7, [%g1 + 0x48]; \
- add %g2, (1 << 3), %g2; \
- ldxa [%g2] ASI_IC_TAG, %g7; \
- add %g2, (1 << 3), %g2; \
- stx %g7, [%g1 + 0x50]; \
- ldxa [%g2] ASI_IC_TAG, %g7; \
- add %g2, (1 << 3), %g2; \
- stx %g7, [%g1 + 0x60]; \
- ldxa [%g2] ASI_IC_TAG, %g7; \
- stx %g7, [%g1 + 0x68]; \
- sub %g2, (3 << 3), %g2; \
- ldxa [%g2] ASI_IC_STAG, %g7; \
- stx %g7, [%g1 + 0x58]; \
- clr %g3; \
- srlx %g2, 2, %g2; \
-22: ldxa [%g2 + %g3] ASI_IC_INSTR, %g7; \
- stx %g7, [%g1]; \
- add %g3, (1 << 3), %g3; \
- cmp %g3, (8 << 3); \
- bl,pt %xcc, 22b; \
- add %g1, 0x8, %g1; \
- ba,pt %xcc, 30f; \
- add %g1, 0x30, %g1; \
-23: sethi %hi(1 << 14), %g7; \
- add %g2, %g7, %g2; \
- srlx %g2, 14, %g7; \
- cmp %g7, 4; \
- bl,pt %xcc, 21b; \
- nop; \
- add %g1, 0x70, %g1; \
-30: /* %g1 now points to E-cache logging area */ \
- andn %g5, (32 - 1), %g2; /* E-cache subblock */ \
- stx %g2, [%g1 + 0x20]; \
- ldxa [%g2] ASI_EC_TAG_DATA, %g7; \
- stx %g7, [%g1 + 0x28]; \
- ldxa [%g2] ASI_EC_R, %g0; \
- clr %g3; \
-31: ldxa [%g3] ASI_EC_DATA, %g7; \
- stx %g7, [%g1 + %g3]; \
- add %g3, 0x8, %g3; \
- cmp %g3, 0x20; \
- bl,pt %xcc, 31b; \
- nop; \
-80: /* DONE */
-
/* These get patched into the trap table at boot time
* once we know we have a cheetah processor.
*/
@@ -1227,6 +1163,170 @@ dcpe_icpe_tl1_common:
membar #Sync
retry
+ /* Capture I/D/E-cache state into per-cpu error scoreboard.
+ *
+ * %g1: (TL>=0) ? 1 : 0
+ * %g2: scratch
+ * %g3: scratch
+ * %g4: AFSR
+ * %g5: AFAR
+ * %g6: current thread ptr
+ * %g7: scratch
+ */
+__cheetah_log_error:
+ /* Put "TL1" software bit into AFSR. */
+ and %g1, 0x1, %g1
+ sllx %g1, 63, %g2
+ or %g4, %g2, %g4
+
+ /* Get log entry pointer for this cpu at this trap level. */
+ BRANCH_IF_JALAPENO(g2,g3,50f)
+ ldxa [%g0] ASI_SAFARI_CONFIG, %g2
+ srlx %g2, 17, %g2
+ ba,pt %xcc, 60f
+ and %g2, 0x3ff, %g2
+
+50: ldxa [%g0] ASI_JBUS_CONFIG, %g2
+ srlx %g2, 17, %g2
+ and %g2, 0x1f, %g2
+
+60: sllx %g2, 9, %g2
+ sethi %hi(cheetah_error_log), %g3
+ ldx [%g3 + %lo(cheetah_error_log)], %g3
+ brz,pn %g3, 80f
+ nop
+
+ add %g3, %g2, %g3
+ sllx %g1, 8, %g1
+ add %g3, %g1, %g1
+
+ /* %g1 holds pointer to the top of the logging scoreboard */
+ ldx [%g1 + 0x0], %g7
+ cmp %g7, -1
+ bne,pn %xcc, 80f
+ nop
+
+ stx %g4, [%g1 + 0x0]
+ stx %g5, [%g1 + 0x8]
+ add %g1, 0x10, %g1
+
+ /* %g1 now points to D-cache logging area */
+ set 0x3ff8, %g2 /* DC_addr mask */
+ and %g5, %g2, %g2 /* DC_addr bits of AFAR */
+ srlx %g5, 12, %g3
+ or %g3, 1, %g3 /* PHYS tag + valid */
+
+10: ldxa [%g2] ASI_DCACHE_TAG, %g7
+ cmp %g3, %g7 /* TAG match? */
+ bne,pt %xcc, 13f
+ nop
+
+ /* Yep, what we want, capture state. */
+ stx %g2, [%g1 + 0x20]
+ stx %g7, [%g1 + 0x28]
+
+ /* A membar Sync is required before and after utag access. */
+ membar #Sync
+ ldxa [%g2] ASI_DCACHE_UTAG, %g7
+ membar #Sync
+ stx %g7, [%g1 + 0x30]
+ ldxa [%g2] ASI_DCACHE_SNOOP_TAG, %g7
+ stx %g7, [%g1 + 0x38]
+ clr %g3
+
+12: ldxa [%g2 + %g3] ASI_DCACHE_DATA, %g7
+ stx %g7, [%g1]
+ add %g3, (1 << 5), %g3
+ cmp %g3, (4 << 5)
+ bl,pt %xcc, 12b
+ add %g1, 0x8, %g1
+
+ ba,pt %xcc, 20f
+ add %g1, 0x20, %g1
+
+13: sethi %hi(1 << 14), %g7
+ add %g2, %g7, %g2
+ srlx %g2, 14, %g7
+ cmp %g7, 4
+ bl,pt %xcc, 10b
+ nop
+
+ add %g1, 0x40, %g1
+
+ /* %g1 now points to I-cache logging area */
+20: set 0x1fe0, %g2 /* IC_addr mask */
+ and %g5, %g2, %g2 /* IC_addr bits of AFAR */
+ sllx %g2, 1, %g2 /* IC_addr[13:6]==VA[12:5] */
+ srlx %g5, (13 - 8), %g3 /* Make PTAG */
+ andn %g3, 0xff, %g3 /* Mask off undefined bits */
+
+21: ldxa [%g2] ASI_IC_TAG, %g7
+ andn %g7, 0xff, %g7
+ cmp %g3, %g7
+ bne,pt %xcc, 23f
+ nop
+
+ /* Yep, what we want, capture state. */
+ stx %g2, [%g1 + 0x40]
+ stx %g7, [%g1 + 0x48]
+ add %g2, (1 << 3), %g2
+ ldxa [%g2] ASI_IC_TAG, %g7
+ add %g2, (1 << 3), %g2
+ stx %g7, [%g1 + 0x50]
+ ldxa [%g2] ASI_IC_TAG, %g7
+ add %g2, (1 << 3), %g2
+ stx %g7, [%g1 + 0x60]
+ ldxa [%g2] ASI_IC_TAG, %g7
+ stx %g7, [%g1 + 0x68]
+ sub %g2, (3 << 3), %g2
+ ldxa [%g2] ASI_IC_STAG, %g7
+ stx %g7, [%g1 + 0x58]
+ clr %g3
+ srlx %g2, 2, %g2
+
+22: ldxa [%g2 + %g3] ASI_IC_INSTR, %g7
+ stx %g7, [%g1]
+ add %g3, (1 << 3), %g3
+ cmp %g3, (8 << 3)
+ bl,pt %xcc, 22b
+ add %g1, 0x8, %g1
+
+ ba,pt %xcc, 30f
+ add %g1, 0x30, %g1
+
+23: sethi %hi(1 << 14), %g7
+ add %g2, %g7, %g2
+ srlx %g2, 14, %g7
+ cmp %g7, 4
+ bl,pt %xcc, 21b
+ nop
+
+ add %g1, 0x70, %g1
+
+ /* %g1 now points to E-cache logging area */
+30: andn %g5, (32 - 1), %g2
+ stx %g2, [%g1 + 0x20]
+ ldxa [%g2] ASI_EC_TAG_DATA, %g7
+ stx %g7, [%g1 + 0x28]
+ ldxa [%g2] ASI_EC_R, %g0
+ clr %g3
+
+31: ldxa [%g3] ASI_EC_DATA, %g7
+ stx %g7, [%g1 + %g3]
+ add %g3, 0x8, %g3
+ cmp %g3, 0x20
+
+ bl,pt %xcc, 31b
+ nop
+80:
+ rdpr %tt, %g2
+ cmp %g2, 0x70
+ be c_fast_ecc
+ cmp %g2, 0x63
+ be c_cee
+ nop
+ ba,pt %xcc, c_deferred
+
/* Cheetah FECC trap handling, we get here from tl{0,1}_fecc
* in the trap table. That code has done a memory barrier
* and has disabled both the I-cache and D-cache in the DCU
@@ -1252,8 +1352,10 @@ cheetah_fast_ecc:
stxa %g4, [%g0] ASI_AFSR
membar #Sync
- CHEETAH_LOG_ERROR
+ ba,pt %xcc, __cheetah_log_error
+ nop
+c_fast_ecc:
rdpr %pil, %g2
wrpr %g0, 15, %pil
ba,pt %xcc, etrap_irq
@@ -1278,8 +1380,10 @@ cheetah_cee:
stxa %g4, [%g0] ASI_AFSR
membar #Sync
- CHEETAH_LOG_ERROR
+ ba,pt %xcc, __cheetah_log_error
+ nop
+c_cee:
rdpr %pil, %g2
wrpr %g0, 15, %pil
ba,pt %xcc, etrap_irq
@@ -1304,8 +1408,10 @@ cheetah_deferred_trap:
stxa %g4, [%g0] ASI_AFSR
membar #Sync
- CHEETAH_LOG_ERROR
+ ba,pt %xcc, __cheetah_log_error
+ nop
+c_deferred:
rdpr %pil, %g2
wrpr %g0, 15, %pil
ba,pt %xcc, etrap_irq
@@ -1600,11 +1706,11 @@ sys_clone: flushw
ba,pt %xcc, sparc_do_fork
add %sp, PTREGS_OFF, %o2
ret_from_syscall:
- /* Clear SPARC_FLAG_NEWCHILD, switch_to leaves thread.flags in
- * %o7 for us. Check performance counter stuff too.
+ /* Clear current_thread_info()->new_child, and
+ * check performance counter stuff too.
*/
- andn %o7, _TIF_NEWCHILD, %l0
- stx %l0, [%g6 + TI_FLAGS]
+ stb %g0, [%g6 + TI_NEW_CHILD]
+ ldx [%g6 + TI_FLAGS], %l0
call schedule_tail
mov %g7, %o0
andcc %l0, _TIF_PERFCTR, %g0
@@ -1720,12 +1826,11 @@ ret_sys_call:
/* Check if force_successful_syscall_return()
* was invoked.
*/
- ldx [%curptr + TI_FLAGS], %l0
- andcc %l0, _TIF_SYSCALL_SUCCESS, %g0
- be,pt %icc, 1f
- andn %l0, _TIF_SYSCALL_SUCCESS, %l0
+ ldub [%curptr + TI_SYS_NOERROR], %l0
+ brz,pt %l0, 1f
+ nop
ba,pt %xcc, 80f
- stx %l0, [%curptr + TI_FLAGS]
+ stb %g0, [%curptr + TI_SYS_NOERROR]
1:
cmp %o0, -ERESTART_RESTARTBLOCK
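
The ret_from_syscall and ret_sys_call hunks above stop tracking "new child" and "forced syscall success" as _TIF_* bits and instead test/clear dedicated per-thread bytes at offsets TI_NEW_CHILD and TI_SYS_NOERROR with plain ldub/stb, avoiding a read-modify-write of ti->flags on the fast path. Below is a minimal C sketch of that idea; the new_child name also appears in the process.c hunk further down, but the syscall_noerror name and the struct layout are assumptions (the real definitions live in asm-sparc64/thread_info.h, which this patch does not show).

/* Sketch only: field names and placement are partly assumed, see note above. */
struct thread_info_sketch {
	unsigned long	flags;			/* remaining _TIF_* bits        */
	unsigned char	new_child;		/* was _TIF_NEWCHILD            */
	unsigned char	syscall_noerror;	/* was _TIF_SYSCALL_SUCCESS     */
	/* ... */
};

/* force_successful_syscall_return() then reduces to a byte store, which
 * ret_sys_call mirrors with a single ldub test and an stb %g0 clear.
 */
static void force_successful_syscall_return_sketch(struct thread_info_sketch *ti)
{
	ti->syscall_noerror = 1;
}
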
diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S
index 8104a56ca2d8..1fa06c4e3bdb 100644
--- a/arch/sparc64/kernel/head.S
+++ b/arch/sparc64/kernel/head.S
@@ -538,11 +538,12 @@ cheetah_tlb_fixup:
nop
call cheetah_plus_patch_winfixup
nop
-
2: /* Patch copy/page operations to cheetah optimized versions. */
call cheetah_patch_copyops
nop
+ call cheetah_patch_copy_page
+ nop
call cheetah_patch_cachetlbops
nop
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index daa2fb93052c..c9b69167632a 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -782,8 +782,14 @@ static void distribute_irqs(void)
}
#endif
+struct sun5_timer {
+ u64 count0;
+ u64 limit0;
+ u64 count1;
+ u64 limit1;
+};
-struct sun5_timer *prom_timers;
+static struct sun5_timer *prom_timers;
static u64 prom_limit0, prom_limit1;
static void map_prom_timers(void)
@@ -839,18 +845,6 @@ static void kill_prom_timer(void)
: "g1", "g2");
}
-void enable_prom_timer(void)
-{
- if (!prom_timers)
- return;
-
- /* Set it to whatever was there before. */
- prom_timers->limit1 = prom_limit1;
- prom_timers->count1 = 0;
- prom_timers->limit0 = prom_limit0;
- prom_timers->count0 = 0;
-}
-
void init_irqwork_curcpu(void)
{
register struct irq_work_struct *workp asm("o2");
diff --git a/arch/sparc64/kernel/pci.c b/arch/sparc64/kernel/pci.c
index bba140d98b1b..ec8bf4012c0c 100644
--- a/arch/sparc64/kernel/pci.c
+++ b/arch/sparc64/kernel/pci.c
@@ -540,6 +540,7 @@ void pcibios_bus_to_resource(struct pci_dev *pdev, struct resource *res,
pbm->parent->resource_adjust(pdev, res, root);
}
+EXPORT_SYMBOL(pcibios_bus_to_resource);
char * __init pcibios_setup(char *str)
{
@@ -735,8 +736,7 @@ static void __pci_mmap_set_flags(struct pci_dev *dev, struct vm_area_struct *vma
static void __pci_mmap_set_pgprot(struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state)
{
- /* Our io_remap_page_range/io_remap_pfn_range takes care of this,
- do nothing. */
+ /* Our io_remap_pfn_range takes care of this, do nothing. */
}
/* Perform the actual remap of the pages for a PCI device mapping, as appropriate
diff --git a/arch/sparc64/kernel/pci_iommu.c b/arch/sparc64/kernel/pci_iommu.c
index 2803bc7c2c79..425c60cfea19 100644
--- a/arch/sparc64/kernel/pci_iommu.c
+++ b/arch/sparc64/kernel/pci_iommu.c
@@ -466,7 +466,7 @@ do_flush_sync:
if (!limit)
break;
udelay(1);
- membar("#LoadLoad");
+ rmb();
}
if (!limit)
printk(KERN_WARNING "pci_strbuf_flush: flushflag timeout "
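
Here (and in the identical sbus.c hunk below) the open-coded membar("#LoadLoad") spin becomes the generic rmb(), which this patch turns into a real exported function (see lib/mb.S in the diffstat and the new EXPORT_SYMBOLs in sparc64_ksyms.c further down). A minimal sketch of the resulting polling idiom, assuming a 2.6-era sparc64 tree where rmb() is declared via asm/system.h; the function, flag and timeout names are placeholders, not identifiers from this driver.

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <asm/system.h>			/* rmb() */

/* Spin until the streaming-buffer flush flag is written back by the IOMMU.
 * rmb() orders each re-read of the flag against subsequent loads, doing the
 * job of the removed inline membar("#LoadLoad").
 */
static void strbuf_flushflag_wait(volatile u64 *flushflag)
{
	int limit = 100000;		/* placeholder timeout, in microseconds */

	while (!*flushflag) {
		if (!--limit)
			break;
		udelay(1);
		rmb();
	}
	if (!limit)
		printk(KERN_WARNING "strbuf flush: flushflag timeout\n");
}
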
diff --git a/arch/sparc64/kernel/power.c b/arch/sparc64/kernel/power.c
index 533104c7907d..946cee0257ea 100644
--- a/arch/sparc64/kernel/power.c
+++ b/arch/sparc64/kernel/power.c
@@ -69,8 +69,6 @@ void machine_power_off(void)
machine_halt();
}
-EXPORT_SYMBOL(machine_power_off);
-
#ifdef CONFIG_PCI
static int powerd(void *__unused)
{
diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c
index a0cd2b2494d6..66255434128a 100644
--- a/arch/sparc64/kernel/process.c
+++ b/arch/sparc64/kernel/process.c
@@ -103,7 +103,7 @@ void cpu_idle(void)
* other cpus see our increasing idleness for the buddy
* redistribution algorithm. -DaveM
*/
- membar("#StoreStore | #StoreLoad");
+ membar_storeload_storestore();
}
}
@@ -124,8 +124,6 @@ void machine_halt(void)
panic("Halt failed!");
}
-EXPORT_SYMBOL(machine_halt);
-
void machine_alt_power_off(void)
{
if (!serial_console && prom_palette)
@@ -154,8 +152,6 @@ void machine_restart(char * cmd)
panic("Reboot failed!");
}
-EXPORT_SYMBOL(machine_restart);
-
static void show_regwindow32(struct pt_regs *regs)
{
struct reg_window32 __user *rw;
@@ -621,8 +617,8 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
memcpy(child_trap_frame, (((struct sparc_stackf *)regs)-1), (TRACEREG_SZ+STACKFRAME_SZ));
t->flags = (t->flags & ~((0xffUL << TI_FLAG_CWP_SHIFT) | (0xffUL << TI_FLAG_CURRENT_DS_SHIFT))) |
- _TIF_NEWCHILD |
(((regs->tstate + 1) & TSTATE_CWP) << TI_FLAG_CWP_SHIFT);
+ t->new_child = 1;
t->ksp = ((unsigned long) child_trap_frame) - STACK_BIAS;
t->kregs = (struct pt_regs *)(child_trap_frame+sizeof(struct sparc_stackf));
t->fpsaved[0] = 0;
diff --git a/arch/sparc64/kernel/rtrap.S b/arch/sparc64/kernel/rtrap.S
index 0696ed4b9d64..fafd227735fa 100644
--- a/arch/sparc64/kernel/rtrap.S
+++ b/arch/sparc64/kernel/rtrap.S
@@ -153,11 +153,14 @@ __handle_signal:
rtrap_irq:
rtrap_clr_l6: clr %l6
rtrap:
- ldub [%g6 + TI_CPU], %l0
- sethi %hi(irq_stat), %l2 ! &softirq_active
- or %l2, %lo(irq_stat), %l2 ! &softirq_active
-irqsz_patchme: sllx %l0, 0, %l0
- lduw [%l2 + %l0], %l1 ! softirq_pending
+#ifndef CONFIG_SMP
+ sethi %hi(per_cpu____cpu_data), %l0
+ lduw [%l0 + %lo(per_cpu____cpu_data)], %l1
+#else
+ sethi %hi(per_cpu____cpu_data), %l0
+ or %l0, %lo(per_cpu____cpu_data), %l0
+ lduw [%l0 + %g5], %l1
+#endif
cmp %l1, 0
/* mm/ultra.S:xcall_report_regs KNOWS about this load. */
diff --git a/arch/sparc64/kernel/sbus.c b/arch/sparc64/kernel/sbus.c
index 89f5e019f24c..e09ddf927655 100644
--- a/arch/sparc64/kernel/sbus.c
+++ b/arch/sparc64/kernel/sbus.c
@@ -147,7 +147,7 @@ static void sbus_strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long
if (!limit)
break;
udelay(1);
- membar("#LoadLoad");
+ rmb();
}
if (!limit)
printk(KERN_WARNING "sbus_strbuf_flush: flushflag timeout "
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index b7e6a91952b2..ddbed3341a23 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -33,7 +33,6 @@
#include <linux/cpu.h>
#include <linux/initrd.h>
-#include <asm/segment.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
@@ -512,18 +511,6 @@ void __init setup_arch(char **cmdline_p)
conswitchp = &prom_con;
#endif
-#ifdef CONFIG_SMP
- i = (unsigned long)&irq_stat[1] - (unsigned long)&irq_stat[0];
- if ((i == SMP_CACHE_BYTES) || (i == (2 * SMP_CACHE_BYTES))) {
- extern unsigned int irqsz_patchme[1];
- irqsz_patchme[0] |= ((i == SMP_CACHE_BYTES) ? SMP_CACHE_BYTES_SHIFT : \
- SMP_CACHE_BYTES_SHIFT + 1);
- flushi((long)&irqsz_patchme[0]);
- } else {
- prom_printf("Unexpected size of irq_stat[] elements\n");
- prom_halt();
- }
-#endif
/* Work out if we are starfire early on */
check_if_starfire();
diff --git a/arch/sparc64/kernel/signal.c b/arch/sparc64/kernel/signal.c
index b27934671c35..60f5dfabb1e1 100644
--- a/arch/sparc64/kernel/signal.c
+++ b/arch/sparc64/kernel/signal.c
@@ -574,13 +574,12 @@ static inline void handle_signal(unsigned long signr, struct k_sigaction *ka,
{
setup_rt_frame(ka, regs, signr, oldset,
(ka->sa.sa_flags & SA_SIGINFO) ? info : NULL);
- if (!(ka->sa.sa_flags & SA_NOMASK)) {
- spin_lock_irq(&current->sighand->siglock);
- sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ spin_lock_irq(&current->sighand->siglock);
+ sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ if (!(ka->sa.sa_flags & SA_NOMASK))
sigaddset(&current->blocked,signr);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
- }
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
}
static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
diff --git a/arch/sparc64/kernel/signal32.c b/arch/sparc64/kernel/signal32.c
index f28428f4170e..aecccd0df1d1 100644
--- a/arch/sparc64/kernel/signal32.c
+++ b/arch/sparc64/kernel/signal32.c
@@ -877,11 +877,12 @@ static void new_setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
unsigned long page = (unsigned long)
page_address(pte_page(*ptep));
- __asm__ __volatile__(
- " membar #StoreStore\n"
- " flush %0 + %1"
- : : "r" (page), "r" (address & (PAGE_SIZE - 1))
- : "memory");
+ wmb();
+ __asm__ __volatile__("flush %0 + %1"
+ : /* no outputs */
+ : "r" (page),
+ "r" (address & (PAGE_SIZE - 1))
+ : "memory");
}
pte_unmap(ptep);
preempt_enable();
@@ -1292,11 +1293,12 @@ static void setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
unsigned long page = (unsigned long)
page_address(pte_page(*ptep));
- __asm__ __volatile__(
- " membar #StoreStore\n"
- " flush %0 + %1"
- : : "r" (page), "r" (address & (PAGE_SIZE - 1))
- : "memory");
+ wmb();
+ __asm__ __volatile__("flush %0 + %1"
+ : /* no outputs */
+ : "r" (page),
+ "r" (address & (PAGE_SIZE - 1))
+ : "memory");
}
pte_unmap(ptep);
preempt_enable();
@@ -1325,13 +1327,12 @@ static inline void handle_signal32(unsigned long signr, struct k_sigaction *ka,
else
setup_frame32(&ka->sa, regs, signr, oldset, info);
}
- if (!(ka->sa.sa_flags & SA_NOMASK)) {
- spin_lock_irq(&current->sighand->siglock);
- sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ spin_lock_irq(&current->sighand->siglock);
+ sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ if (!(ka->sa.sa_flags & SA_NOMASK))
sigaddset(&current->blocked,signr);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
- }
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
}
static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs,
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 441fc2e52ce6..b4fc6a5462b2 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -45,8 +45,8 @@ extern void calibrate_delay(void);
/* Please don't make this stuff initdata!!! --DaveM */
static unsigned char boot_cpu_id;
-cpumask_t cpu_online_map = CPU_MASK_NONE __read_mostly;
-cpumask_t phys_cpu_present_map = CPU_MASK_NONE __read_mostly;
+cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
+cpumask_t phys_cpu_present_map __read_mostly = CPU_MASK_NONE;
static cpumask_t smp_commenced_mask;
static cpumask_t cpu_callout_map;
@@ -137,14 +137,14 @@ void __init smp_callin(void)
/* Clear this or we will die instantly when we
* schedule back to this idler...
*/
- clear_thread_flag(TIF_NEWCHILD);
+ current_thread_info()->new_child = 0;
/* Attach to the address space of init_task. */
atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm;
while (!cpu_isset(cpuid, smp_commenced_mask))
- membar("#LoadLoad");
+ rmb();
cpu_set(cpuid, cpu_online_map);
}
@@ -184,11 +184,11 @@ static inline long get_delta (long *rt, long *master)
for (i = 0; i < NUM_ITERS; i++) {
t0 = tick_ops->get_tick();
go[MASTER] = 1;
- membar("#StoreLoad");
+ membar_storeload();
while (!(tm = go[SLAVE]))
- membar("#LoadLoad");
+ rmb();
go[SLAVE] = 0;
- membar("#StoreStore");
+ wmb();
t1 = tick_ops->get_tick();
if (t1 - t0 < best_t1 - best_t0)
@@ -221,7 +221,7 @@ void smp_synchronize_tick_client(void)
go[MASTER] = 1;
while (go[MASTER])
- membar("#LoadLoad");
+ rmb();
local_irq_save(flags);
{
@@ -273,21 +273,21 @@ static void smp_synchronize_one_tick(int cpu)
/* wait for client to be ready */
while (!go[MASTER])
- membar("#LoadLoad");
+ rmb();
/* now let the client proceed into his loop */
go[MASTER] = 0;
- membar("#StoreLoad");
+ membar_storeload();
spin_lock_irqsave(&itc_sync_lock, flags);
{
for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
while (!go[MASTER])
- membar("#LoadLoad");
+ rmb();
go[MASTER] = 0;
- membar("#StoreStore");
+ wmb();
go[SLAVE] = tick_ops->get_tick();
- membar("#StoreLoad");
+ membar_storeload();
}
}
spin_unlock_irqrestore(&itc_sync_lock, flags);
@@ -927,11 +927,11 @@ void smp_capture(void)
smp_processor_id());
#endif
penguins_are_doing_time = 1;
- membar("#StoreStore | #LoadStore");
+ membar_storestore_loadstore();
atomic_inc(&smp_capture_registry);
smp_cross_call(&xcall_capture, 0, 0, 0);
while (atomic_read(&smp_capture_registry) != ncpus)
- membar("#LoadLoad");
+ rmb();
#ifdef CAPTURE_DEBUG
printk("done\n");
#endif
@@ -947,7 +947,7 @@ void smp_release(void)
smp_processor_id());
#endif
penguins_are_doing_time = 0;
- membar("#StoreStore | #StoreLoad");
+ membar_storeload_storestore();
atomic_dec(&smp_capture_registry);
}
}
@@ -970,9 +970,9 @@ void smp_penguin_jailcell(int irq, struct pt_regs *regs)
save_alternate_globals(global_save);
prom_world(1);
atomic_inc(&smp_capture_registry);
- membar("#StoreLoad | #StoreStore");
+ membar_storeload_storestore();
while (penguins_are_doing_time)
- membar("#LoadLoad");
+ rmb();
restore_alternate_globals(global_save);
atomic_dec(&smp_capture_registry);
prom_world(0);
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
index 9202d925a9ce..d89fc24808d3 100644
--- a/arch/sparc64/kernel/sparc64_ksyms.c
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -88,8 +88,6 @@ extern int svr4_setcontext(svr4_ucontext_t *uc, struct pt_regs *regs);
extern int compat_sys_ioctl(unsigned int fd, unsigned int cmd, u32 arg);
extern int (*handle_mathemu)(struct pt_regs *, struct fpustate *);
extern long sparc32_open(const char __user * filename, int flags, int mode);
-extern int io_remap_page_range(struct vm_area_struct *vma, unsigned long from,
- unsigned long offset, unsigned long size, pgprot_t prot, int space);
extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
unsigned long pfn, unsigned long size, pgprot_t prot);
extern void (*prom_palette)(int);
@@ -99,17 +97,6 @@ extern int __ashrdi3(int, int);
extern void dump_thread(struct pt_regs *, struct user *);
extern int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs);
-#if defined(CONFIG_SMP) && defined(CONFIG_DEBUG_SPINLOCK)
-extern void _do_spin_lock (spinlock_t *lock, char *str);
-extern void _do_spin_unlock (spinlock_t *lock);
-extern int _spin_trylock (spinlock_t *lock);
-extern void _do_read_lock(rwlock_t *rw, char *str);
-extern void _do_read_unlock(rwlock_t *rw, char *str);
-extern void _do_write_lock(rwlock_t *rw, char *str);
-extern void _do_write_unlock(rwlock_t *rw);
-extern int _do_write_trylock(rwlock_t *rw, char *str);
-#endif
-
extern unsigned long phys_base;
extern unsigned long pfn_base;
@@ -152,18 +139,6 @@ EXPORT_SYMBOL(_mcount);
EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL(phys_cpu_present_map);
-/* Spinlock debugging library, optional. */
-#ifdef CONFIG_DEBUG_SPINLOCK
-EXPORT_SYMBOL(_do_spin_lock);
-EXPORT_SYMBOL(_do_spin_unlock);
-EXPORT_SYMBOL(_spin_trylock);
-EXPORT_SYMBOL(_do_read_lock);
-EXPORT_SYMBOL(_do_read_unlock);
-EXPORT_SYMBOL(_do_write_lock);
-EXPORT_SYMBOL(_do_write_unlock);
-EXPORT_SYMBOL(_do_write_trylock);
-#endif
-
EXPORT_SYMBOL(smp_call_function);
#endif /* CONFIG_SMP */
@@ -268,7 +243,6 @@ EXPORT_SYMBOL(pci_dma_supported);
#endif
/* I/O device mmaping on Sparc64. */
-EXPORT_SYMBOL(io_remap_page_range);
EXPORT_SYMBOL(io_remap_pfn_range);
/* Solaris/SunOS binary compatibility */
@@ -429,3 +403,12 @@ EXPORT_SYMBOL(xor_vis_4);
EXPORT_SYMBOL(xor_vis_5);
EXPORT_SYMBOL(prom_palette);
+
+/* memory barriers */
+EXPORT_SYMBOL(mb);
+EXPORT_SYMBOL(rmb);
+EXPORT_SYMBOL(wmb);
+EXPORT_SYMBOL(membar_storeload);
+EXPORT_SYMBOL(membar_storeload_storestore);
+EXPORT_SYMBOL(membar_storeload_loadload);
+EXPORT_SYMBOL(membar_storestore_loadstore);
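
These exports back the out-of-line barrier helpers that replace the old inline membar() macro throughout this patch. The one-for-one substitutions visible in the process.c, pci_iommu.c, sbus.c, signal32.c and smp.c hunks imply the mapping listed in the comment below; the function body is only a hedged sketch, since lib/mb.S itself appears in this patch solely as a diffstat entry.

/* Mapping implied by the substitutions elsewhere in this patch:
 *
 *   membar("#LoadLoad")                 ->  rmb()
 *   membar("#StoreStore")               ->  wmb()
 *   membar("#StoreLoad")                ->  membar_storeload()
 *   membar("#StoreStore | #LoadStore")  ->  membar_storestore_loadstore()
 *   membar("#StoreStore | #StoreLoad")  ->  membar_storeload_storestore()
 *   membar("#StoreLoad | #StoreStore")  ->  membar_storeload_storestore()
 *
 * Illustrative C-with-inline-asm stand-in for one such helper; the real
 * implementations live in the new arch/sparc64/lib/mb.S.
 */
void membar_storeload_sketch(void)
{
	__asm__ __volatile__("membar #StoreLoad" : : : "memory");
}
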
diff --git a/arch/sparc64/kernel/systbls.S b/arch/sparc64/kernel/systbls.S
index bceb91a8a2bd..53eaf2345fe9 100644
--- a/arch/sparc64/kernel/systbls.S
+++ b/arch/sparc64/kernel/systbls.S
@@ -50,8 +50,8 @@ sys_call_table32:
.word sys_nis_syscall, sys32_mkdir, sys_rmdir, sys32_utimes, compat_sys_stat64
/*140*/ .word sys32_sendfile64, sys_nis_syscall, sys32_futex, sys_gettid, compat_sys_getrlimit
.word compat_sys_setrlimit, sys_pivot_root, sys32_prctl, sys_pciconfig_read, sys_pciconfig_write
-/*150*/ .word sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_poll, sys_getdents64
- .word compat_sys_fcntl64, sys_ni_syscall, compat_sys_statfs, compat_sys_fstatfs, sys_oldumount
+/*150*/ .word sys_nis_syscall, sys_inotify_init, sys_inotify_add_watch, sys_poll, sys_getdents64
+ .word compat_sys_fcntl64, sys_inotify_rm_watch, compat_sys_statfs, compat_sys_fstatfs, sys_oldumount
/*160*/ .word compat_sys_sched_setaffinity, compat_sys_sched_getaffinity, sys32_getdomainname, sys32_setdomainname, sys_nis_syscall
.word sys_quotactl, sys_set_tid_address, compat_sys_mount, sys_ustat, sys32_setxattr
/*170*/ .word sys32_lsetxattr, sys32_fsetxattr, sys_getxattr, sys_lgetxattr, compat_sys_getdents
@@ -116,8 +116,8 @@ sys_call_table:
.word sys_socketpair, sys_mkdir, sys_rmdir, sys_utimes, sys_stat64
/*140*/ .word sys_sendfile64, sys_getpeername, sys_futex, sys_gettid, sys_getrlimit
.word sys_setrlimit, sys_pivot_root, sys_prctl, sys_pciconfig_read, sys_pciconfig_write
-/*150*/ .word sys_getsockname, sys_nis_syscall, sys_nis_syscall, sys_poll, sys_getdents64
- .word sys_nis_syscall, sys_ni_syscall, sys_statfs, sys_fstatfs, sys_oldumount
+/*150*/ .word sys_getsockname, sys_inotify_init, sys_inotify_add_watch, sys_poll, sys_getdents64
+ .word sys_nis_syscall, sys_inotify_rm_watch, sys_statfs, sys_fstatfs, sys_oldumount
/*160*/ .word sys_sched_setaffinity, sys_sched_getaffinity, sys_getdomainname, sys_setdomainname, sys_utrap_install
.word sys_quotactl, sys_set_tid_address, sys_mount, sys_ustat, sys_setxattr
/*170*/ .word sys_lsetxattr, sys_fsetxattr, sys_getxattr, sys_lgetxattr, sys_getdents
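
The two tables above wire the new inotify system calls into slots 151 (inotify_init), 152 (inotify_add_watch) and 156 (inotify_rm_watch). A hedged userspace sketch that exercises them through raw syscall(2); the numbers are read off the table positions shown here, the IN_MODIFY mask value (0x2) comes from the inotify ABI, and glibc wrappers of the era are assumed to be unavailable.

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	long fd, wd;

	fd = syscall(151);			/* slot 151: inotify_init          */
	if (fd < 0)
		return 1;
	wd = syscall(152, fd, "/tmp", 0x2);	/* slot 152: add watch, IN_MODIFY  */
	printf("inotify fd=%ld wd=%ld\n", fd, wd);
	syscall(156, fd, wd);			/* slot 156: inotify_rm_watch      */
	return 0;
}
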
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index a9f4596d7c2b..b280b2ef674f 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -33,6 +33,7 @@
#include <asm/dcu.h>
#include <asm/estate.h>
#include <asm/chafsr.h>
+#include <asm/sfafsr.h>
#include <asm/psrcompat.h>
#include <asm/processor.h>
#include <asm/timer.h>
@@ -143,8 +144,7 @@ void do_BUG(const char *file, int line)
}
#endif
-void instruction_access_exception(struct pt_regs *regs,
- unsigned long sfsr, unsigned long sfar)
+void spitfire_insn_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
siginfo_t info;
@@ -153,8 +153,8 @@ void instruction_access_exception(struct pt_regs *regs,
return;
if (regs->tstate & TSTATE_PRIV) {
- printk("instruction_access_exception: SFSR[%016lx] SFAR[%016lx], going.\n",
- sfsr, sfar);
+ printk("spitfire_insn_access_exception: SFSR[%016lx] "
+ "SFAR[%016lx], going.\n", sfsr, sfar);
die_if_kernel("Iax", regs);
}
if (test_thread_flag(TIF_32BIT)) {
@@ -169,19 +169,17 @@ void instruction_access_exception(struct pt_regs *regs,
force_sig_info(SIGSEGV, &info, current);
}
-void instruction_access_exception_tl1(struct pt_regs *regs,
- unsigned long sfsr, unsigned long sfar)
+void spitfire_insn_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
0, 0x8, SIGTRAP) == NOTIFY_STOP)
return;
dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
- instruction_access_exception(regs, sfsr, sfar);
+ spitfire_insn_access_exception(regs, sfsr, sfar);
}
-void data_access_exception(struct pt_regs *regs,
- unsigned long sfsr, unsigned long sfar)
+void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
siginfo_t info;
@@ -207,8 +205,8 @@ void data_access_exception(struct pt_regs *regs,
return;
}
/* Shit... */
- printk("data_access_exception: SFSR[%016lx] SFAR[%016lx], going.\n",
- sfsr, sfar);
+ printk("spitfire_data_access_exception: SFSR[%016lx] "
+ "SFAR[%016lx], going.\n", sfsr, sfar);
die_if_kernel("Dax", regs);
}
@@ -220,6 +218,16 @@ void data_access_exception(struct pt_regs *regs,
force_sig_info(SIGSEGV, &info, current);
}
+void spitfire_data_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
+{
+ if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
+ 0, 0x30, SIGTRAP) == NOTIFY_STOP)
+ return;
+
+ dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+ spitfire_data_access_exception(regs, sfsr, sfar);
+}
+
#ifdef CONFIG_PCI
/* This is really pathetic... */
extern volatile int pci_poke_in_progress;
@@ -253,54 +261,13 @@ static void spitfire_clean_and_reenable_l1_caches(void)
: "memory");
}
-void do_iae(struct pt_regs *regs)
+static void spitfire_enable_estate_errors(void)
{
- siginfo_t info;
-
- spitfire_clean_and_reenable_l1_caches();
-
- if (notify_die(DIE_TRAP, "instruction access exception", regs,
- 0, 0x8, SIGTRAP) == NOTIFY_STOP)
- return;
-
- info.si_signo = SIGBUS;
- info.si_errno = 0;
- info.si_code = BUS_OBJERR;
- info.si_addr = (void *)0;
- info.si_trapno = 0;
- force_sig_info(SIGBUS, &info, current);
-}
-
-void do_dae(struct pt_regs *regs)
-{
- siginfo_t info;
-
-#ifdef CONFIG_PCI
- if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
- spitfire_clean_and_reenable_l1_caches();
-
- pci_poke_faulted = 1;
-
- /* Why the fuck did they have to change this? */
- if (tlb_type == cheetah || tlb_type == cheetah_plus)
- regs->tpc += 4;
-
- regs->tnpc = regs->tpc + 4;
- return;
- }
-#endif
- spitfire_clean_and_reenable_l1_caches();
-
- if (notify_die(DIE_TRAP, "data access exception", regs,
- 0, 0x30, SIGTRAP) == NOTIFY_STOP)
- return;
-
- info.si_signo = SIGBUS;
- info.si_errno = 0;
- info.si_code = BUS_OBJERR;
- info.si_addr = (void *)0;
- info.si_trapno = 0;
- force_sig_info(SIGBUS, &info, current);
+ __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
+ "membar #Sync"
+ : /* no outputs */
+ : "r" (ESTATE_ERR_ALL),
+ "i" (ASI_ESTATE_ERROR_EN));
}
static char ecc_syndrome_table[] = {
@@ -338,65 +305,15 @@ static char ecc_syndrome_table[] = {
0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a
};
-/* cee_trap in entry.S encodes AFSR/UDBH/UDBL error status
- * in the following format. The AFAR is left as is, with
- * reserved bits cleared, and is a raw 40-bit physical
- * address.
- */
-#define CE_STATUS_UDBH_UE (1UL << (43 + 9))
-#define CE_STATUS_UDBH_CE (1UL << (43 + 8))
-#define CE_STATUS_UDBH_ESYNDR (0xffUL << 43)
-#define CE_STATUS_UDBH_SHIFT 43
-#define CE_STATUS_UDBL_UE (1UL << (33 + 9))
-#define CE_STATUS_UDBL_CE (1UL << (33 + 8))
-#define CE_STATUS_UDBL_ESYNDR (0xffUL << 33)
-#define CE_STATUS_UDBL_SHIFT 33
-#define CE_STATUS_AFSR_MASK (0x1ffffffffUL)
-#define CE_STATUS_AFSR_ME (1UL << 32)
-#define CE_STATUS_AFSR_PRIV (1UL << 31)
-#define CE_STATUS_AFSR_ISAP (1UL << 30)
-#define CE_STATUS_AFSR_ETP (1UL << 29)
-#define CE_STATUS_AFSR_IVUE (1UL << 28)
-#define CE_STATUS_AFSR_TO (1UL << 27)
-#define CE_STATUS_AFSR_BERR (1UL << 26)
-#define CE_STATUS_AFSR_LDP (1UL << 25)
-#define CE_STATUS_AFSR_CP (1UL << 24)
-#define CE_STATUS_AFSR_WP (1UL << 23)
-#define CE_STATUS_AFSR_EDP (1UL << 22)
-#define CE_STATUS_AFSR_UE (1UL << 21)
-#define CE_STATUS_AFSR_CE (1UL << 20)
-#define CE_STATUS_AFSR_ETS (0xfUL << 16)
-#define CE_STATUS_AFSR_ETS_SHIFT 16
-#define CE_STATUS_AFSR_PSYND (0xffffUL << 0)
-#define CE_STATUS_AFSR_PSYND_SHIFT 0
-
-/* Layout of Ecache TAG Parity Syndrome of AFSR */
-#define AFSR_ETSYNDROME_7_0 0x1UL /* E$-tag bus bits <7:0> */
-#define AFSR_ETSYNDROME_15_8 0x2UL /* E$-tag bus bits <15:8> */
-#define AFSR_ETSYNDROME_21_16 0x4UL /* E$-tag bus bits <21:16> */
-#define AFSR_ETSYNDROME_24_22 0x8UL /* E$-tag bus bits <24:22> */
-
static char *syndrome_unknown = "<Unknown>";
-asmlinkage void cee_log(unsigned long ce_status,
- unsigned long afar,
- struct pt_regs *regs)
+static void spitfire_log_udb_syndrome(unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long bit)
{
- char memmod_str[64];
- char *p;
- unsigned short scode, udb_reg;
+ unsigned short scode;
+ char memmod_str[64], *p;
- printk(KERN_WARNING "CPU[%d]: Correctable ECC Error "
- "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx]\n",
- smp_processor_id(),
- (ce_status & CE_STATUS_AFSR_MASK),
- afar,
- ((ce_status >> CE_STATUS_UDBL_SHIFT) & 0x3ffUL),
- ((ce_status >> CE_STATUS_UDBH_SHIFT) & 0x3ffUL));
-
- udb_reg = ((ce_status >> CE_STATUS_UDBL_SHIFT) & 0x3ffUL);
- if (udb_reg & (1 << 8)) {
- scode = ecc_syndrome_table[udb_reg & 0xff];
+ if (udbl & bit) {
+ scode = ecc_syndrome_table[udbl & 0xff];
if (prom_getunumber(scode, afar,
memmod_str, sizeof(memmod_str)) == -1)
p = syndrome_unknown;
@@ -407,9 +324,8 @@ asmlinkage void cee_log(unsigned long ce_status,
smp_processor_id(), scode, p);
}
- udb_reg = ((ce_status >> CE_STATUS_UDBH_SHIFT) & 0x3ffUL);
- if (udb_reg & (1 << 8)) {
- scode = ecc_syndrome_table[udb_reg & 0xff];
+ if (udbh & bit) {
+ scode = ecc_syndrome_table[udbh & 0xff];
if (prom_getunumber(scode, afar,
memmod_str, sizeof(memmod_str)) == -1)
p = syndrome_unknown;
@@ -419,6 +335,127 @@ asmlinkage void cee_log(unsigned long ce_status,
"Memory Module \"%s\"\n",
smp_processor_id(), scode, p);
}
+
+}
+
+static void spitfire_cee_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, int tl1, struct pt_regs *regs)
+{
+
+ printk(KERN_WARNING "CPU[%d]: Correctable ECC Error "
+ "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx] TL>1[%d]\n",
+ smp_processor_id(), afsr, afar, udbl, udbh, tl1);
+
+ spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_CE);
+
+ /* We always log it, even if someone is listening for this
+ * trap.
+ */
+ notify_die(DIE_TRAP, "Correctable ECC Error", regs,
+ 0, TRAP_TYPE_CEE, SIGTRAP);
+
+ /* The Correctable ECC Error trap does not disable I/D caches. So
+ * we only have to restore the ESTATE Error Enable register.
+ */
+ spitfire_enable_estate_errors();
+}
+
+static void spitfire_ue_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long tt, int tl1, struct pt_regs *regs)
+{
+ siginfo_t info;
+
+ printk(KERN_WARNING "CPU[%d]: Uncorrectable Error AFSR[%lx] "
+ "AFAR[%lx] UDBL[%lx] UDBH[%ld] TT[%lx] TL>1[%d]\n",
+ smp_processor_id(), afsr, afar, udbl, udbh, tt, tl1);
+
+ /* XXX add more human friendly logging of the error status
+ * XXX as is implemented for cheetah
+ */
+
+ spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_UE);
+
+ /* We always log it, even if someone is listening for this
+ * trap.
+ */
+ notify_die(DIE_TRAP, "Uncorrectable Error", regs,
+ 0, tt, SIGTRAP);
+
+ if (regs->tstate & TSTATE_PRIV) {
+ if (tl1)
+ dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+ die_if_kernel("UE", regs);
+ }
+
+ /* XXX need more intelligent processing here, such as is implemented
+ * XXX for cheetah errors, in fact if the E-cache still holds the
+ * XXX line with bad parity this will loop
+ */
+
+ spitfire_clean_and_reenable_l1_caches();
+ spitfire_enable_estate_errors();
+
+ if (test_thread_flag(TIF_32BIT)) {
+ regs->tpc &= 0xffffffff;
+ regs->tnpc &= 0xffffffff;
+ }
+ info.si_signo = SIGBUS;
+ info.si_errno = 0;
+ info.si_code = BUS_OBJERR;
+ info.si_addr = (void *)0;
+ info.si_trapno = 0;
+ force_sig_info(SIGBUS, &info, current);
+}
+
+void spitfire_access_error(struct pt_regs *regs, unsigned long status_encoded, unsigned long afar)
+{
+ unsigned long afsr, tt, udbh, udbl;
+ int tl1;
+
+ afsr = (status_encoded & SFSTAT_AFSR_MASK) >> SFSTAT_AFSR_SHIFT;
+ tt = (status_encoded & SFSTAT_TRAP_TYPE) >> SFSTAT_TRAP_TYPE_SHIFT;
+ tl1 = (status_encoded & SFSTAT_TL_GT_ONE) ? 1 : 0;
+ udbl = (status_encoded & SFSTAT_UDBL_MASK) >> SFSTAT_UDBL_SHIFT;
+ udbh = (status_encoded & SFSTAT_UDBH_MASK) >> SFSTAT_UDBH_SHIFT;
+
+#ifdef CONFIG_PCI
+ if (tt == TRAP_TYPE_DAE &&
+ pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
+ spitfire_clean_and_reenable_l1_caches();
+ spitfire_enable_estate_errors();
+
+ pci_poke_faulted = 1;
+ regs->tnpc = regs->tpc + 4;
+ return;
+ }
+#endif
+
+ if (afsr & SFAFSR_UE)
+ spitfire_ue_log(afsr, afar, udbh, udbl, tt, tl1, regs);
+
+ if (tt == TRAP_TYPE_CEE) {
+ /* Handle the case where we took a CEE trap, but ACK'd
+ * only the UE state in the UDB error registers.
+ */
+ if (afsr & SFAFSR_UE) {
+ if (udbh & UDBE_CE) {
+ __asm__ __volatile__(
+ "stxa %0, [%1] %2\n\t"
+ "membar #Sync"
+ : /* no outputs */
+ : "r" (udbh & UDBE_CE),
+ "r" (0x0), "i" (ASI_UDB_ERROR_W));
+ }
+ if (udbl & UDBE_CE) {
+ __asm__ __volatile__(
+ "stxa %0, [%1] %2\n\t"
+ "membar #Sync"
+ : /* no outputs */
+ : "r" (udbl & UDBE_CE),
+ "r" (0x18), "i" (ASI_UDB_ERROR_W));
+ }
+ }
+
+ spitfire_cee_log(afsr, afar, udbh, udbl, tl1, regs);
+ }
}
int cheetah_pcache_forced_on;
@@ -2125,6 +2162,11 @@ void __init trap_init(void)
TI_PCR != offsetof(struct thread_info, pcr_reg) ||
TI_CEE_STUFF != offsetof(struct thread_info, cee_stuff) ||
TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) ||
+ TI_NEW_CHILD != offsetof(struct thread_info, new_child) ||
+ TI_SYS_NOERROR != offsetof(struct thread_info, syscall_noerror) ||
+ TI_RESTART_BLOCK != offsetof(struct thread_info, restart_block) ||
+ TI_KUNA_REGS != offsetof(struct thread_info, kern_una_regs) ||
+ TI_KUNA_INSN != offsetof(struct thread_info, kern_una_insn) ||
TI_FPREGS != offsetof(struct thread_info, fpregs) ||
(TI_FPREGS & (64 - 1)))
thread_info_offsets_are_bolixed_dave();
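
Note: the traps.c rework above folds the old cee_log()/do_iae()/do_dae() handlers into a single spitfire_access_error() that receives one status word packed by the trap assembler, unpacks AFSR/UDB/trap-type/TL fields from it, and then looks up the ECC syndrome per UDB register. A minimal user-space-style sketch of that unpacking and syndrome step is shown below; the field positions, the UDB_CE_BIT value and the syndrome_table contents are illustrative assumptions, not the kernel's asm/spitfire.h definitions.

	/* Sketch only: unpack a packed Spitfire error status word and do the
	 * ECC syndrome lookup.  Field layout and constants are assumed for
	 * illustration; the kernel maps scode+afar to a DIMM name via
	 * prom_getunumber(), which is omitted here.
	 */
	#include <stdio.h>

	#define ST_AFSR(x)	((x) & 0xffffffffUL)		/* bits 31:0  (assumed) */
	#define ST_UDBL(x)	(((x) >> 32) & 0x3ffUL)		/* bits 41:32 (assumed) */
	#define ST_UDBH(x)	(((x) >> 42) & 0x3ffUL)		/* bits 51:42 (assumed) */
	#define ST_TT(x)	(((x) >> 52) & 0xffUL)		/* bits 59:52 (assumed) */
	#define ST_TL1(x)	(((x) >> 63) & 0x1UL)		/* bit  63    (assumed) */

	#define UDB_CE_BIT	(1UL << 8)	/* correctable-error flag in a UDB reg (assumed) */

	static const unsigned char syndrome_table[256] = { 0x03, 0x4b /* ... */ };

	static void log_udb(unsigned long udb, unsigned long afar)
	{
		if (udb & UDB_CE_BIT) {
			unsigned int scode = syndrome_table[udb & 0xff];
			printf("CE syndrome %u at PA %016lx\n", scode, afar);
		}
	}

	int main(void)
	{
		unsigned long status = 0x0000010200000020UL;	/* made-up example */
		unsigned long afar = 0x123456780UL;

		printf("AFSR=%lx UDBL=%lx UDBH=%lx TT=%lx TL>1=%lu\n",
		       ST_AFSR(status), ST_UDBL(status), ST_UDBH(status),
		       ST_TT(status), ST_TL1(status));
		log_udb(ST_UDBL(status), afar);
		log_udb(ST_UDBH(status), afar);
		return 0;
	}
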
diff --git a/arch/sparc64/kernel/ttable.S b/arch/sparc64/kernel/ttable.S
index 491bb3681f9d..8365bc1f81f3 100644
--- a/arch/sparc64/kernel/ttable.S
+++ b/arch/sparc64/kernel/ttable.S
@@ -18,9 +18,10 @@ sparc64_ttable_tl0:
tl0_resv000: BOOT_KERNEL BTRAP(0x1) BTRAP(0x2) BTRAP(0x3)
tl0_resv004: BTRAP(0x4) BTRAP(0x5) BTRAP(0x6) BTRAP(0x7)
tl0_iax: membar #Sync
- TRAP_NOSAVE_7INSNS(__do_instruction_access_exception)
+ TRAP_NOSAVE_7INSNS(__spitfire_insn_access_exception)
tl0_resv009: BTRAP(0x9)
-tl0_iae: TRAP(do_iae)
+tl0_iae: membar #Sync
+ TRAP_NOSAVE_7INSNS(__spitfire_access_error)
tl0_resv00b: BTRAP(0xb) BTRAP(0xc) BTRAP(0xd) BTRAP(0xe) BTRAP(0xf)
tl0_ill: membar #Sync
TRAP_7INSNS(do_illegal_instruction)
@@ -36,9 +37,10 @@ tl0_cwin: CLEAN_WINDOW
tl0_div0: TRAP(do_div0)
tl0_resv029: BTRAP(0x29) BTRAP(0x2a) BTRAP(0x2b) BTRAP(0x2c) BTRAP(0x2d) BTRAP(0x2e)
tl0_resv02f: BTRAP(0x2f)
-tl0_dax: TRAP_NOSAVE(__do_data_access_exception)
+tl0_dax: TRAP_NOSAVE(__spitfire_data_access_exception)
tl0_resv031: BTRAP(0x31)
-tl0_dae: TRAP(do_dae)
+tl0_dae: membar #Sync
+ TRAP_NOSAVE_7INSNS(__spitfire_access_error)
tl0_resv033: BTRAP(0x33)
tl0_mna: TRAP_NOSAVE(do_mna)
tl0_lddfmna: TRAP_NOSAVE(do_lddfmna)
@@ -73,7 +75,8 @@ tl0_resv05c: BTRAP(0x5c) BTRAP(0x5d) BTRAP(0x5e) BTRAP(0x5f)
tl0_ivec: TRAP_IVEC
tl0_paw: TRAP(do_paw)
tl0_vaw: TRAP(do_vaw)
-tl0_cee: TRAP_NOSAVE(cee_trap)
+tl0_cee: membar #Sync
+ TRAP_NOSAVE_7INSNS(__spitfire_cee_trap)
tl0_iamiss:
#include "itlb_base.S"
tl0_damiss:
@@ -175,9 +178,10 @@ tl0_resv1f0: BTRAPS(0x1f0) BTRAPS(0x1f8)
sparc64_ttable_tl1:
tl1_resv000: BOOT_KERNEL BTRAPTL1(0x1) BTRAPTL1(0x2) BTRAPTL1(0x3)
tl1_resv004: BTRAPTL1(0x4) BTRAPTL1(0x5) BTRAPTL1(0x6) BTRAPTL1(0x7)
-tl1_iax: TRAP_NOSAVE(__do_instruction_access_exception_tl1)
+tl1_iax: TRAP_NOSAVE(__spitfire_insn_access_exception_tl1)
tl1_resv009: BTRAPTL1(0x9)
-tl1_iae: TRAPTL1(do_iae_tl1)
+tl1_iae: membar #Sync
+ TRAP_NOSAVE_7INSNS(__spitfire_access_error)
tl1_resv00b: BTRAPTL1(0xb) BTRAPTL1(0xc) BTRAPTL1(0xd) BTRAPTL1(0xe) BTRAPTL1(0xf)
tl1_ill: TRAPTL1(do_ill_tl1)
tl1_privop: BTRAPTL1(0x11)
@@ -193,9 +197,10 @@ tl1_cwin: CLEAN_WINDOW
tl1_div0: TRAPTL1(do_div0_tl1)
tl1_resv029: BTRAPTL1(0x29) BTRAPTL1(0x2a) BTRAPTL1(0x2b) BTRAPTL1(0x2c)
tl1_resv02d: BTRAPTL1(0x2d) BTRAPTL1(0x2e) BTRAPTL1(0x2f)
-tl1_dax: TRAP_NOSAVE(__do_data_access_exception_tl1)
+tl1_dax: TRAP_NOSAVE(__spitfire_data_access_exception_tl1)
tl1_resv031: BTRAPTL1(0x31)
-tl1_dae: TRAPTL1(do_dae_tl1)
+tl1_dae: membar #Sync
+ TRAP_NOSAVE_7INSNS(__spitfire_access_error)
tl1_resv033: BTRAPTL1(0x33)
tl1_mna: TRAP_NOSAVE(do_mna)
tl1_lddfmna: TRAPTL1(do_lddfmna_tl1)
@@ -219,8 +224,8 @@ tl1_paw: TRAPTL1(do_paw_tl1)
tl1_vaw: TRAPTL1(do_vaw_tl1)
/* The grotty trick to save %g1 into current->thread.cee_stuff
- * is because when we take this trap we could be interrupting trap
- * code already using the trap alternate global registers.
+ * is because when we take this trap we could be interrupting
+ * trap code already using the trap alternate global registers.
*
* We cross our fingers and pray that this store/load does
* not cause yet another CEE trap.
diff --git a/arch/sparc64/kernel/una_asm.S b/arch/sparc64/kernel/una_asm.S
new file mode 100644
index 000000000000..cbb40585253c
--- /dev/null
+++ b/arch/sparc64/kernel/una_asm.S
@@ -0,0 +1,153 @@
+/* una_asm.S: Kernel unaligned trap assembler helpers.
+ *
+ * Copyright (C) 1996,2005 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+ .text
+
+kernel_unaligned_trap_fault:
+ call kernel_mna_trap_fault
+ nop
+ retl
+ nop
+ .size kernel_unaligned_trap_fault, .-kernel_unaligned_trap_fault
+
+ .globl __do_int_store
+__do_int_store:
+ rd %asi, %o4
+ wr %o3, 0, %asi
+ ldx [%o2], %g3
+ cmp %o1, 2
+ be,pn %icc, 2f
+ cmp %o1, 4
+ be,pt %icc, 1f
+ srlx %g3, 24, %g2
+ srlx %g3, 56, %g1
+ srlx %g3, 48, %g7
+4: stba %g1, [%o0] %asi
+ srlx %g3, 40, %g1
+5: stba %g7, [%o0 + 1] %asi
+ srlx %g3, 32, %g7
+6: stba %g1, [%o0 + 2] %asi
+7: stba %g7, [%o0 + 3] %asi
+ srlx %g3, 16, %g1
+8: stba %g2, [%o0 + 4] %asi
+ srlx %g3, 8, %g7
+9: stba %g1, [%o0 + 5] %asi
+10: stba %g7, [%o0 + 6] %asi
+ ba,pt %xcc, 0f
+11: stba %g3, [%o0 + 7] %asi
+1: srl %g3, 16, %g7
+12: stba %g2, [%o0] %asi
+ srl %g3, 8, %g2
+13: stba %g7, [%o0 + 1] %asi
+14: stba %g2, [%o0 + 2] %asi
+ ba,pt %xcc, 0f
+15: stba %g3, [%o0 + 3] %asi
+2: srl %g3, 8, %g2
+16: stba %g2, [%o0] %asi
+17: stba %g3, [%o0 + 1] %asi
+0:
+ wr %o4, 0x0, %asi
+ retl
+ nop
+ .size __do_int_store, .-__do_int_store
+
+ .section __ex_table
+ .word 4b, kernel_unaligned_trap_fault
+ .word 5b, kernel_unaligned_trap_fault
+ .word 6b, kernel_unaligned_trap_fault
+ .word 7b, kernel_unaligned_trap_fault
+ .word 8b, kernel_unaligned_trap_fault
+ .word 9b, kernel_unaligned_trap_fault
+ .word 10b, kernel_unaligned_trap_fault
+ .word 11b, kernel_unaligned_trap_fault
+ .word 12b, kernel_unaligned_trap_fault
+ .word 13b, kernel_unaligned_trap_fault
+ .word 14b, kernel_unaligned_trap_fault
+ .word 15b, kernel_unaligned_trap_fault
+ .word 16b, kernel_unaligned_trap_fault
+ .word 17b, kernel_unaligned_trap_fault
+ .previous
+
+ .globl do_int_load
+do_int_load:
+ rd %asi, %o5
+ wr %o4, 0, %asi
+ cmp %o1, 8
+ bge,pn %icc, 9f
+ cmp %o1, 4
+ be,pt %icc, 6f
+4: lduba [%o2] %asi, %g2
+5: lduba [%o2 + 1] %asi, %g3
+ sll %g2, 8, %g2
+ brz,pt %o3, 3f
+ add %g2, %g3, %g2
+ sllx %g2, 48, %g2
+ srax %g2, 48, %g2
+3: ba,pt %xcc, 0f
+ stx %g2, [%o0]
+6: lduba [%o2 + 1] %asi, %g3
+ sll %g2, 24, %g2
+7: lduba [%o2 + 2] %asi, %g7
+ sll %g3, 16, %g3
+8: lduba [%o2 + 3] %asi, %g1
+ sll %g7, 8, %g7
+ or %g2, %g3, %g2
+ or %g7, %g1, %g7
+ or %g2, %g7, %g2
+ brnz,a,pt %o3, 3f
+ sra %g2, 0, %g2
+3: ba,pt %xcc, 0f
+ stx %g2, [%o0]
+9: lduba [%o2] %asi, %g2
+10: lduba [%o2 + 1] %asi, %g3
+ sllx %g2, 56, %g2
+11: lduba [%o2 + 2] %asi, %g7
+ sllx %g3, 48, %g3
+12: lduba [%o2 + 3] %asi, %g1
+ sllx %g7, 40, %g7
+ sllx %g1, 32, %g1
+ or %g2, %g3, %g2
+ or %g7, %g1, %g7
+13: lduba [%o2 + 4] %asi, %g3
+ or %g2, %g7, %g7
+14: lduba [%o2 + 5] %asi, %g1
+ sllx %g3, 24, %g3
+15: lduba [%o2 + 6] %asi, %g2
+ sllx %g1, 16, %g1
+ or %g7, %g3, %g7
+16: lduba [%o2 + 7] %asi, %g3
+ sllx %g2, 8, %g2
+ or %g7, %g1, %g7
+ or %g2, %g3, %g2
+ or %g7, %g2, %g7
+ cmp %o1, 8
+ be,a,pt %icc, 0f
+ stx %g7, [%o0]
+ srlx %g7, 32, %g2
+ sra %g7, 0, %g7
+ stx %g2, [%o0]
+ stx %g7, [%o0 + 8]
+0:
+ wr %o5, 0x0, %asi
+ retl
+ nop
+ .size do_int_load, .-do_int_load
+
+ .section __ex_table
+ .word 4b, kernel_unaligned_trap_fault
+ .word 5b, kernel_unaligned_trap_fault
+ .word 6b, kernel_unaligned_trap_fault
+ .word 7b, kernel_unaligned_trap_fault
+ .word 8b, kernel_unaligned_trap_fault
+ .word 9b, kernel_unaligned_trap_fault
+ .word 10b, kernel_unaligned_trap_fault
+ .word 11b, kernel_unaligned_trap_fault
+ .word 12b, kernel_unaligned_trap_fault
+ .word 13b, kernel_unaligned_trap_fault
+ .word 14b, kernel_unaligned_trap_fault
+ .word 15b, kernel_unaligned_trap_fault
+ .word 16b, kernel_unaligned_trap_fault
+ .previous
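
Note: the new una_asm.S helpers above assemble and scatter values one byte at a time so no aligned-access requirement is violated, with every lduba/stba covered by an __ex_table entry. A short C sketch of the same big-endian byte-assembly idea (without the ASI selection or the exception-table cover) follows; it is illustrative, not a drop-in replacement for the assembler.

	/* Sketch: big-endian byte-wise unaligned load with optional sign
	 * extension, mirroring what do_int_load does in assembler.
	 */
	#include <stdint.h>

	static uint64_t unaligned_load(const unsigned char *p, int size, int is_signed)
	{
		uint64_t v = 0;
		int i;

		for (i = 0; i < size; i++)		/* most-significant byte first */
			v = (v << 8) | p[i];

		if (is_signed && size < 8) {		/* sign-extend 2- or 4-byte values */
			int shift = 64 - (size * 8);
			v = (uint64_t)(((int64_t)(v << shift)) >> shift);
		}
		return v;
	}

	static void unaligned_store(unsigned char *p, uint64_t v, int size)
	{
		int i;

		for (i = size - 1; i >= 0; i--) {	/* least-significant byte last */
			p[i] = v & 0xff;
			v >>= 8;
		}
	}
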
diff --git a/arch/sparc64/kernel/unaligned.c b/arch/sparc64/kernel/unaligned.c
index 4372bf32ecf6..da9739f0d437 100644
--- a/arch/sparc64/kernel/unaligned.c
+++ b/arch/sparc64/kernel/unaligned.c
@@ -180,169 +180,28 @@ static void __attribute_used__ unaligned_panic(char *str, struct pt_regs *regs)
die_if_kernel(str, regs);
}
-#define do_integer_load(dest_reg, size, saddr, is_signed, asi, errh) ({ \
-__asm__ __volatile__ ( \
- "wr %4, 0, %%asi\n\t" \
- "cmp %1, 8\n\t" \
- "bge,pn %%icc, 9f\n\t" \
- " cmp %1, 4\n\t" \
- "be,pt %%icc, 6f\n" \
-"4:\t" " lduba [%2] %%asi, %%l1\n" \
-"5:\t" "lduba [%2 + 1] %%asi, %%l2\n\t" \
- "sll %%l1, 8, %%l1\n\t" \
- "brz,pt %3, 3f\n\t" \
- " add %%l1, %%l2, %%l1\n\t" \
- "sllx %%l1, 48, %%l1\n\t" \
- "srax %%l1, 48, %%l1\n" \
-"3:\t" "ba,pt %%xcc, 0f\n\t" \
- " stx %%l1, [%0]\n" \
-"6:\t" "lduba [%2 + 1] %%asi, %%l2\n\t" \
- "sll %%l1, 24, %%l1\n" \
-"7:\t" "lduba [%2 + 2] %%asi, %%g7\n\t" \
- "sll %%l2, 16, %%l2\n" \
-"8:\t" "lduba [%2 + 3] %%asi, %%g1\n\t" \
- "sll %%g7, 8, %%g7\n\t" \
- "or %%l1, %%l2, %%l1\n\t" \
- "or %%g7, %%g1, %%g7\n\t" \
- "or %%l1, %%g7, %%l1\n\t" \
- "brnz,a,pt %3, 3f\n\t" \
- " sra %%l1, 0, %%l1\n" \
-"3:\t" "ba,pt %%xcc, 0f\n\t" \
- " stx %%l1, [%0]\n" \
-"9:\t" "lduba [%2] %%asi, %%l1\n" \
-"10:\t" "lduba [%2 + 1] %%asi, %%l2\n\t" \
- "sllx %%l1, 56, %%l1\n" \
-"11:\t" "lduba [%2 + 2] %%asi, %%g7\n\t" \
- "sllx %%l2, 48, %%l2\n" \
-"12:\t" "lduba [%2 + 3] %%asi, %%g1\n\t" \
- "sllx %%g7, 40, %%g7\n\t" \
- "sllx %%g1, 32, %%g1\n\t" \
- "or %%l1, %%l2, %%l1\n\t" \
- "or %%g7, %%g1, %%g7\n" \
-"13:\t" "lduba [%2 + 4] %%asi, %%l2\n\t" \
- "or %%l1, %%g7, %%g7\n" \
-"14:\t" "lduba [%2 + 5] %%asi, %%g1\n\t" \
- "sllx %%l2, 24, %%l2\n" \
-"15:\t" "lduba [%2 + 6] %%asi, %%l1\n\t" \
- "sllx %%g1, 16, %%g1\n\t" \
- "or %%g7, %%l2, %%g7\n" \
-"16:\t" "lduba [%2 + 7] %%asi, %%l2\n\t" \
- "sllx %%l1, 8, %%l1\n\t" \
- "or %%g7, %%g1, %%g7\n\t" \
- "or %%l1, %%l2, %%l1\n\t" \
- "or %%g7, %%l1, %%g7\n\t" \
- "cmp %1, 8\n\t" \
- "be,a,pt %%icc, 0f\n\t" \
- " stx %%g7, [%0]\n\t" \
- "srlx %%g7, 32, %%l1\n\t" \
- "sra %%g7, 0, %%g7\n\t" \
- "stx %%l1, [%0]\n\t" \
- "stx %%g7, [%0 + 8]\n" \
-"0:\n\t" \
- "wr %%g0, %5, %%asi\n\n\t" \
- ".section __ex_table\n\t" \
- ".word 4b, " #errh "\n\t" \
- ".word 5b, " #errh "\n\t" \
- ".word 6b, " #errh "\n\t" \
- ".word 7b, " #errh "\n\t" \
- ".word 8b, " #errh "\n\t" \
- ".word 9b, " #errh "\n\t" \
- ".word 10b, " #errh "\n\t" \
- ".word 11b, " #errh "\n\t" \
- ".word 12b, " #errh "\n\t" \
- ".word 13b, " #errh "\n\t" \
- ".word 14b, " #errh "\n\t" \
- ".word 15b, " #errh "\n\t" \
- ".word 16b, " #errh "\n\n\t" \
- ".previous\n\t" \
- : : "r" (dest_reg), "r" (size), "r" (saddr), "r" (is_signed), \
- "r" (asi), "i" (ASI_AIUS) \
- : "l1", "l2", "g7", "g1", "cc"); \
-})
+extern void do_int_load(unsigned long *dest_reg, int size,
+ unsigned long *saddr, int is_signed, int asi);
-#define store_common(dst_addr, size, src_val, asi, errh) ({ \
-__asm__ __volatile__ ( \
- "wr %3, 0, %%asi\n\t" \
- "ldx [%2], %%l1\n" \
- "cmp %1, 2\n\t" \
- "be,pn %%icc, 2f\n\t" \
- " cmp %1, 4\n\t" \
- "be,pt %%icc, 1f\n\t" \
- " srlx %%l1, 24, %%l2\n\t" \
- "srlx %%l1, 56, %%g1\n\t" \
- "srlx %%l1, 48, %%g7\n" \
-"4:\t" "stba %%g1, [%0] %%asi\n\t" \
- "srlx %%l1, 40, %%g1\n" \
-"5:\t" "stba %%g7, [%0 + 1] %%asi\n\t" \
- "srlx %%l1, 32, %%g7\n" \
-"6:\t" "stba %%g1, [%0 + 2] %%asi\n" \
-"7:\t" "stba %%g7, [%0 + 3] %%asi\n\t" \
- "srlx %%l1, 16, %%g1\n" \
-"8:\t" "stba %%l2, [%0 + 4] %%asi\n\t" \
- "srlx %%l1, 8, %%g7\n" \
-"9:\t" "stba %%g1, [%0 + 5] %%asi\n" \
-"10:\t" "stba %%g7, [%0 + 6] %%asi\n\t" \
- "ba,pt %%xcc, 0f\n" \
-"11:\t" " stba %%l1, [%0 + 7] %%asi\n" \
-"1:\t" "srl %%l1, 16, %%g7\n" \
-"12:\t" "stba %%l2, [%0] %%asi\n\t" \
- "srl %%l1, 8, %%l2\n" \
-"13:\t" "stba %%g7, [%0 + 1] %%asi\n" \
-"14:\t" "stba %%l2, [%0 + 2] %%asi\n\t" \
- "ba,pt %%xcc, 0f\n" \
-"15:\t" " stba %%l1, [%0 + 3] %%asi\n" \
-"2:\t" "srl %%l1, 8, %%l2\n" \
-"16:\t" "stba %%l2, [%0] %%asi\n" \
-"17:\t" "stba %%l1, [%0 + 1] %%asi\n" \
-"0:\n\t" \
- "wr %%g0, %4, %%asi\n\n\t" \
- ".section __ex_table\n\t" \
- ".word 4b, " #errh "\n\t" \
- ".word 5b, " #errh "\n\t" \
- ".word 6b, " #errh "\n\t" \
- ".word 7b, " #errh "\n\t" \
- ".word 8b, " #errh "\n\t" \
- ".word 9b, " #errh "\n\t" \
- ".word 10b, " #errh "\n\t" \
- ".word 11b, " #errh "\n\t" \
- ".word 12b, " #errh "\n\t" \
- ".word 13b, " #errh "\n\t" \
- ".word 14b, " #errh "\n\t" \
- ".word 15b, " #errh "\n\t" \
- ".word 16b, " #errh "\n\t" \
- ".word 17b, " #errh "\n\n\t" \
- ".previous\n\t" \
- : : "r" (dst_addr), "r" (size), "r" (src_val), "r" (asi), "i" (ASI_AIUS)\
- : "l1", "l2", "g7", "g1", "cc"); \
-})
-
-#define do_integer_store(reg_num, size, dst_addr, regs, asi, errh) ({ \
- unsigned long zero = 0; \
- unsigned long *src_val = &zero; \
- \
- if (size == 16) { \
- size = 8; \
- zero = (((long)(reg_num ? \
- (unsigned)fetch_reg(reg_num, regs) : 0)) << 32) | \
- (unsigned)fetch_reg(reg_num + 1, regs); \
- } else if (reg_num) src_val = fetch_reg_addr(reg_num, regs); \
- store_common(dst_addr, size, src_val, asi, errh); \
-})
-
-extern void smp_capture(void);
-extern void smp_release(void);
-
-#define do_atomic(srcdest_reg, mem, errh) ({ \
- unsigned long flags, tmp; \
- \
- smp_capture(); \
- local_irq_save(flags); \
- tmp = *srcdest_reg; \
- do_integer_load(srcdest_reg, 4, mem, 0, errh); \
- store_common(mem, 4, &tmp, errh); \
- local_irq_restore(flags); \
- smp_release(); \
-})
+extern void __do_int_store(unsigned long *dst_addr, int size,
+ unsigned long *src_val, int asi);
+
+static inline void do_int_store(int reg_num, int size, unsigned long *dst_addr,
+ struct pt_regs *regs, int asi)
+{
+ unsigned long zero = 0;
+ unsigned long *src_val = &zero;
+
+ if (size == 16) {
+ size = 8;
+ zero = (((long)(reg_num ?
+ (unsigned)fetch_reg(reg_num, regs) : 0)) << 32) |
+ (unsigned)fetch_reg(reg_num + 1, regs);
+ } else if (reg_num) {
+ src_val = fetch_reg_addr(reg_num, regs);
+ }
+ __do_int_store(dst_addr, size, src_val, asi);
+}
static inline void advance(struct pt_regs *regs)
{
@@ -364,24 +223,29 @@ static inline int ok_for_kernel(unsigned int insn)
return !floating_point_load_or_store_p(insn);
}
-void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn) __asm__ ("kernel_mna_trap_fault");
-
-void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
+void kernel_mna_trap_fault(void)
{
- unsigned long g2 = regs->u_regs [UREG_G2];
+ struct pt_regs *regs = current_thread_info()->kern_una_regs;
+ unsigned int insn = current_thread_info()->kern_una_insn;
+ unsigned long g2 = regs->u_regs[UREG_G2];
unsigned long fixup = search_extables_range(regs->tpc, &g2);
if (!fixup) {
- unsigned long address = compute_effective_address(regs, insn, ((insn >> 25) & 0x1f));
+ unsigned long address;
+
+ address = compute_effective_address(regs, insn,
+ ((insn >> 25) & 0x1f));
if (address < PAGE_SIZE) {
- printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference in mna handler");
+ printk(KERN_ALERT "Unable to handle kernel NULL "
+ "pointer dereference in mna handler");
} else
- printk(KERN_ALERT "Unable to handle kernel paging request in mna handler");
+ printk(KERN_ALERT "Unable to handle kernel paging "
+ "request in mna handler");
printk(KERN_ALERT " at virtual address %016lx\n",address);
- printk(KERN_ALERT "current->{mm,active_mm}->context = %016lx\n",
+ printk(KERN_ALERT "current->{active_,}mm->context = %016lx\n",
(current->mm ? CTX_HWBITS(current->mm->context) :
CTX_HWBITS(current->active_mm->context)));
- printk(KERN_ALERT "current->{mm,active_mm}->pgd = %016lx\n",
+ printk(KERN_ALERT "current->{active_,}mm->pgd = %016lx\n",
(current->mm ? (unsigned long) current->mm->pgd :
(unsigned long) current->active_mm->pgd));
die_if_kernel("Oops", regs);
@@ -400,48 +264,41 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn, u
enum direction dir = decode_direction(insn);
int size = decode_access_size(insn);
+ current_thread_info()->kern_una_regs = regs;
+ current_thread_info()->kern_una_insn = insn;
+
if (!ok_for_kernel(insn) || dir == both) {
- printk("Unsupported unaligned load/store trap for kernel at <%016lx>.\n",
- regs->tpc);
- unaligned_panic("Kernel does fpu/atomic unaligned load/store.", regs);
-
- __asm__ __volatile__ ("\n"
-"kernel_unaligned_trap_fault:\n\t"
- "mov %0, %%o0\n\t"
- "call kernel_mna_trap_fault\n\t"
- " mov %1, %%o1\n\t"
- :
- : "r" (regs), "r" (insn)
- : "o0", "o1", "o2", "o3", "o4", "o5", "o7",
- "g1", "g2", "g3", "g4", "g7", "cc");
+ printk("Unsupported unaligned load/store trap for kernel "
+ "at <%016lx>.\n", regs->tpc);
+ unaligned_panic("Kernel does fpu/atomic "
+ "unaligned load/store.", regs);
+
+ kernel_mna_trap_fault();
} else {
- unsigned long addr = compute_effective_address(regs, insn, ((insn >> 25) & 0x1f));
+ unsigned long addr;
+ addr = compute_effective_address(regs, insn,
+ ((insn >> 25) & 0x1f));
#ifdef DEBUG_MNA
- printk("KMNA: pc=%016lx [dir=%s addr=%016lx size=%d] retpc[%016lx]\n",
- regs->tpc, dirstrings[dir], addr, size, regs->u_regs[UREG_RETPC]);
+ printk("KMNA: pc=%016lx [dir=%s addr=%016lx size=%d] "
+ "retpc[%016lx]\n",
+ regs->tpc, dirstrings[dir], addr, size,
+ regs->u_regs[UREG_RETPC]);
#endif
switch (dir) {
case load:
- do_integer_load(fetch_reg_addr(((insn>>25)&0x1f), regs),
- size, (unsigned long *) addr,
- decode_signedness(insn), decode_asi(insn, regs),
- kernel_unaligned_trap_fault);
+ do_int_load(fetch_reg_addr(((insn>>25)&0x1f), regs),
+ size, (unsigned long *) addr,
+ decode_signedness(insn),
+ decode_asi(insn, regs));
break;
case store:
- do_integer_store(((insn>>25)&0x1f), size,
- (unsigned long *) addr, regs,
- decode_asi(insn, regs),
- kernel_unaligned_trap_fault);
- break;
-#if 0 /* unsupported */
- case both:
- do_atomic(fetch_reg_addr(((insn>>25)&0x1f), regs),
- (unsigned long *) addr,
- kernel_unaligned_trap_fault);
+ do_int_store(((insn>>25)&0x1f), size,
+ (unsigned long *) addr, regs,
+ decode_asi(insn, regs));
break;
-#endif
+
default:
panic("Impossible kernel unaligned trap.");
/* Not reached... */
@@ -492,9 +349,9 @@ int handle_popc(u32 insn, struct pt_regs *regs)
extern void do_fpother(struct pt_regs *regs);
extern void do_privact(struct pt_regs *regs);
-extern void data_access_exception(struct pt_regs *regs,
- unsigned long sfsr,
- unsigned long sfar);
+extern void spitfire_data_access_exception(struct pt_regs *regs,
+ unsigned long sfsr,
+ unsigned long sfar);
int handle_ldf_stq(u32 insn, struct pt_regs *regs)
{
@@ -537,14 +394,14 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs)
break;
}
default:
- data_access_exception(regs, 0, addr);
+ spitfire_data_access_exception(regs, 0, addr);
return 1;
}
if (put_user (first >> 32, (u32 __user *)addr) ||
__put_user ((u32)first, (u32 __user *)(addr + 4)) ||
__put_user (second >> 32, (u32 __user *)(addr + 8)) ||
__put_user ((u32)second, (u32 __user *)(addr + 12))) {
- data_access_exception(regs, 0, addr);
+ spitfire_data_access_exception(regs, 0, addr);
return 1;
}
} else {
@@ -557,7 +414,7 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs)
do_privact(regs);
return 1;
} else if (asi > ASI_SNFL) {
- data_access_exception(regs, 0, addr);
+ spitfire_data_access_exception(regs, 0, addr);
return 1;
}
switch (insn & 0x180000) {
@@ -574,7 +431,7 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs)
err |= __get_user (data[i], (u32 __user *)(addr + 4*i));
}
if (err && !(asi & 0x2 /* NF */)) {
- data_access_exception(regs, 0, addr);
+ spitfire_data_access_exception(regs, 0, addr);
return 1;
}
if (asi & 0x8) /* Little */ {
@@ -677,7 +534,7 @@ void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
*(u64 *)(f->regs + freg) = value;
current_thread_info()->fpsaved[0] |= flag;
} else {
-daex: data_access_exception(regs, sfsr, sfar);
+daex: spitfire_data_access_exception(regs, sfsr, sfar);
return;
}
advance(regs);
@@ -721,7 +578,7 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
__put_user ((u32)value, (u32 __user *)(sfar + 4)))
goto daex;
} else {
-daex: data_access_exception(regs, sfsr, sfar);
+daex: spitfire_data_access_exception(regs, sfsr, sfar);
return;
}
advance(regs);
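
Note: the unaligned.c side of the change stops smuggling regs/insn to the fault handler through inline-asm register games; instead the trap entry stashes them in thread_info, and kernel_mna_trap_fault() (now argument-free, callable from the exception table) reads them back. A simplified sketch of that pattern, with stand-in types rather than the kernel structures:

	/* Sketch of the stash-and-recover pattern used above.  The types and
	 * the current_ti variable are simplified stand-ins for pt_regs and
	 * current_thread_info().
	 */
	struct fake_pt_regs { unsigned long tpc; };

	struct fake_thread_info {
		struct fake_pt_regs *kern_una_regs;
		unsigned int kern_una_insn;
	};

	static struct fake_thread_info current_ti;

	static void mna_trap_fault(void)		/* like kernel_mna_trap_fault() */
	{
		struct fake_pt_regs *regs = current_ti.kern_una_regs;
		unsigned int insn = current_ti.kern_una_insn;

		/* ... look up an exception-table fixup for regs->tpc, decode insn ... */
		(void)regs; (void)insn;
	}

	static void unaligned_trap(struct fake_pt_regs *regs, unsigned int insn)
	{
		current_ti.kern_una_regs = regs;	/* stash before any faulting access */
		current_ti.kern_una_insn = insn;
		/* ... do_int_load()/__do_int_store(); a fault lands in mna_trap_fault() ... */
	}
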
diff --git a/arch/sparc64/kernel/us2e_cpufreq.c b/arch/sparc64/kernel/us2e_cpufreq.c
index 7aae0a18aabe..686e526bec04 100644
--- a/arch/sparc64/kernel/us2e_cpufreq.c
+++ b/arch/sparc64/kernel/us2e_cpufreq.c
@@ -88,7 +88,6 @@ static void frob_mem_refresh(int cpu_slowing_down,
{
unsigned long old_refr_count, refr_count, mctrl;
-
refr_count = (clock_tick * MCTRL0_REFR_INTERVAL);
refr_count /= (MCTRL0_REFR_CLKS_P_CNT * divisor * 1000000000UL);
@@ -230,6 +229,25 @@ static unsigned long estar_to_divisor(unsigned long estar)
return ret;
}
+static unsigned int us2e_freq_get(unsigned int cpu)
+{
+ cpumask_t cpus_allowed;
+ unsigned long clock_tick, estar;
+
+ if (!cpu_online(cpu))
+ return 0;
+
+ cpus_allowed = current->cpus_allowed;
+ set_cpus_allowed(current, cpumask_of_cpu(cpu));
+
+ clock_tick = sparc64_get_clock_tick(cpu) / 1000;
+ estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR);
+
+ set_cpus_allowed(current, cpus_allowed);
+
+ return clock_tick / estar_to_divisor(estar);
+}
+
static void us2e_set_cpu_divider_index(unsigned int cpu, unsigned int index)
{
unsigned long new_bits, new_freq;
@@ -243,7 +261,7 @@ static void us2e_set_cpu_divider_index(unsigned int cpu, unsigned int index)
cpus_allowed = current->cpus_allowed;
set_cpus_allowed(current, cpumask_of_cpu(cpu));
- new_freq = clock_tick = sparc64_get_clock_tick(cpu);
+ new_freq = clock_tick = sparc64_get_clock_tick(cpu) / 1000;
new_bits = index_to_estar_mode(index);
divisor = index_to_divisor(index);
new_freq /= divisor;
@@ -258,7 +276,8 @@ static void us2e_set_cpu_divider_index(unsigned int cpu, unsigned int index)
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
if (old_divisor != divisor)
- us2e_transition(estar, new_bits, clock_tick, old_divisor, divisor);
+ us2e_transition(estar, new_bits, clock_tick * 1000,
+ old_divisor, divisor);
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
@@ -272,10 +291,8 @@ static int us2e_freq_target(struct cpufreq_policy *policy,
unsigned int new_index = 0;
if (cpufreq_frequency_table_target(policy,
- &us2e_freq_table[policy->cpu].table[0],
- target_freq,
- relation,
- &new_index))
+ &us2e_freq_table[policy->cpu].table[0],
+ target_freq, relation, &new_index))
return -EINVAL;
us2e_set_cpu_divider_index(policy->cpu, new_index);
@@ -292,7 +309,7 @@ static int us2e_freq_verify(struct cpufreq_policy *policy)
static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy)
{
unsigned int cpu = policy->cpu;
- unsigned long clock_tick = sparc64_get_clock_tick(cpu);
+ unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000;
struct cpufreq_frequency_table *table =
&us2e_freq_table[cpu].table[0];
@@ -351,9 +368,10 @@ static int __init us2e_freq_init(void)
memset(us2e_freq_table, 0,
(NR_CPUS * sizeof(struct us2e_freq_percpu_info)));
+ driver->init = us2e_freq_cpu_init;
driver->verify = us2e_freq_verify;
driver->target = us2e_freq_target;
- driver->init = us2e_freq_cpu_init;
+ driver->get = us2e_freq_get;
driver->exit = us2e_freq_cpu_exit;
driver->owner = THIS_MODULE,
strcpy(driver->name, "UltraSPARC-IIe");
diff --git a/arch/sparc64/kernel/us3_cpufreq.c b/arch/sparc64/kernel/us3_cpufreq.c
index 18fe54b8aa55..9080e7cd4bb0 100644
--- a/arch/sparc64/kernel/us3_cpufreq.c
+++ b/arch/sparc64/kernel/us3_cpufreq.c
@@ -56,7 +56,7 @@ static void write_safari_cfg(unsigned long val)
static unsigned long get_current_freq(unsigned int cpu, unsigned long safari_cfg)
{
- unsigned long clock_tick = sparc64_get_clock_tick(cpu);
+ unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000;
unsigned long ret;
switch (safari_cfg & SAFARI_CFG_DIV_MASK) {
@@ -76,6 +76,26 @@ static unsigned long get_current_freq(unsigned int cpu, unsigned long safari_cfg
return ret;
}
+static unsigned int us3_freq_get(unsigned int cpu)
+{
+ cpumask_t cpus_allowed;
+ unsigned long reg;
+ unsigned int ret;
+
+ if (!cpu_online(cpu))
+ return 0;
+
+ cpus_allowed = current->cpus_allowed;
+ set_cpus_allowed(current, cpumask_of_cpu(cpu));
+
+ reg = read_safari_cfg();
+ ret = get_current_freq(cpu, reg);
+
+ set_cpus_allowed(current, cpus_allowed);
+
+ return ret;
+}
+
static void us3_set_cpu_divider_index(unsigned int cpu, unsigned int index)
{
unsigned long new_bits, new_freq, reg;
@@ -88,7 +108,7 @@ static void us3_set_cpu_divider_index(unsigned int cpu, unsigned int index)
cpus_allowed = current->cpus_allowed;
set_cpus_allowed(current, cpumask_of_cpu(cpu));
- new_freq = sparc64_get_clock_tick(cpu);
+ new_freq = sparc64_get_clock_tick(cpu) / 1000;
switch (index) {
case 0:
new_bits = SAFARI_CFG_DIV_1;
@@ -150,7 +170,7 @@ static int us3_freq_verify(struct cpufreq_policy *policy)
static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
{
unsigned int cpu = policy->cpu;
- unsigned long clock_tick = sparc64_get_clock_tick(cpu);
+ unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000;
struct cpufreq_frequency_table *table =
&us3_freq_table[cpu].table[0];
@@ -206,9 +226,10 @@ static int __init us3_freq_init(void)
memset(us3_freq_table, 0,
(NR_CPUS * sizeof(struct us3_freq_percpu_info)));
+ driver->init = us3_freq_cpu_init;
driver->verify = us3_freq_verify;
driver->target = us3_freq_target;
- driver->init = us3_freq_cpu_init;
+ driver->get = us3_freq_get;
driver->exit = us3_freq_cpu_exit;
driver->owner = THIS_MODULE,
strcpy(driver->name, "UltraSPARC-III");
diff --git a/arch/sparc64/kernel/winfixup.S b/arch/sparc64/kernel/winfixup.S
index dfbc7e0dcf70..99c809a1e5ac 100644
--- a/arch/sparc64/kernel/winfixup.S
+++ b/arch/sparc64/kernel/winfixup.S
@@ -318,7 +318,7 @@ fill_fixup_dax:
nop
rdpr %pstate, %l1 ! Prepare to change globals.
mov %g4, %o1 ! Setup args for
- mov %g5, %o2 ! final call to data_access_exception.
+ mov %g5, %o2 ! final call to spitfire_data_access_exception.
andn %l1, PSTATE_MM, %l1 ! We want to be in RMO
mov %g6, %o7 ! Stash away current.
@@ -330,7 +330,7 @@ fill_fixup_dax:
mov TSB_REG, %g1
ldxa [%g1] ASI_IMMU, %g5
#endif
- call data_access_exception
+ call spitfire_data_access_exception
add %sp, PTREGS_OFF, %o0
b,pt %xcc, rtrap
@@ -391,7 +391,7 @@ window_dax_from_user_common:
109: or %g7, %lo(109b), %g7
mov %l4, %o1
mov %l5, %o2
- call data_access_exception
+ call spitfire_data_access_exception
add %sp, PTREGS_OFF, %o0
ba,pt %xcc, rtrap
clr %l6
diff --git a/arch/sparc64/lib/Makefile b/arch/sparc64/lib/Makefile
index 40dbeec7e5d6..6201f1040982 100644
--- a/arch/sparc64/lib/Makefile
+++ b/arch/sparc64/lib/Makefile
@@ -12,7 +12,7 @@ lib-y := PeeCeeI.o copy_page.o clear_page.o strlen.o strncmp.o \
U1memcpy.o U1copy_from_user.o U1copy_to_user.o \
U3memcpy.o U3copy_from_user.o U3copy_to_user.o U3patch.o \
copy_in_user.o user_fixup.o memmove.o \
- mcount.o ipcsum.o rwsem.o xor.o find_bit.o delay.o
+ mcount.o ipcsum.o rwsem.o xor.o find_bit.o delay.o mb.o
lib-$(CONFIG_DEBUG_SPINLOCK) += debuglocks.o
lib-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o
diff --git a/arch/sparc64/lib/PeeCeeI.c b/arch/sparc64/lib/PeeCeeI.c
index 3008d536e8c2..3c6cfbb20360 100644
--- a/arch/sparc64/lib/PeeCeeI.c
+++ b/arch/sparc64/lib/PeeCeeI.c
@@ -7,28 +7,31 @@
#include <asm/io.h>
#include <asm/byteorder.h>
-void outsb(void __iomem *addr, const void *src, unsigned long count)
+void outsb(unsigned long __addr, const void *src, unsigned long count)
{
+ void __iomem *addr = (void __iomem *) __addr;
const u8 *p = src;
- while(count--)
+ while (count--)
outb(*p++, addr);
}
-void outsw(void __iomem *addr, const void *src, unsigned long count)
+void outsw(unsigned long __addr, const void *src, unsigned long count)
{
- if(count) {
+ void __iomem *addr = (void __iomem *) __addr;
+
+ if (count) {
u16 *ps = (u16 *)src;
u32 *pi;
- if(((u64)src) & 0x2) {
+ if (((u64)src) & 0x2) {
u16 val = le16_to_cpup(ps);
outw(val, addr);
ps++;
count--;
}
pi = (u32 *)ps;
- while(count >= 2) {
+ while (count >= 2) {
u32 w = le32_to_cpup(pi);
pi++;
@@ -37,19 +40,21 @@ void outsw(void __iomem *addr, const void *src, unsigned long count)
count -= 2;
}
ps = (u16 *)pi;
- if(count) {
+ if (count) {
u16 val = le16_to_cpup(ps);
outw(val, addr);
}
}
}
-void outsl(void __iomem *addr, const void *src, unsigned long count)
+void outsl(unsigned long __addr, const void *src, unsigned long count)
{
- if(count) {
- if((((u64)src) & 0x3) == 0) {
+ void __iomem *addr = (void __iomem *) __addr;
+
+ if (count) {
+ if ((((u64)src) & 0x3) == 0) {
u32 *p = (u32 *)src;
- while(count--) {
+ while (count--) {
u32 val = cpu_to_le32p(p);
outl(val, addr);
p++;
@@ -60,13 +65,13 @@ void outsl(void __iomem *addr, const void *src, unsigned long count)
u32 l = 0, l2;
u32 *pi;
- switch(((u64)src) & 0x3) {
+ switch (((u64)src) & 0x3) {
case 0x2:
count -= 1;
l = cpu_to_le16p(ps) << 16;
ps++;
pi = (u32 *)ps;
- while(count--) {
+ while (count--) {
l2 = cpu_to_le32p(pi);
pi++;
outl(((l >> 16) | (l2 << 16)), addr);
@@ -86,7 +91,7 @@ void outsl(void __iomem *addr, const void *src, unsigned long count)
ps++;
l |= (l2 << 16);
pi = (u32 *)ps;
- while(count--) {
+ while (count--) {
l2 = cpu_to_le32p(pi);
pi++;
outl(((l >> 8) | (l2 << 24)), addr);
@@ -101,7 +106,7 @@ void outsl(void __iomem *addr, const void *src, unsigned long count)
pb = (u8 *)src;
l = (*pb++ << 24);
pi = (u32 *)pb;
- while(count--) {
+ while (count--) {
l2 = cpu_to_le32p(pi);
pi++;
outl(((l >> 24) | (l2 << 8)), addr);
@@ -119,16 +124,18 @@ void outsl(void __iomem *addr, const void *src, unsigned long count)
}
}
-void insb(void __iomem *addr, void *dst, unsigned long count)
+void insb(unsigned long __addr, void *dst, unsigned long count)
{
- if(count) {
+ void __iomem *addr = (void __iomem *) __addr;
+
+ if (count) {
u32 *pi;
u8 *pb = dst;
- while((((unsigned long)pb) & 0x3) && count--)
+ while ((((unsigned long)pb) & 0x3) && count--)
*pb++ = inb(addr);
pi = (u32 *)pb;
- while(count >= 4) {
+ while (count >= 4) {
u32 w;
w = (inb(addr) << 24);
@@ -139,23 +146,25 @@ void insb(void __iomem *addr, void *dst, unsigned long count)
count -= 4;
}
pb = (u8 *)pi;
- while(count--)
+ while (count--)
*pb++ = inb(addr);
}
}
-void insw(void __iomem *addr, void *dst, unsigned long count)
+void insw(unsigned long __addr, void *dst, unsigned long count)
{
- if(count) {
+ void __iomem *addr = (void __iomem *) __addr;
+
+ if (count) {
u16 *ps = dst;
u32 *pi;
- if(((unsigned long)ps) & 0x2) {
+ if (((unsigned long)ps) & 0x2) {
*ps++ = le16_to_cpu(inw(addr));
count--;
}
pi = (u32 *)ps;
- while(count >= 2) {
+ while (count >= 2) {
u32 w;
w = (le16_to_cpu(inw(addr)) << 16);
@@ -164,31 +173,33 @@ void insw(void __iomem *addr, void *dst, unsigned long count)
count -= 2;
}
ps = (u16 *)pi;
- if(count)
+ if (count)
*ps = le16_to_cpu(inw(addr));
}
}
-void insl(void __iomem *addr, void *dst, unsigned long count)
+void insl(unsigned long __addr, void *dst, unsigned long count)
{
- if(count) {
- if((((unsigned long)dst) & 0x3) == 0) {
+ void __iomem *addr = (void __iomem *) __addr;
+
+ if (count) {
+ if ((((unsigned long)dst) & 0x3) == 0) {
u32 *pi = dst;
- while(count--)
+ while (count--)
*pi++ = le32_to_cpu(inl(addr));
} else {
u32 l = 0, l2, *pi;
u16 *ps;
u8 *pb;
- switch(((unsigned long)dst) & 3) {
+ switch (((unsigned long)dst) & 3) {
case 0x2:
ps = dst;
count -= 1;
l = le32_to_cpu(inl(addr));
*ps++ = l;
pi = (u32 *)ps;
- while(count--) {
+ while (count--) {
l2 = le32_to_cpu(inl(addr));
*pi++ = (l << 16) | (l2 >> 16);
l = l2;
@@ -205,7 +216,7 @@ void insl(void __iomem *addr, void *dst, unsigned long count)
ps = (u16 *)pb;
*ps++ = ((l >> 8) & 0xffff);
pi = (u32 *)ps;
- while(count--) {
+ while (count--) {
l2 = le32_to_cpu(inl(addr));
*pi++ = (l << 24) | (l2 >> 8);
l = l2;
@@ -220,7 +231,7 @@ void insl(void __iomem *addr, void *dst, unsigned long count)
l = le32_to_cpu(inl(addr));
*pb++ = l >> 24;
pi = (u32 *)pb;
- while(count--) {
+ while (count--) {
l2 = le32_to_cpu(inl(addr));
*pi++ = (l << 8) | (l2 >> 24);
l = l2;
diff --git a/arch/sparc64/lib/copy_page.S b/arch/sparc64/lib/copy_page.S
index 23ebf2c970b7..feebb14fd27a 100644
--- a/arch/sparc64/lib/copy_page.S
+++ b/arch/sparc64/lib/copy_page.S
@@ -87,7 +87,7 @@ copy_user_page: /* %o0=dest, %o1=src, %o2=vaddr */
membar #Sync
wrpr %o2, 0x0, %pstate
- BRANCH_IF_ANY_CHEETAH(g3,o2,1f)
+cheetah_copy_page_insn:
ba,pt %xcc, 9f
nop
@@ -240,3 +240,14 @@ copy_user_page: /* %o0=dest, %o1=src, %o2=vaddr */
stw %o4, [%g6 + TI_PRE_COUNT]
.size copy_user_page, .-copy_user_page
+
+ .globl cheetah_patch_copy_page
+cheetah_patch_copy_page:
+ sethi %hi(0x01000000), %o1 ! NOP
+ sethi %hi(cheetah_copy_page_insn), %o0
+ or %o0, %lo(cheetah_copy_page_insn), %o0
+ stw %o1, [%o0]
+ membar #StoreStore
+ flush %o0
+ retl
+ nop
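
Note: cheetah_patch_copy_page above overwrites one branch with a NOP at boot so the Cheetah path is taken. The essential sequence for patching a SPARC instruction in place is: store the new opcode, order the store with membar #StoreStore, then flush the I-cache line. A hedged C rendering of that sequence (0x01000000 is the NOP opcode, as in the assembler above):

	/* Sketch: patch one SPARC instruction in place. */
	static void patch_one_insn(unsigned int *insn)
	{
		*insn = 0x01000000;				/* new opcode: nop */
		__asm__ __volatile__("membar #StoreStore\n\t"	/* order the store */
				     "flush %0"			/* invalidate I-cache line */
				     : /* no outputs */
				     : "r" (insn)
				     : "memory");
	}
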
diff --git a/arch/sparc64/lib/debuglocks.c b/arch/sparc64/lib/debuglocks.c
index f03344cf784e..f5f0b5586f01 100644
--- a/arch/sparc64/lib/debuglocks.c
+++ b/arch/sparc64/lib/debuglocks.c
@@ -12,8 +12,6 @@
#ifdef CONFIG_SMP
-#define GET_CALLER(PC) __asm__ __volatile__("mov %%i7, %0" : "=r" (PC))
-
static inline void show (char *str, spinlock_t *lock, unsigned long caller)
{
int cpu = smp_processor_id();
@@ -51,20 +49,19 @@ static inline void show_write (char *str, rwlock_t *lock, unsigned long caller)
#undef INIT_STUCK
#define INIT_STUCK 100000000
-void _do_spin_lock(spinlock_t *lock, char *str)
+void _do_spin_lock(spinlock_t *lock, char *str, unsigned long caller)
{
- unsigned long caller, val;
+ unsigned long val;
int stuck = INIT_STUCK;
int cpu = get_cpu();
int shown = 0;
- GET_CALLER(caller);
again:
__asm__ __volatile__("ldstub [%1], %0"
: "=r" (val)
: "r" (&(lock->lock))
: "memory");
- membar("#StoreLoad | #StoreStore");
+ membar_storeload_storestore();
if (val) {
while (lock->lock) {
if (!--stuck) {
@@ -72,7 +69,7 @@ again:
show(str, lock, caller);
stuck = INIT_STUCK;
}
- membar("#LoadLoad");
+ rmb();
}
goto again;
}
@@ -84,17 +81,16 @@ again:
put_cpu();
}
-int _do_spin_trylock(spinlock_t *lock)
+int _do_spin_trylock(spinlock_t *lock, unsigned long caller)
{
- unsigned long val, caller;
+ unsigned long val;
int cpu = get_cpu();
- GET_CALLER(caller);
__asm__ __volatile__("ldstub [%1], %0"
: "=r" (val)
: "r" (&(lock->lock))
: "memory");
- membar("#StoreLoad | #StoreStore");
+ membar_storeload_storestore();
if (!val) {
lock->owner_pc = ((unsigned int)caller);
lock->owner_cpu = cpu;
@@ -111,21 +107,20 @@ void _do_spin_unlock(spinlock_t *lock)
{
lock->owner_pc = 0;
lock->owner_cpu = NO_PROC_ID;
- membar("#StoreStore | #LoadStore");
+ membar_storestore_loadstore();
lock->lock = 0;
current->thread.smp_lock_count--;
}
/* Keep INIT_STUCK the same... */
-void _do_read_lock (rwlock_t *rw, char *str)
+void _do_read_lock(rwlock_t *rw, char *str, unsigned long caller)
{
- unsigned long caller, val;
+ unsigned long val;
int stuck = INIT_STUCK;
int cpu = get_cpu();
int shown = 0;
- GET_CALLER(caller);
wlock_again:
/* Wait for any writer to go away. */
while (((long)(rw->lock)) < 0) {
@@ -134,7 +129,7 @@ wlock_again:
show_read(str, rw, caller);
stuck = INIT_STUCK;
}
- membar("#LoadLoad");
+ rmb();
}
/* Try once to increment the counter. */
__asm__ __volatile__(
@@ -147,7 +142,7 @@ wlock_again:
"2:" : "=r" (val)
: "0" (&(rw->lock))
: "g1", "g7", "memory");
- membar("#StoreLoad | #StoreStore");
+ membar_storeload_storestore();
if (val)
goto wlock_again;
rw->reader_pc[cpu] = ((unsigned int)caller);
@@ -157,15 +152,13 @@ wlock_again:
put_cpu();
}
-void _do_read_unlock (rwlock_t *rw, char *str)
+void _do_read_unlock(rwlock_t *rw, char *str, unsigned long caller)
{
- unsigned long caller, val;
+ unsigned long val;
int stuck = INIT_STUCK;
int cpu = get_cpu();
int shown = 0;
- GET_CALLER(caller);
-
/* Drop our identity _first_. */
rw->reader_pc[cpu] = 0;
current->thread.smp_lock_count--;
@@ -193,14 +186,13 @@ runlock_again:
put_cpu();
}
-void _do_write_lock (rwlock_t *rw, char *str)
+void _do_write_lock(rwlock_t *rw, char *str, unsigned long caller)
{
- unsigned long caller, val;
+ unsigned long val;
int stuck = INIT_STUCK;
int cpu = get_cpu();
int shown = 0;
- GET_CALLER(caller);
wlock_again:
/* Spin while there is another writer. */
while (((long)rw->lock) < 0) {
@@ -209,7 +201,7 @@ wlock_again:
show_write(str, rw, caller);
stuck = INIT_STUCK;
}
- membar("#LoadLoad");
+ rmb();
}
/* Try to acquire the write bit. */
@@ -264,7 +256,7 @@ wlock_again:
show_write(str, rw, caller);
stuck = INIT_STUCK;
}
- membar("#LoadLoad");
+ rmb();
}
goto wlock_again;
}
@@ -278,14 +270,12 @@ wlock_again:
put_cpu();
}
-void _do_write_unlock(rwlock_t *rw)
+void _do_write_unlock(rwlock_t *rw, unsigned long caller)
{
- unsigned long caller, val;
+ unsigned long val;
int stuck = INIT_STUCK;
int shown = 0;
- GET_CALLER(caller);
-
/* Drop our identity _first_ */
rw->writer_pc = 0;
rw->writer_cpu = NO_PROC_ID;
@@ -313,13 +303,11 @@ wlock_again:
}
}
-int _do_write_trylock (rwlock_t *rw, char *str)
+int _do_write_trylock(rwlock_t *rw, char *str, unsigned long caller)
{
- unsigned long caller, val;
+ unsigned long val;
int cpu = get_cpu();
- GET_CALLER(caller);
-
/* Try to acquire the write bit. */
__asm__ __volatile__(
" mov 1, %%g3\n"
diff --git a/arch/sparc64/lib/mb.S b/arch/sparc64/lib/mb.S
new file mode 100644
index 000000000000..4004f748619f
--- /dev/null
+++ b/arch/sparc64/lib/mb.S
@@ -0,0 +1,73 @@
+/* mb.S: Out of line memory barriers.
+ *
+ * Copyright (C) 2005 David S. Miller (davem@davemloft.net)
+ */
+
+ /* These are here in an effort to more fully work around
+ * Spitfire Errata #51. Essentially, if a memory barrier
+ * occurs soon after a mispredicted branch, the chip can stop
+ * executing instructions until a trap occurs. Therefore, if
+ * interrupts are disabled, the chip can hang forever.
+ *
+ * It used to be believed that the memory barrier had to be
+ * right in the delay slot, but a case has been traced
+ * recently wherein the memory barrier was one instruction
+ * after the branch delay slot and the chip still hung. The
+ * offending sequence was the following in sym_wakeup_done()
+ * of the sym53c8xx_2 driver:
+ *
+ * call sym_ccb_from_dsa, 0
+ * movge %icc, 0, %l0
+ * brz,pn %o0, .LL1303
+ * mov %o0, %l2
+ * membar #LoadLoad
+ *
+ * The branch has to be mispredicted for the bug to occur.
+ * Therefore, we put the memory barrier explicitly into a
+ * "branch always, predicted taken" delay slot to avoid the
+ * problem case.
+ */
+
+ .text
+
+99: retl
+ nop
+
+ .globl mb
+mb: ba,pt %xcc, 99b
+ membar #LoadLoad | #LoadStore | #StoreStore | #StoreLoad
+ .size mb, .-mb
+
+ .globl rmb
+rmb: ba,pt %xcc, 99b
+ membar #LoadLoad
+ .size rmb, .-rmb
+
+ .globl wmb
+wmb: ba,pt %xcc, 99b
+ membar #StoreStore
+ .size wmb, .-wmb
+
+ .globl membar_storeload
+membar_storeload:
+ ba,pt %xcc, 99b
+ membar #StoreLoad
+ .size membar_storeload, .-membar_storeload
+
+ .globl membar_storeload_storestore
+membar_storeload_storestore:
+ ba,pt %xcc, 99b
+ membar #StoreLoad | #StoreStore
+ .size membar_storeload_storestore, .-membar_storeload_storestore
+
+ .globl membar_storeload_loadload
+membar_storeload_loadload:
+ ba,pt %xcc, 99b
+ membar #StoreLoad | #LoadLoad
+ .size membar_storeload_loadload, .-membar_storeload_loadload
+
+ .globl membar_storestore_loadstore
+membar_storestore_loadstore:
+ ba,pt %xcc, 99b
+ membar #StoreStore | #LoadStore
+ .size membar_storestore_loadstore, .-membar_storestore_loadstore
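
Note: the key detail in the mb.S comment is that the membar must sit in a "branch always, predicted taken" delay slot so a mispredicted branch can never immediately precede it. For reference, an inline-asm sketch of that same shape (illustrative, not necessarily the kernel's header macro) is:

	/* Sketch: errata-51-safe memory barrier - the membar lives in the
	 * delay slot of a branch-always that is predicted taken.
	 */
	#define membar_safe(type) \
		__asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
				     " membar " type "\n" \
				     "1:\n" \
				     : : : "memory")

	/* e.g. membar_safe("#StoreLoad") for a full store/load ordering point */
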
diff --git a/arch/sparc64/mm/generic.c b/arch/sparc64/mm/generic.c
index 6b31f6117a95..c954d91f01d0 100644
--- a/arch/sparc64/mm/generic.c
+++ b/arch/sparc64/mm/generic.c
@@ -116,37 +116,6 @@ static inline int io_remap_pud_range(struct mm_struct *mm, pud_t * pud, unsigned
return 0;
}
-int io_remap_page_range(struct vm_area_struct *vma, unsigned long from, unsigned long offset, unsigned long size, pgprot_t prot, int space)
-{
- int error = 0;
- pgd_t * dir;
- unsigned long beg = from;
- unsigned long end = from + size;
- struct mm_struct *mm = vma->vm_mm;
-
- prot = __pgprot(pg_iobits);
- offset -= from;
- dir = pgd_offset(mm, from);
- flush_cache_range(vma, beg, end);
-
- spin_lock(&mm->page_table_lock);
- while (from < end) {
- pud_t *pud = pud_alloc(mm, dir, from);
- error = -ENOMEM;
- if (!pud)
- break;
- error = io_remap_pud_range(mm, pud, from, end - from, offset + from, prot, space);
- if (error)
- break;
- from = (from + PGDIR_SIZE) & PGDIR_MASK;
- dir++;
- }
- flush_tlb_range(vma, beg, end);
- spin_unlock(&mm->page_table_lock);
-
- return error;
-}
-
int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
unsigned long pfn, unsigned long size, pgprot_t prot)
{
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 8fc413cb6acd..3fbaf342a452 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -121,15 +121,24 @@ __inline__ void flush_dcache_page_impl(struct page *page)
}
#define PG_dcache_dirty PG_arch_1
+#define PG_dcache_cpu_shift 24
+#define PG_dcache_cpu_mask (256 - 1)
+
+#if NR_CPUS > 256
+#error D-cache dirty tracking and thread_info->cpu need fixing for > 256 cpus
+#endif
#define dcache_dirty_cpu(page) \
- (((page)->flags >> 24) & (NR_CPUS - 1UL))
+ (((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
static __inline__ void set_dcache_dirty(struct page *page, int this_cpu)
{
unsigned long mask = this_cpu;
- unsigned long non_cpu_bits = ~((NR_CPUS - 1UL) << 24UL);
- mask = (mask << 24) | (1UL << PG_dcache_dirty);
+ unsigned long non_cpu_bits;
+
+ non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
+ mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);
+
__asm__ __volatile__("1:\n\t"
"ldx [%2], %%g7\n\t"
"and %%g7, %1, %%g1\n\t"
@@ -151,7 +160,7 @@ static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long c
__asm__ __volatile__("! test_and_clear_dcache_dirty\n"
"1:\n\t"
"ldx [%2], %%g7\n\t"
- "srlx %%g7, 24, %%g1\n\t"
+ "srlx %%g7, %4, %%g1\n\t"
"and %%g1, %3, %%g1\n\t"
"cmp %%g1, %0\n\t"
"bne,pn %%icc, 2f\n\t"
@@ -164,7 +173,8 @@ static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long c
"2:"
: /* no outputs */
: "r" (cpu), "r" (mask), "r" (&page->flags),
- "i" (NR_CPUS - 1UL)
+ "i" (PG_dcache_cpu_mask),
+ "i" (PG_dcache_cpu_shift)
: "g1", "g7");
}
@@ -180,7 +190,8 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p
if (pfn_valid(pfn) &&
(page = pfn_to_page(pfn), page_mapping(page)) &&
((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) {
- int cpu = ((pg_flags >> 24) & (NR_CPUS - 1UL));
+ int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
+ PG_dcache_cpu_mask);
int this_cpu = get_cpu();
/* This is just to optimize away some function calls
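
Note: the init.c hunks make the D-cache dirty tracking explicit: the owning CPU id lives in an 8-bit field of page->flags at PG_dcache_cpu_shift, alongside the PG_dcache_dirty bit, which is why NR_CPUS is capped at 256 here. Ignoring the atomic casx loop the kernel needs, the field arithmetic reduces to the sketch below (the PG_dcache_dirty bit number is an assumed value for illustration).

	/* Sketch of the flag layout only - the kernel updates page->flags with
	 * an atomic compare-and-swap loop, which this plain C version omits.
	 */
	#define PG_dcache_dirty		17		/* assumed bit number */
	#define PG_dcache_cpu_shift	24
	#define PG_dcache_cpu_mask	(256 - 1)

	static unsigned long set_dcache_dirty(unsigned long flags, int this_cpu)
	{
		flags &= ~((unsigned long)PG_dcache_cpu_mask << PG_dcache_cpu_shift);
		flags |= ((unsigned long)this_cpu << PG_dcache_cpu_shift) |
			 (1UL << PG_dcache_dirty);
		return flags;
	}

	static int dcache_dirty_cpu(unsigned long flags)
	{
		return (flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask;
	}
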
diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S
index 363770893797..8dfa825eca51 100644
--- a/arch/sparc64/mm/ultra.S
+++ b/arch/sparc64/mm/ultra.S
@@ -10,6 +10,7 @@
#include <asm/page.h>
#include <asm/spitfire.h>
#include <asm/mmu_context.h>
+#include <asm/mmu.h>
#include <asm/pil.h>
#include <asm/head.h>
#include <asm/thread_info.h>
@@ -45,6 +46,8 @@ __flush_tlb_mm: /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */
nop
nop
nop
+ nop
+ nop
.align 32
.globl __flush_tlb_pending
@@ -73,6 +76,9 @@ __flush_tlb_pending:
retl
wrpr %g7, 0x0, %pstate
nop
+ nop
+ nop
+ nop
.align 32
.globl __flush_tlb_kernel_range
@@ -224,16 +230,8 @@ __update_mmu_cache: /* %o0=hw_context, %o1=address, %o2=pte, %o3=fault_code */
or %o5, %o0, %o5
ba,a,pt %xcc, __prefill_itlb
- /* Cheetah specific versions, patched at boot time.
- *
- * This writes of the PRIMARY_CONTEXT register in this file are
- * safe even on Cheetah+ and later wrt. the page size fields.
- * The nucleus page size fields do not matter because we make
- * no data references, and these instructions execute out of a
- * locked I-TLB entry sitting in the fully assosciative I-TLB.
- * This sequence should also never trap.
- */
-__cheetah_flush_tlb_mm: /* 15 insns */
+ /* Cheetah specific versions, patched at boot time. */
+__cheetah_flush_tlb_mm: /* 18 insns */
rdpr %pstate, %g7
andn %g7, PSTATE_IE, %g2
wrpr %g2, 0x0, %pstate
@@ -241,6 +239,9 @@ __cheetah_flush_tlb_mm: /* 15 insns */
mov PRIMARY_CONTEXT, %o2
mov 0x40, %g3
ldxa [%o2] ASI_DMMU, %g2
+ srlx %g2, CTX_PGSZ1_NUC_SHIFT, %o1
+ sllx %o1, CTX_PGSZ1_NUC_SHIFT, %o1
+ or %o0, %o1, %o0 /* Preserve nucleus page size fields */
stxa %o0, [%o2] ASI_DMMU
stxa %g0, [%g3] ASI_DMMU_DEMAP
stxa %g0, [%g3] ASI_IMMU_DEMAP
@@ -250,7 +251,7 @@ __cheetah_flush_tlb_mm: /* 15 insns */
retl
wrpr %g7, 0x0, %pstate
-__cheetah_flush_tlb_pending: /* 23 insns */
+__cheetah_flush_tlb_pending: /* 26 insns */
/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
rdpr %pstate, %g7
sllx %o1, 3, %o1
@@ -259,6 +260,9 @@ __cheetah_flush_tlb_pending: /* 23 insns */
wrpr %g0, 1, %tl
mov PRIMARY_CONTEXT, %o4
ldxa [%o4] ASI_DMMU, %g2
+ srlx %g2, CTX_PGSZ1_NUC_SHIFT, %o3
+ sllx %o3, CTX_PGSZ1_NUC_SHIFT, %o3
+ or %o0, %o3, %o0 /* Preserve nucleus page size fields */
stxa %o0, [%o4] ASI_DMMU
1: sub %o1, (1 << 3), %o1
ldx [%o2 + %o1], %o3
@@ -311,14 +315,14 @@ cheetah_patch_cachetlbops:
sethi %hi(__cheetah_flush_tlb_mm), %o1
or %o1, %lo(__cheetah_flush_tlb_mm), %o1
call cheetah_patch_one
- mov 15, %o2
+ mov 18, %o2
sethi %hi(__flush_tlb_pending), %o0
or %o0, %lo(__flush_tlb_pending), %o0
sethi %hi(__cheetah_flush_tlb_pending), %o1
or %o1, %lo(__cheetah_flush_tlb_pending), %o1
call cheetah_patch_one
- mov 23, %o2
+ mov 26, %o2
#ifdef DCACHE_ALIASING_POSSIBLE
sethi %hi(__flush_dcache_page), %o0
@@ -352,9 +356,12 @@ cheetah_patch_cachetlbops:
.globl xcall_flush_tlb_mm
xcall_flush_tlb_mm:
mov PRIMARY_CONTEXT, %g2
- mov 0x40, %g4
ldxa [%g2] ASI_DMMU, %g3
+ srlx %g3, CTX_PGSZ1_NUC_SHIFT, %g4
+ sllx %g4, CTX_PGSZ1_NUC_SHIFT, %g4
+ or %g5, %g4, %g5 /* Preserve nucleus page size fields */
stxa %g5, [%g2] ASI_DMMU
+ mov 0x40, %g4
stxa %g0, [%g4] ASI_DMMU_DEMAP
stxa %g0, [%g4] ASI_IMMU_DEMAP
stxa %g3, [%g2] ASI_DMMU
@@ -366,6 +373,10 @@ xcall_flush_tlb_pending:
sllx %g1, 3, %g1
mov PRIMARY_CONTEXT, %g4
ldxa [%g4] ASI_DMMU, %g2
+ srlx %g2, CTX_PGSZ1_NUC_SHIFT, %g4
+ sllx %g4, CTX_PGSZ1_NUC_SHIFT, %g4
+ or %g5, %g4, %g5
+ mov PRIMARY_CONTEXT, %g4
stxa %g5, [%g4] ASI_DMMU
1: sub %g1, (1 << 3), %g1
ldx [%g7 + %g1], %g5
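
Note: the ultra.S changes stop the TLB flush routines from zeroing the nucleus page-size field when they write PRIMARY_CONTEXT: the current register value is shifted right and back left by CTX_PGSZ1_NUC_SHIFT to isolate the page-size bits, which are then OR'd into the new context. In plain C the mask step is just:

	/* Sketch: keep the nucleus page-size bits of the old PRIMARY_CONTEXT
	 * value when installing a new context.  The shift is a parameter here;
	 * the kernel gets CTX_PGSZ1_NUC_SHIFT from asm/mmu.h.
	 */
	static unsigned long ctx_with_pgsz(unsigned long old_reg, unsigned long new_ctx,
					   int pgsz_shift)
	{
		unsigned long pgsz_bits = (old_reg >> pgsz_shift) << pgsz_shift;

		return new_ctx | pgsz_bits;	/* low bits: context, high bits: page sizes */
	}
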
diff --git a/arch/sparc64/solaris/misc.c b/arch/sparc64/solaris/misc.c
index 15b4cfe07557..302efbcba70e 100644
--- a/arch/sparc64/solaris/misc.c
+++ b/arch/sparc64/solaris/misc.c
@@ -737,7 +737,8 @@ MODULE_LICENSE("GPL");
extern u32 tl0_solaris[8];
#define update_ttable(x) \
tl0_solaris[3] = (((long)(x) - (long)tl0_solaris - 3) >> 2) | 0x40000000; \
- __asm__ __volatile__ ("membar #StoreStore; flush %0" : : "r" (&tl0_solaris[3]))
+ wmb(); \
+ __asm__ __volatile__ ("flush %0" : : "r" (&tl0_solaris[3]))
#else
#endif
@@ -761,7 +762,8 @@ int init_module(void)
entry64_personality_patch |=
(offsetof(struct task_struct, personality) +
(sizeof(unsigned long) - 1));
- __asm__ __volatile__("membar #StoreStore; flush %0"
+ wmb();
+ __asm__ __volatile__("flush %0"
: : "r" (&entry64_personality_patch));
return 0;
}
diff --git a/arch/sparc64/solaris/socket.c b/arch/sparc64/solaris/socket.c
index 06740582717e..d3a66ea74a7f 100644
--- a/arch/sparc64/solaris/socket.c
+++ b/arch/sparc64/solaris/socket.c
@@ -16,6 +16,7 @@
#include <linux/net.h>
#include <linux/compat.h>
#include <net/compat.h>
+#include <net/sock.h>
#include <asm/uaccess.h>
#include <asm/string.h>
@@ -297,121 +298,165 @@ asmlinkage int solaris_sendmsg(int fd, struct sol_nmsghdr __user *user_msg, unsi
{
struct socket *sock;
char address[MAX_SOCK_ADDR];
- struct iovec iov[UIO_FASTIOV];
+ struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
unsigned char ctl[sizeof(struct cmsghdr) + 20];
unsigned char *ctl_buf = ctl;
- struct msghdr kern_msg;
- int err, total_len;
+ struct msghdr msg_sys;
+ int err, ctl_len, iov_size, total_len;
- if(msghdr_from_user32_to_kern(&kern_msg, user_msg))
- return -EFAULT;
- if(kern_msg.msg_iovlen > UIO_MAXIOV)
- return -EINVAL;
- err = verify_compat_iovec(&kern_msg, iov, address, VERIFY_READ);
- if (err < 0)
+ err = -EFAULT;
+ if (msghdr_from_user32_to_kern(&msg_sys, user_msg))
+ goto out;
+
+ sock = sockfd_lookup(fd, &err);
+ if (!sock)
goto out;
+
+ /* do not move before msg_sys is valid */
+ err = -EMSGSIZE;
+ if (msg_sys.msg_iovlen > UIO_MAXIOV)
+ goto out_put;
+
+ /* Check whether to allocate the iovec area*/
+ err = -ENOMEM;
+ iov_size = msg_sys.msg_iovlen * sizeof(struct iovec);
+ if (msg_sys.msg_iovlen > UIO_FASTIOV) {
+ iov = sock_kmalloc(sock->sk, iov_size, GFP_KERNEL);
+ if (!iov)
+ goto out_put;
+ }
+
+ err = verify_compat_iovec(&msg_sys, iov, address, VERIFY_READ);
+ if (err < 0)
+ goto out_freeiov;
total_len = err;
- if(kern_msg.msg_controllen) {
- struct sol_cmsghdr __user *ucmsg = kern_msg.msg_control;
+ err = -ENOBUFS;
+ if (msg_sys.msg_controllen > INT_MAX)
+ goto out_freeiov;
+
+ ctl_len = msg_sys.msg_controllen;
+ if (ctl_len) {
+ struct sol_cmsghdr __user *ucmsg = msg_sys.msg_control;
unsigned long *kcmsg;
compat_size_t cmlen;
- if (kern_msg.msg_controllen <= sizeof(compat_size_t))
- return -EINVAL;
+ err = -EINVAL;
+ if (ctl_len <= sizeof(compat_size_t))
+ goto out_freeiov;
- if(kern_msg.msg_controllen > sizeof(ctl)) {
+ if (ctl_len > sizeof(ctl)) {
err = -ENOBUFS;
- ctl_buf = kmalloc(kern_msg.msg_controllen, GFP_KERNEL);
- if(!ctl_buf)
+ ctl_buf = kmalloc(ctl_len, GFP_KERNEL);
+ if (!ctl_buf)
goto out_freeiov;
}
__get_user(cmlen, &ucmsg->cmsg_len);
kcmsg = (unsigned long *) ctl_buf;
*kcmsg++ = (unsigned long)cmlen;
err = -EFAULT;
- if(copy_from_user(kcmsg, &ucmsg->cmsg_level,
- kern_msg.msg_controllen - sizeof(compat_size_t)))
+ if (copy_from_user(kcmsg, &ucmsg->cmsg_level,
+ ctl_len - sizeof(compat_size_t)))
goto out_freectl;
- kern_msg.msg_control = ctl_buf;
+ msg_sys.msg_control = ctl_buf;
}
- kern_msg.msg_flags = solaris_to_linux_msgflags(user_flags);
+ msg_sys.msg_flags = solaris_to_linux_msgflags(user_flags);
- lock_kernel();
- sock = sockfd_lookup(fd, &err);
- if (sock != NULL) {
- if (sock->file->f_flags & O_NONBLOCK)
- kern_msg.msg_flags |= MSG_DONTWAIT;
- err = sock_sendmsg(sock, &kern_msg, total_len);
- sockfd_put(sock);
- }
- unlock_kernel();
+ if (sock->file->f_flags & O_NONBLOCK)
+ msg_sys.msg_flags |= MSG_DONTWAIT;
+ err = sock_sendmsg(sock, &msg_sys, total_len);
out_freectl:
- /* N.B. Use kfree here, as kern_msg.msg_controllen might change? */
- if(ctl_buf != ctl)
- kfree(ctl_buf);
+ if (ctl_buf != ctl)
+ sock_kfree_s(sock->sk, ctl_buf, ctl_len);
out_freeiov:
- if(kern_msg.msg_iov != iov)
- kfree(kern_msg.msg_iov);
-out:
+ if (iov != iovstack)
+ sock_kfree_s(sock->sk, iov, iov_size);
+out_put:
+ sockfd_put(sock);
+out:
return err;
}
asmlinkage int solaris_recvmsg(int fd, struct sol_nmsghdr __user *user_msg, unsigned int user_flags)
{
- struct iovec iovstack[UIO_FASTIOV];
- struct msghdr kern_msg;
- char addr[MAX_SOCK_ADDR];
struct socket *sock;
+ struct iovec iovstack[UIO_FASTIOV];
struct iovec *iov = iovstack;
+ struct msghdr msg_sys;
+ unsigned long cmsg_ptr;
+ int err, iov_size, total_len, len;
+
+ /* kernel mode address */
+ char addr[MAX_SOCK_ADDR];
+
+ /* user mode address pointers */
struct sockaddr __user *uaddr;
int __user *uaddr_len;
- unsigned long cmsg_ptr;
- int err, total_len, len = 0;
- if(msghdr_from_user32_to_kern(&kern_msg, user_msg))
+ if (msghdr_from_user32_to_kern(&msg_sys, user_msg))
return -EFAULT;
- if(kern_msg.msg_iovlen > UIO_MAXIOV)
- return -EINVAL;
- uaddr = kern_msg.msg_name;
+ sock = sockfd_lookup(fd, &err);
+ if (!sock)
+ goto out;
+
+ err = -EMSGSIZE;
+ if (msg_sys.msg_iovlen > UIO_MAXIOV)
+ goto out_put;
+
+ /* Check whether to allocate the iovec area*/
+ err = -ENOMEM;
+ iov_size = msg_sys.msg_iovlen * sizeof(struct iovec);
+ if (msg_sys.msg_iovlen > UIO_FASTIOV) {
+ iov = sock_kmalloc(sock->sk, iov_size, GFP_KERNEL);
+ if (!iov)
+ goto out_put;
+ }
+
+ /*
+ * Save the user-mode address (verify_iovec will change the
+ * kernel msghdr to use the kernel address space)
+ */
+
+ uaddr = (void __user *) msg_sys.msg_name;
uaddr_len = &user_msg->msg_namelen;
- err = verify_compat_iovec(&kern_msg, iov, addr, VERIFY_WRITE);
+ err = verify_compat_iovec(&msg_sys, iov, addr, VERIFY_WRITE);
if (err < 0)
- goto out;
+ goto out_freeiov;
total_len = err;
- cmsg_ptr = (unsigned long) kern_msg.msg_control;
- kern_msg.msg_flags = 0;
+ cmsg_ptr = (unsigned long) msg_sys.msg_control;
+ msg_sys.msg_flags = MSG_CMSG_COMPAT;
- lock_kernel();
- sock = sockfd_lookup(fd, &err);
- if (sock != NULL) {
- if (sock->file->f_flags & O_NONBLOCK)
- user_flags |= MSG_DONTWAIT;
- err = sock_recvmsg(sock, &kern_msg, total_len, user_flags);
- if(err >= 0)
- len = err;
- sockfd_put(sock);
- }
- unlock_kernel();
-
- if(uaddr != NULL && err >= 0)
- err = move_addr_to_user(addr, kern_msg.msg_namelen, uaddr, uaddr_len);
- if(err >= 0) {
- err = __put_user(linux_to_solaris_msgflags(kern_msg.msg_flags), &user_msg->msg_flags);
- if(!err) {
- /* XXX Convert cmsg back into userspace 32-bit format... */
- err = __put_user((unsigned long)kern_msg.msg_control - cmsg_ptr,
- &user_msg->msg_controllen);
- }
+ if (sock->file->f_flags & O_NONBLOCK)
+ user_flags |= MSG_DONTWAIT;
+
+ err = sock_recvmsg(sock, &msg_sys, total_len, user_flags);
+ if(err < 0)
+ goto out_freeiov;
+
+ len = err;
+
+ if (uaddr != NULL) {
+ err = move_addr_to_user(addr, msg_sys.msg_namelen, uaddr, uaddr_len);
+ if (err < 0)
+ goto out_freeiov;
}
+ err = __put_user(linux_to_solaris_msgflags(msg_sys.msg_flags), &user_msg->msg_flags);
+ if (err)
+ goto out_freeiov;
+ err = __put_user((unsigned long)msg_sys.msg_control - cmsg_ptr,
+ &user_msg->msg_controllen);
+ if (err)
+ goto out_freeiov;
+ err = len;
- if(kern_msg.msg_iov != iov)
- kfree(kern_msg.msg_iov);
+out_freeiov:
+ if (iov != iovstack)
+ sock_kfree_s(sock->sk, iov, iov_size);
+out_put:
+ sockfd_put(sock);
out:
- if(err < 0)
- return err;
- return len;
+ return err;
}
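
Note: the solaris_sendmsg()/solaris_recvmsg() rewrite looks up the socket once up front, drops lock_kernel(), and charges any large iovec allocation to the socket with sock_kmalloc()/sock_kfree_s(), unwinding through gotos. Reduced to a sketch (msg_sys and sock assumed already set up, as in the code above):

	/* Sketch: on-stack iovec array for the common case, socket-charged
	 * allocation for large vectors, mirroring the rewritten paths above.
	 */
	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
	int iov_size = msg_sys.msg_iovlen * sizeof(struct iovec);

	if (msg_sys.msg_iovlen > UIO_MAXIOV)
		return -EMSGSIZE;			/* refuse oversized vectors */

	if (msg_sys.msg_iovlen > UIO_FASTIOV) {
		iov = sock_kmalloc(sock->sk, iov_size, GFP_KERNEL);
		if (!iov)
			return -ENOMEM;
	}

	/* ... build and use the iovec ... */

	if (iov != iovstack)
		sock_kfree_s(sock->sk, iov, iov_size);	/* uncharge and free */
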