author		Daniel Vetter <[email protected]>	2013-03-19 09:47:30 +0100
committer	Daniel Vetter <[email protected]>	2013-03-19 09:47:30 +0100
commit		0d4a42f6bd298e826620585e766a154ab460617a
tree		406d8f7778691d858dbe3e48e4bbb10e99c0a58a /arch/mips/include/asm/barrier.h
parent		d62b4892f3d9f7dd2002e5309be10719d6805b0f
parent		a937536b868b8369b98967929045f1df54234323
Merge tag 'v3.9-rc3' into drm-intel-next-queued
Backmerge so that I can merge Imre Deak's coalesced sg entries fixes,
which depend upon the new for_each_sg_page introduced in
commit a321e91b6d73ed011ffceed384c40d2785cf723b
Author: Imre Deak <[email protected]>
Date: Wed Feb 27 17:02:56 2013 -0800
lib/scatterlist: add simple page iterator
The merge itself is just two trivial conflicts:
Signed-off-by: Daniel Vetter <[email protected]>
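
For context, a minimal sketch of how the for_each_sg_page() iterator referenced above is used, as added in commit a321e91b6d73: it walks every page backing a scatterlist regardless of how the sg entries are coalesced. The helper name count_sg_pages() is made up for illustration, and the piter.page access reflects the 3.9-era iterator (later kernels wrap this in sg_page_iter_page()).

#include <linux/scatterlist.h>

/* Illustrative helper (not part of this commit): count the pages behind
 * a scatterlist by walking it page by page with for_each_sg_page(). */
static unsigned int count_sg_pages(struct scatterlist *sgl, unsigned int nents)
{
	struct sg_page_iter piter;
	unsigned int n = 0;

	/* last argument 0: start at the first page of the first sg entry */
	for_each_sg_page(sgl, &piter, nents, 0)
		n++;	/* piter.page is the current struct page */

	return n;
}
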
Diffstat (limited to 'arch/mips/include/asm/barrier.h')
-rw-r--r--	arch/mips/include/asm/barrier.h	10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
index f7fdc24e972d..314ab5532019 100644
--- a/arch/mips/include/asm/barrier.h
+++ b/arch/mips/include/asm/barrier.h
@@ -18,7 +18,7 @@
  * over this barrier. All reads preceding this primitive are guaranteed
  * to access memory (but not necessarily other CPUs' caches) before any
  * reads following this primitive that depend on the data return by
- * any of the preceding reads. This primitive is much lighter weight than
+ * any of the preceding reads. This primitive is much lighter weight than
  * rmb() on most CPUs, and is never heavier weight than is
  * rmb().
  *
@@ -43,7 +43,7 @@
  * </programlisting>
  *
  * because the read of "*q" depends on the read of "p" and these
- * two reads are separated by a read_barrier_depends(). However,
+ * two reads are separated by a read_barrier_depends(). However,
  * the following code, with the same initial values for "a" and "b":
  *
  * <programlisting>
@@ -57,7 +57,7 @@
  * </programlisting>
  *
  * does not enforce ordering, since there is no data dependency between
- * the read of "a" and the read of "b". Therefore, on some CPUs, such
+ * the read of "a" and the read of "b". Therefore, on some CPUs, such
  * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
  * in cases like this where there are no data dependencies.
  */
@@ -92,7 +92,7 @@
 		: "memory")
 #ifdef CONFIG_CPU_CAVIUM_OCTEON
 # define OCTEON_SYNCW_STR	".set push\n.set arch=octeon\nsyncw\nsyncw\n.set pop\n"
-# define __syncw()	__asm__ __volatile__(OCTEON_SYNCW_STR : : : "memory")
+# define __syncw()	__asm__ __volatile__(OCTEON_SYNCW_STR : : : "memory")
 
 # define fast_wmb()	__syncw()
 # define fast_rmb()	barrier()
@@ -158,7 +158,7 @@
 #endif
 
 #if defined(CONFIG_WEAK_REORDERING_BEYOND_LLSC) && defined(CONFIG_SMP)
-#define __WEAK_LLSC_MB	" sync \n"
+#define __WEAK_LLSC_MB	" sync \n"
 #else
 #define __WEAK_LLSC_MB	" \n"
 #endif
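
For reference, a rough C sketch (not part of the diff above) of the two cases the barrier.h comments touched by these hunks describe, using the same "p"/"q" and "a"/"b" names from those comments; the function names are made up for illustration.

#include <asm/barrier.h>

/* Ordered case: the second read depends on the value returned by the first,
 * so read_barrier_depends() is enough to order them. */
static int read_with_dependency(int **p)
{
	int *q = *p;			/* read of "p" */
	read_barrier_depends();
	return *q;			/* read of "*q" depends on the read of "p" */
}

/* Unordered case: no data dependency links the two reads, so
 * read_barrier_depends() does not order them; rmb() would be needed here. */
static void read_without_dependency(int *a, int *b, int *x, int *y)
{
	*y = *b;			/* read of "b" */
	read_barrier_depends();		/* does NOT order the read of "a" below */
	*x = *a;			/* independent read of "a" */
}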