Diffstat (limited to 'arch/alpha/include')
-rw-r--r--  arch/alpha/include/asm/asm-offsets.h    |  1
-rw-r--r--  arch/alpha/include/asm/bug.h            |  3
-rw-r--r--  arch/alpha/include/asm/core_t2.h        | 34
-rw-r--r--  arch/alpha/include/asm/elf.h            |  1
-rw-r--r--  arch/alpha/include/asm/fcntl.h          | 19
-rw-r--r--  arch/alpha/include/asm/perf_event.h     |  9
-rw-r--r--  arch/alpha/include/asm/spinlock.h       | 38
-rw-r--r--  arch/alpha/include/asm/spinlock_types.h |  8
-rw-r--r--  arch/alpha/include/asm/unistd.h         | 17
9 files changed, 84 insertions, 46 deletions
diff --git a/arch/alpha/include/asm/asm-offsets.h b/arch/alpha/include/asm/asm-offsets.h
new file mode 100644
index 000000000000..d370ee36a182
--- /dev/null
+++ b/arch/alpha/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/alpha/include/asm/bug.h b/arch/alpha/include/asm/bug.h
index 1720c8ad86fe..f091682e3cc8 100644
--- a/arch/alpha/include/asm/bug.h
+++ b/arch/alpha/include/asm/bug.h
@@ -13,7 +13,8 @@
"call_pal %0 # bugchk\n\t" \
".long %1\n\t.8byte %2" \
: : "i"(PAL_bugchk), "i"(__LINE__), "i"(__FILE__)); \
- for ( ; ; ); } while (0)
+ unreachable(); \
+ } while (0)
#define HAVE_ARCH_BUG
#endif
diff --git a/arch/alpha/include/asm/core_t2.h b/arch/alpha/include/asm/core_t2.h
index 46bfff58f670..471c07292e0b 100644
--- a/arch/alpha/include/asm/core_t2.h
+++ b/arch/alpha/include/asm/core_t2.h
@@ -435,7 +435,7 @@ extern inline void t2_outl(u32 b, unsigned long addr)
set_hae(msb); \
}
-extern spinlock_t t2_hae_lock;
+extern raw_spinlock_t t2_hae_lock;
/*
* NOTE: take T2_DENSE_MEM off in each readX/writeX routine, since
@@ -448,12 +448,12 @@ __EXTERN_INLINE u8 t2_readb(const volatile void __iomem *xaddr)
unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
unsigned long result, msb;
unsigned long flags;
- spin_lock_irqsave(&t2_hae_lock, flags);
+ raw_spin_lock_irqsave(&t2_hae_lock, flags);
t2_set_hae;
result = *(vip) ((addr << 5) + T2_SPARSE_MEM + 0x00);
- spin_unlock_irqrestore(&t2_hae_lock, flags);
+ raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
return __kernel_extbl(result, addr & 3);
}
@@ -462,12 +462,12 @@ __EXTERN_INLINE u16 t2_readw(const volatile void __iomem *xaddr)
unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
unsigned long result, msb;
unsigned long flags;
- spin_lock_irqsave(&t2_hae_lock, flags);
+ raw_spin_lock_irqsave(&t2_hae_lock, flags);
t2_set_hae;
result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08);
- spin_unlock_irqrestore(&t2_hae_lock, flags);
+ raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
return __kernel_extwl(result, addr & 3);
}
@@ -480,12 +480,12 @@ __EXTERN_INLINE u32 t2_readl(const volatile void __iomem *xaddr)
unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
unsigned long result, msb;
unsigned long flags;
- spin_lock_irqsave(&t2_hae_lock, flags);
+ raw_spin_lock_irqsave(&t2_hae_lock, flags);
t2_set_hae;
result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18);
- spin_unlock_irqrestore(&t2_hae_lock, flags);
+ raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
return result & 0xffffffffUL;
}
@@ -494,14 +494,14 @@ __EXTERN_INLINE u64 t2_readq(const volatile void __iomem *xaddr)
unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
unsigned long r0, r1, work, msb;
unsigned long flags;
- spin_lock_irqsave(&t2_hae_lock, flags);
+ raw_spin_lock_irqsave(&t2_hae_lock, flags);
t2_set_hae;
work = (addr << 5) + T2_SPARSE_MEM + 0x18;
r0 = *(vuip)(work);
r1 = *(vuip)(work + (4 << 5));
- spin_unlock_irqrestore(&t2_hae_lock, flags);
+ raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
return r1 << 32 | r0;
}
@@ -510,13 +510,13 @@ __EXTERN_INLINE void t2_writeb(u8 b, volatile void __iomem *xaddr)
unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
unsigned long msb, w;
unsigned long flags;
- spin_lock_irqsave(&t2_hae_lock, flags);
+ raw_spin_lock_irqsave(&t2_hae_lock, flags);
t2_set_hae;
w = __kernel_insbl(b, addr & 3);
*(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x00) = w;
- spin_unlock_irqrestore(&t2_hae_lock, flags);
+ raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
}
__EXTERN_INLINE void t2_writew(u16 b, volatile void __iomem *xaddr)
@@ -524,13 +524,13 @@ __EXTERN_INLINE void t2_writew(u16 b, volatile void __iomem *xaddr)
unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
unsigned long msb, w;
unsigned long flags;
- spin_lock_irqsave(&t2_hae_lock, flags);
+ raw_spin_lock_irqsave(&t2_hae_lock, flags);
t2_set_hae;
w = __kernel_inswl(b, addr & 3);
*(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08) = w;
- spin_unlock_irqrestore(&t2_hae_lock, flags);
+ raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
}
/*
@@ -542,12 +542,12 @@ __EXTERN_INLINE void t2_writel(u32 b, volatile void __iomem *xaddr)
unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
unsigned long msb;
unsigned long flags;
- spin_lock_irqsave(&t2_hae_lock, flags);
+ raw_spin_lock_irqsave(&t2_hae_lock, flags);
t2_set_hae;
*(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18) = b;
- spin_unlock_irqrestore(&t2_hae_lock, flags);
+ raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
}
__EXTERN_INLINE void t2_writeq(u64 b, volatile void __iomem *xaddr)
@@ -555,14 +555,14 @@ __EXTERN_INLINE void t2_writeq(u64 b, volatile void __iomem *xaddr)
unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
unsigned long msb, work;
unsigned long flags;
- spin_lock_irqsave(&t2_hae_lock, flags);
+ raw_spin_lock_irqsave(&t2_hae_lock, flags);
t2_set_hae;
work = (addr << 5) + T2_SPARSE_MEM + 0x18;
*(vuip)work = b;
*(vuip)(work + (4 << 5)) = b >> 32;
- spin_unlock_irqrestore(&t2_hae_lock, flags);
+ raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
}
__EXTERN_INLINE void __iomem *t2_ioportmap(unsigned long addr)
diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
index 5c75c1b2352a..9baae8afe8a3 100644
--- a/arch/alpha/include/asm/elf.h
+++ b/arch/alpha/include/asm/elf.h
@@ -81,7 +81,6 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
#define ELF_DATA ELFDATA2LSB
#define ELF_ARCH EM_ALPHA
-#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 8192
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
diff --git a/arch/alpha/include/asm/fcntl.h b/arch/alpha/include/asm/fcntl.h
index 25da0017ec87..70145cbb21cb 100644
--- a/arch/alpha/include/asm/fcntl.h
+++ b/arch/alpha/include/asm/fcntl.h
@@ -1,8 +1,6 @@
#ifndef _ALPHA_FCNTL_H
#define _ALPHA_FCNTL_H
-/* open/fcntl - O_SYNC is only implemented on blocks devices and on files
-   located on an ext2 file system */
#define O_CREAT 01000 /* not fcntl */
#define O_TRUNC 02000 /* not fcntl */
#define O_EXCL 04000 /* not fcntl */
@@ -10,13 +8,28 @@
#define O_NONBLOCK 00004
#define O_APPEND 00010
-#define O_SYNC 040000
+#define O_DSYNC 040000 /* used to be O_SYNC, see below */
#define O_DIRECTORY 0100000 /* must be a directory */
#define O_NOFOLLOW 0200000 /* don't follow links */
#define O_LARGEFILE 0400000 /* will be set by the kernel on every open */
#define O_DIRECT 02000000 /* direct disk access - should check with OSF/1 */
#define O_NOATIME 04000000
#define O_CLOEXEC 010000000 /* set close_on_exec */
+/*
+ * Before Linux 2.6.33 only O_DSYNC semantics were implemented, but using
+ * the O_SYNC flag. We continue to use the existing numerical value
+ * for O_DSYNC semantics now, but using the correct symbolic name for it.
+ * This new value is used to request true Posix O_SYNC semantics. It is
+ * defined in this strange way to make sure applications compiled against
+ * new headers get at least O_DSYNC semantics on older kernels.
+ *
+ * This has the nice side-effect that we can simply test for O_DSYNC
+ * wherever we do not care if O_DSYNC or O_SYNC is used.
+ *
+ * Note: __O_SYNC must never be used directly.
+ */
+#define __O_SYNC 020000000
+#define O_SYNC (__O_SYNC|O_DSYNC)
#define F_GETLK 7
#define F_SETLK 8
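As a side note on the O_SYNC/O_DSYNC split documented in the new fcntl.h comment above: because O_SYNC is defined as (__O_SYNC | O_DSYNC), the O_DSYNC bit is set for either flag, so callers that do not care about the distinction can test O_DSYNC alone. The following minimal userspace sketch (not part of the patch; the classify() helper is purely illustrative) shows that property:

#include <fcntl.h>
#include <stdio.h>

/* Illustrative helper only: reports which sync semantics the flags request. */
static void classify(int flags)
{
	if ((flags & O_SYNC) == O_SYNC)
		printf("full POSIX O_SYNC semantics requested\n");
	else if (flags & O_DSYNC)
		printf("O_DSYNC (data integrity only) requested\n");
	else
		printf("no sync flag requested\n");
}

int main(void)
{
	classify(O_RDWR | O_SYNC);	/* sets both the new __O_SYNC bit and the old O_DSYNC bit */
	classify(O_RDWR | O_DSYNC);	/* sets only the old numerical value */
	classify(O_RDWR);
	return 0;
}

This is also the backward-compatibility property the comment describes: a binary built against the new headers and run on an older kernel still passes the old numerical value, and therefore gets at least O_DSYNC behaviour.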
diff --git a/arch/alpha/include/asm/perf_event.h b/arch/alpha/include/asm/perf_event.h
new file mode 100644
index 000000000000..3bef8522017c
--- /dev/null
+++ b/arch/alpha/include/asm/perf_event.h
@@ -0,0 +1,9 @@
+#ifndef __ASM_ALPHA_PERF_EVENT_H
+#define __ASM_ALPHA_PERF_EVENT_H
+
+/* Alpha only supports software events through this interface. */
+static inline void set_perf_event_pending(void) { }
+
+#define PERF_EVENT_INDEX_OFFSET 0
+
+#endif /* __ASM_ALPHA_PERF_EVENT_H */
diff --git a/arch/alpha/include/asm/spinlock.h b/arch/alpha/include/asm/spinlock.h
index e38fb95cb335..d0faca1e992d 100644
--- a/arch/alpha/include/asm/spinlock.h
+++ b/arch/alpha/include/asm/spinlock.h
@@ -12,18 +12,18 @@
* We make no fairness assumptions. They have a cost.
*/
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
-#define __raw_spin_is_locked(x) ((x)->lock != 0)
-#define __raw_spin_unlock_wait(x) \
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+#define arch_spin_is_locked(x) ((x)->lock != 0)
+#define arch_spin_unlock_wait(x) \
do { cpu_relax(); } while ((x)->lock)
-static inline void __raw_spin_unlock(raw_spinlock_t * lock)
+static inline void arch_spin_unlock(arch_spinlock_t * lock)
{
mb();
lock->lock = 0;
}
-static inline void __raw_spin_lock(raw_spinlock_t * lock)
+static inline void arch_spin_lock(arch_spinlock_t * lock)
{
long tmp;
@@ -43,24 +43,24 @@ static inline void __raw_spin_lock(raw_spinlock_t * lock)
: "m"(lock->lock) : "memory");
}
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
return !test_and_set_bit(0, &lock->lock);
}
/***********************************************************/
-static inline int __raw_read_can_lock(raw_rwlock_t *lock)
+static inline int arch_read_can_lock(arch_rwlock_t *lock)
{
return (lock->lock & 1) == 0;
}
-static inline int __raw_write_can_lock(raw_rwlock_t *lock)
+static inline int arch_write_can_lock(arch_rwlock_t *lock)
{
return lock->lock == 0;
}
-static inline void __raw_read_lock(raw_rwlock_t *lock)
+static inline void arch_read_lock(arch_rwlock_t *lock)
{
long regx;
@@ -80,7 +80,7 @@ static inline void __raw_read_lock(raw_rwlock_t *lock)
: "m" (*lock) : "memory");
}
-static inline void __raw_write_lock(raw_rwlock_t *lock)
+static inline void arch_write_lock(arch_rwlock_t *lock)
{
long regx;
@@ -100,7 +100,7 @@ static inline void __raw_write_lock(raw_rwlock_t *lock)
: "m" (*lock) : "memory");
}
-static inline int __raw_read_trylock(raw_rwlock_t * lock)
+static inline int arch_read_trylock(arch_rwlock_t * lock)
{
long regx;
int success;
@@ -122,7 +122,7 @@ static inline int __raw_read_trylock(raw_rwlock_t * lock)
return success;
}
-static inline int __raw_write_trylock(raw_rwlock_t * lock)
+static inline int arch_write_trylock(arch_rwlock_t * lock)
{
long regx;
int success;
@@ -144,7 +144,7 @@ static inline int __raw_write_trylock(raw_rwlock_t * lock)
return success;
}
-static inline void __raw_read_unlock(raw_rwlock_t * lock)
+static inline void arch_read_unlock(arch_rwlock_t * lock)
{
long regx;
__asm__ __volatile__(
@@ -160,17 +160,17 @@ static inline void __raw_read_unlock(raw_rwlock_t * lock)
: "m" (*lock) : "memory");
}
-static inline void __raw_write_unlock(raw_rwlock_t * lock)
+static inline void arch_write_unlock(arch_rwlock_t * lock)
{
mb();
lock->lock = 0;
}
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
#endif /* _ALPHA_SPINLOCK_H */
diff --git a/arch/alpha/include/asm/spinlock_types.h b/arch/alpha/include/asm/spinlock_types.h
index 8141eb5ebf0d..54c2afce0a1d 100644
--- a/arch/alpha/include/asm/spinlock_types.h
+++ b/arch/alpha/include/asm/spinlock_types.h
@@ -7,14 +7,14 @@
typedef struct {
volatile unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
-#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
typedef struct {
volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { 0 }
+#define __ARCH_RW_LOCK_UNLOCKED { 0 }
#endif
diff --git a/arch/alpha/include/asm/unistd.h b/arch/alpha/include/asm/unistd.h
index 7f23665122df..804e5311c841 100644
--- a/arch/alpha/include/asm/unistd.h
+++ b/arch/alpha/include/asm/unistd.h
@@ -247,6 +247,7 @@
#define __IGNORE_pause
#define __IGNORE_time
#define __IGNORE_utime
+#define __IGNORE_umount2
/*
* Linux-specific system calls begin at 300
@@ -434,10 +435,24 @@
#define __NR_timerfd 477
#define __NR_eventfd 478
#define __NR_recvmmsg 479
+#define __NR_fallocate 480
+#define __NR_timerfd_create 481
+#define __NR_timerfd_settime 482
+#define __NR_timerfd_gettime 483
+#define __NR_signalfd4 484
+#define __NR_eventfd2 485
+#define __NR_epoll_create1 486
+#define __NR_dup3 487
+#define __NR_pipe2 488
+#define __NR_inotify_init1 489
+#define __NR_preadv 490
+#define __NR_pwritev 491
+#define __NR_rt_tgsigqueueinfo 492
+#define __NR_perf_event_open 493
#ifdef __KERNEL__
-#define NR_SYSCALLS 480
+#define NR_SYSCALLS 494
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR