3 exec-all.h | 165 ------------------------------------------
4 linux-user/arm/syscall.h | 4 -
5 linux-user/main.c | 94 +++++++++++++++++++++---
7 linux-user/syscall.c | 91 ++++++++++++++++++++++-
8 qemu_spinlock.h | 181 +++++++++++++++++++++++++++++++++++++++++++++++
9 target-arm/cpu.h | 10 ++
11 target-arm/translate.c | 9 ++
12 10 files changed, 405 insertions(+), 183 deletions(-)
14 Index: trunk/configure
15 ===================================================================
16 --- trunk.orig/configure 2008-04-24 20:16:52.000000000 +0100
17 +++ trunk/configure 2008-04-24 20:16:53.000000000 +0100
28 *) echo "ERROR: unknown option $opt"; show_help="yes"
30 + --disable-nptl) nptl="no"
36 echo " --disable-linux-user disable all linux usermode emulation targets"
37 echo " --enable-darwin-user enable all darwin usermode emulation targets"
38 echo " --disable-darwin-user disable all darwin usermode emulation targets"
39 +echo " --disable-nptl disable usermode NPTL guest support"
40 echo " --fmod-lib path to FMOD library"
41 echo " --fmod-inc path to FMOD includes"
42 echo " --enable-uname-release=R Return R for uname -r in usermode emulation"
58 +if $cc -c -o $TMPO $TMPC 2> /dev/null ; then
64 ##########################################
68 echo "Documentation $build_docs"
69 [ ! -z "$uname_release" ] && \
70 echo "uname -r $uname_release"
71 +echo "NPTL support $nptl"
73 if test $sdl_too_old = "yes"; then
74 echo "-> Your SDL version is too old - please upgrade to have SDL support"
76 echo "#define TARGET_ARM 1" >> $config_h
77 echo "#define CONFIG_NO_DYNGEN_OP 1" >> $config_h
79 + if test "$nptl" = "yes" ; then
80 + echo "#define USE_NPTL 1" >> $config_h
84 echo "TARGET_ARCH=cris" >> $config_mak
85 Index: trunk/exec-all.h
86 ===================================================================
87 --- trunk.orig/exec-all.h 2008-04-24 20:16:41.000000000 +0100
88 +++ trunk/exec-all.h 2008-04-24 20:16:53.000000000 +0100
90 extern CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
91 extern void *io_mem_opaque[IO_MEM_NB_ENTRIES];
93 -#if defined(__hppa__)
95 -typedef int spinlock_t[4];
97 -#define SPIN_LOCK_UNLOCKED { 1, 1, 1, 1 }
99 -static inline void resetlock (spinlock_t *p)
101 - (*p)[0] = (*p)[1] = (*p)[2] = (*p)[3] = 1;
106 -typedef int spinlock_t;
108 -#define SPIN_LOCK_UNLOCKED 0
110 -static inline void resetlock (spinlock_t *p)
112 - *p = SPIN_LOCK_UNLOCKED;
117 -#if defined(__powerpc__)
118 -static inline int testandset (int *p)
121 - __asm__ __volatile__ (
122 - "0: lwarx %0,0,%1\n"
125 - " stwcx. %2,0,%1\n"
129 - : "r" (p), "r" (1), "r" (0)
130 - : "cr0", "memory");
133 -#elif defined(__i386__)
134 -static inline int testandset (int *p)
136 - long int readval = 0;
138 - __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
139 - : "+m" (*p), "+a" (readval)
144 -#elif defined(__x86_64__)
145 -static inline int testandset (int *p)
147 - long int readval = 0;
149 - __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
150 - : "+m" (*p), "+a" (readval)
155 -#elif defined(__s390__)
156 -static inline int testandset (int *p)
160 - __asm__ __volatile__ ("0: cs %0,%1,0(%2)\n"
163 - : "r" (1), "a" (p), "0" (*p)
164 - : "cc", "memory" );
167 -#elif defined(__alpha__)
168 -static inline int testandset (int *p)
173 - __asm__ __volatile__ ("0: mov 1,%2\n"
180 - : "=r" (ret), "=m" (*p), "=r" (one)
184 -#elif defined(__sparc__)
185 -static inline int testandset (int *p)
189 - __asm__ __volatile__("ldstub [%1], %0"
194 - return (ret ? 1 : 0);
196 -#elif defined(__arm__)
197 -static inline int testandset (int *spinlock)
199 - register unsigned int ret;
200 - __asm__ __volatile__("swp %0, %1, [%2]"
202 - : "0"(1), "r"(spinlock));
206 -#elif defined(__mc68000)
207 -static inline int testandset (int *p)
210 - __asm__ __volatile__("tas %1; sne %0"
216 -#elif defined(__hppa__)
218 -/* Because malloc only guarantees 8-byte alignment for malloc'd data,
219 - and GCC only guarantees 8-byte alignment for stack locals, we can't
220 - be assured of 16-byte alignment for atomic lock data even if we
221 - specify "__attribute ((aligned(16)))" in the type declaration. So,
222 - we use a struct containing an array of four ints for the atomic lock
223 - type and dynamically select the 16-byte aligned int from the array
224 - for the semaphore. */
225 -#define __PA_LDCW_ALIGNMENT 16
226 -static inline void *ldcw_align (void *p) {
227 - unsigned long a = (unsigned long)p;
228 - a = (a + __PA_LDCW_ALIGNMENT - 1) & ~(__PA_LDCW_ALIGNMENT - 1);
232 -static inline int testandset (spinlock_t *p)
236 - __asm__ __volatile__("ldcw 0(%1),%0"
243 -#elif defined(__ia64)
245 -#include <ia64intrin.h>
247 -static inline int testandset (int *p)
249 - return __sync_lock_test_and_set (p, 1);
251 -#elif defined(__mips__)
252 -static inline int testandset (int *p)
256 - __asm__ __volatile__ (
265 - : "=r" (ret), "+R" (*p)
272 -#error unimplemented CPU support
275 -#if defined(CONFIG_USER_ONLY)
276 -static inline void spin_lock(spinlock_t *lock)
278 - while (testandset(lock));
281 -static inline void spin_unlock(spinlock_t *lock)
286 -static inline int spin_trylock(spinlock_t *lock)
288 - return !testandset(lock);
291 -static inline void spin_lock(spinlock_t *lock)
295 -static inline void spin_unlock(spinlock_t *lock)
299 -static inline int spin_trylock(spinlock_t *lock)
304 +#include "qemu_spinlock.h"
306 extern spinlock_t tb_lock;
308 Index: trunk/linux-user/arm/syscall.h
309 ===================================================================
310 --- trunk.orig/linux-user/arm/syscall.h 2008-04-24 20:16:41.000000000 +0100
311 +++ trunk/linux-user/arm/syscall.h 2008-04-24 20:16:53.000000000 +0100
313 #define ARM_SYSCALL_BASE 0x900000
314 #define ARM_THUMB_SYSCALL 0
316 -#define ARM_NR_cacheflush (ARM_SYSCALL_BASE + 0xf0000 + 2)
317 +#define ARM_NR_BASE 0xf0000
318 +#define ARM_NR_cacheflush (ARM_NR_BASE + 2)
319 +#define ARM_NR_set_tls (ARM_NR_BASE + 5)
321 #define ARM_NR_semihosting 0x123456
322 #define ARM_NR_thumb_semihosting 0xAB
323 Index: trunk/linux-user/main.c
324 ===================================================================
325 --- trunk.orig/linux-user/main.c 2008-04-24 20:16:47.000000000 +0100
326 +++ trunk/linux-user/main.c 2008-04-24 20:17:38.000000000 +0100
331 +/* Handle a jump to the kernel code page. */
333 +do_kernel_trap(CPUARMState *env)
339 + switch (env->regs[15]) {
340 + case 0xffff0fc0: /* __kernel_cmpxchg */
341 + /* XXX: This only works between threads, not between processes.
342 + Use native atomic operations. */
343 + /* ??? This probably breaks horribly if the access segfaults. */
345 + ptr = (uint32_t *)env->regs[2];
346 + cpsr = cpsr_read(env);
347 + if (*ptr == env->regs[0]) {
348 + *ptr = env->regs[1];
355 + cpsr_write(env, cpsr, CPSR_C);
358 + case 0xffff0fe0: /* __kernel_get_tls */
359 + env->regs[0] = env->cp15.c13_tls2;
364 + /* Jump back to the caller. */
365 + addr = env->regs[14];
370 + env->regs[15] = addr;
375 void cpu_loop(CPUARMState *env)
382 - if (n == ARM_NR_cacheflush) {
383 - arm_cache_flush(env->regs[0], env->regs[1]);
384 - } else if (n == ARM_NR_semihosting
385 - || n == ARM_NR_thumb_semihosting) {
386 + if (n == ARM_NR_semihosting
387 + || n == ARM_NR_thumb_semihosting) {
388 env->regs[0] = do_arm_semihosting (env);
389 } else if (n == 0 || n >= ARM_SYSCALL_BASE
390 || (env->thumb && n == ARM_THUMB_SYSCALL)) {
391 @@ -489,14 +531,34 @@
392 n -= ARM_SYSCALL_BASE;
395 - env->regs[0] = do_syscall(env,
403 + if ( n > ARM_NR_BASE) {
406 + case ARM_NR_cacheflush:
407 + arm_cache_flush(env->regs[0], env->regs[1]);
410 + case ARM_NR_set_tls:
411 + cpu_set_tls(env, env->regs[0]);
416 + printf ("Error: Bad syscall: %x\n", n);
422 + env->regs[0] = do_syscall(env,
438 + case EXCP_KERNEL_TRAP:
439 + if (do_kernel_trap(env))
444 fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n",
445 @@ -1994,6 +2060,11 @@
446 int drop_ld_preload = 0, environ_count = 0;
447 char **target_environ, **wrk, **dst;
449 + char *assume_kernel = getenv("QEMU_ASSUME_KERNEL");
452 + setenv("LD_ASSUME_KERNEL", assume_kernel, 1);
457 @@ -2403,6 +2474,10 @@
458 ts->heap_base = info->brk;
459 /* This will be filled in on the first SYS_HEAPINFO call. */
461 + /* Register the magic kernel code page. The cpu will generate a
462 + special exception when it tries to execute code here. We can't
463 + put real code here because it may be in use by the host kernel. */
464 + page_set_flags(0xffff0000, 0xffff0fff, 0);
468 Index: trunk/linux-user/qemu.h
469 ===================================================================
470 --- trunk.orig/linux-user/qemu.h 2008-04-24 20:16:41.000000000 +0100
471 +++ trunk/linux-user/qemu.h 2008-04-24 20:16:53.000000000 +0100
477 + uint32_t *child_tidptr;
479 int used; /* non zero if used */
480 struct image_info *info;
482 Index: trunk/linux-user/syscall.c
483 ===================================================================
484 --- trunk.orig/linux-user/syscall.c 2008-04-24 20:16:50.000000000 +0100
485 +++ trunk/linux-user/syscall.c 2008-04-24 20:19:52.000000000 +0100
487 #define tchars host_tchars /* same as target */
488 #define ltchars host_ltchars /* same as target */
490 +#include <linux/futex.h>
491 #include <linux/termios.h>
492 #include <linux/unistd.h>
493 #include <linux/utsname.h>
495 #include <linux/kd.h>
498 +#include "qemu_spinlock.h"
503 +#define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
504 + CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
506 +/* XXX: Hardcode the above values. */
507 +#define CLONE_NPTL_FLAGS2 0
510 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_SPARC) \
511 || defined(TARGET_M68K) || defined(TARGET_SH4) || defined(TARGET_CRIS)
512 /* 16 bit uid wrappers emulation */
513 @@ -2695,16 +2705,25 @@
518 #endif /* defined(TARGET_I386) */
520 /* this stack is the equivalent of the kernel stack associated with a
522 #define NEW_STACK_SIZE 8192
525 +static spinlock_t nptl_lock = SPIN_LOCK_UNLOCKED;
528 static int clone_func(void *arg)
 532 + /* Wait until the parent has finished initializing the tls state. */
533 + while (!spin_trylock(&nptl_lock))
535 + spin_unlock(&nptl_lock);
540 @@ -2712,15 +2731,27 @@
542 /* do_fork() Must return host values and target errnos (unlike most
543 do_*() functions). */
544 -int do_fork(CPUState *env, unsigned int flags, abi_ulong newsp)
545 +int do_fork(CPUState *env, unsigned int flags, unsigned long newsp,
546 + uint32_t *parent_tidptr, void *newtls,
547 + uint32_t *child_tidptr)
553 +#if defined(TARGET_I386)
554 + uint64_t *new_gdt_table;
557 + unsigned int nptl_flags;
559 + if (flags & CLONE_PARENT_SETTID)
560 + *parent_tidptr = gettid();
562 if (flags & CLONE_VM) {
563 ts = malloc(sizeof(TaskState) + NEW_STACK_SIZE);
566 memset(ts, 0, sizeof(TaskState));
567 new_stack = ts->stack;
569 @@ -2732,6 +2763,29 @@
570 #if defined(TARGET_I386)
572 newsp = env->regs[R_ESP];
573 + new_gdt_table = malloc(9 * 8);
574 + if (!new_gdt_table) {
578 + /* Copy main GDT table from parent, but clear TLS entries */
579 + memcpy(new_gdt_table, g2h(env->gdt.base), 6 * 8);
580 + memset(&new_gdt_table[6], 0, 3 * 8);
581 + new_env->gdt.base = h2g(new_gdt_table);
582 + if (flags & 0x00080000 /* CLONE_SETTLS */) {
583 + ret = do_set_thread_area(new_env, new_env->regs[R_ESI]);
585 + free(new_gdt_table);
590 + cpu_x86_load_seg(env, R_CS, new_env->regs[R_CS]);
591 + cpu_x86_load_seg(env, R_DS, new_env->regs[R_DS]);
592 + cpu_x86_load_seg(env, R_ES, new_env->regs[R_ES]);
593 + cpu_x86_load_seg(env, R_SS, new_env->regs[R_SS]);
594 + cpu_x86_load_seg(env, R_FS, new_env->regs[R_FS]);
595 + cpu_x86_load_seg(env, R_GS, new_env->regs[R_GS]);
596 new_env->regs[R_ESP] = newsp;
597 new_env->regs[R_EAX] = 0;
598 #elif defined(TARGET_ARM)
599 @@ -2784,16 +2838,67 @@
600 #error unsupported target CPU
602 new_env->opaque = ts;
604 + nptl_flags = flags;
605 + flags &= ~CLONE_NPTL_FLAGS2;
607 + if (nptl_flags & CLONE_CHILD_CLEARTID) {
608 + ts->child_tidptr = child_tidptr;
611 + if (nptl_flags & CLONE_SETTLS)
612 + cpu_set_tls (new_env, newtls);
614 + /* Grab the global cpu lock so that the thread setup appears
616 + if (nptl_flags & CLONE_CHILD_SETTID)
617 + spin_lock(&nptl_lock);
620 + if (flags & CLONE_NPTL_FLAGS2)
624 + if (CLONE_VFORK & flags)
627 ret = __clone2(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
629 ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
633 + if (nptl_flags & CLONE_CHILD_SETTID)
634 + *child_tidptr = ret;
637 + /* Allow the child to continue. */
638 + if (nptl_flags & CLONE_CHILD_SETTID)
639 + spin_unlock(&nptl_lock);
642 /* if no CLONE_VM, we consider it is a fork */
643 - if ((flags & ~CSIGNAL) != 0)
644 + if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
648 + /* There is a race condition here. The parent process could
649 + theoretically read the TID in the child process before the child
650 + tid is set. This would require using either ptrace
651 + (not implemented) or having *_tidptr to point at a shared memory
652 + mapping. We can't repeat the spinlock hack used above because
653 + the child process gets its own copy of the lock. */
655 + /* Child Process. */
656 + if (flags & CLONE_CHILD_SETTID)
657 + *child_tidptr = gettid();
658 + ts = (TaskState *)env->opaque;
659 + if (flags & CLONE_CHILD_CLEARTID)
660 + ts->child_tidptr = child_tidptr;
661 + if (flags & CLONE_SETTLS)
662 + cpu_set_tls (env, newtls);
668 @@ -3052,6 +3157,68 @@
669 unlock_user_struct(target_ts, target_addr, 1);
672 +static long do_futex(target_ulong uaddr, int op, uint32_t val,
673 + target_ulong utime, target_ulong uaddr2,
676 + struct timespec host_utime;
677 + unsigned long val2 = utime;
679 + if (utime && (op == FUTEX_WAIT || op == FUTEX_LOCK_PI)) {
680 + target_to_host_timespec(&host_utime, utime);
681 + val2 = (unsigned long)&host_utime;
686 + case FUTEX_CMP_REQUEUE:
687 + val3 = tswap32(val3);
688 + case FUTEX_REQUEUE:
689 + val2 = tswap32(val2);
692 + val = tswap32(val);
693 + case FUTEX_LOCK_PI: /* This one's icky, but comes out OK */
694 + case FUTEX_UNLOCK_PI:
697 + gemu_log("qemu: Unsupported futex op %d\n", op);
700 +#if 0 /* No, it's worse than this */
701 + if (op == FUTEX_WAKE_OP) {
702 + /* Need to munge the secondary operation (val3) */
703 + val3 = tswap32(val3);
704 + int op2 = (val3 >> 28) & 7;
705 + int cmp = (val3 >> 24) & 15;
706 + int oparg = (val3 << 8) >> 20;
707 + int cmparg = (val3 << 20) >> 20;
708 + int shift = val3 & (FUTEX_OP_OPARG_SHIFT << 28);
711 + oparg = (oparg & 7) + 24 - (oparg & 24);
713 + if (op2 == FUTEX_OP_ADD) {
714 + gemu_log("qemu: Unsupported wrong-endian FUTEX_OP_ADD\n");
717 + if (cmparg == FUTEX_OP_CMP_LT || cmparg == FUTEX_OP_CMP_GE ||
718 + cmparg == FUTEX_OP_CMP_LE || cmparg == FUTEX_OP_CMP_GT) {
719 + gemu_log("qemu: Unsupported wrong-endian futex cmparg %d\n", cmparg);
722 + val3 = shift | (op2<<28) | (cmp<<24) | (oparg<<12) | cmparg;
726 + return syscall(__NR_futex, g2h(uaddr), op, val, val2, g2h(uaddr2), val3);
729 +int do_set_tid_address(target_ulong tidptr)
731 + return syscall(__NR_set_tid_address, g2h(tidptr));
734 /* do_syscall() should always have a single exit point at the end so
735 that actions, such as logging of syscall results, can be performed.
736 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
737 @@ -3076,7 +3243,7 @@
740 gdb_exit(cpu_env, arg1);
741 - /* XXX: should free thread stack and CPU env */
742 + /* XXX: should free thread stack, GDT and CPU env */
744 ret = 0; /* avoid warning */
746 @@ -3118,7 +3285,7 @@
750 - ret = get_errno(do_fork(cpu_env, SIGCHLD, 0));
751 + ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, NULL, NULL, NULL));
753 #ifdef TARGET_NR_waitpid
754 case TARGET_NR_waitpid:
755 @@ -4482,7 +4649,8 @@
756 ret = get_errno(fsync(arg1));
758 case TARGET_NR_clone:
759 - ret = get_errno(do_fork(cpu_env, arg1, arg2));
760 + ret = get_errno(do_fork(cpu_env, arg1, arg2, (uint32_t *)arg3,
761 + (void *)arg4, (uint32_t *)arg5));
763 #ifdef __NR_exit_group
764 /* new thread calls */
765 @@ -4943,7 +5111,8 @@
767 #ifdef TARGET_NR_vfork
768 case TARGET_NR_vfork:
769 - ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD, 0));
770 + ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD, 0,
771 + NULL, NULL, NULL));
774 #ifdef TARGET_NR_ugetrlimit
775 @@ -5521,6 +5690,9 @@
776 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
777 ret = do_set_thread_area(cpu_env, arg1);
780 + ret = get_errno(do_set_thread_area(cpu_env, arg1));
783 goto unimplemented_nowarn;
785 @@ -5538,6 +5710,12 @@
786 goto unimplemented_nowarn;
789 +#ifdef TARGET_NR_futex
790 + case TARGET_NR_futex:
791 + ret = get_errno(do_futex(arg1, arg2, arg3, arg4, arg5, arg6));
795 #ifdef TARGET_NR_clock_gettime
796 case TARGET_NR_clock_gettime:
798 Index: trunk/qemu_spinlock.h
799 ===================================================================
800 --- /dev/null 1970-01-01 00:00:00.000000000 +0000
801 +++ trunk/qemu_spinlock.h 2008-04-24 20:16:53.000000000 +0100
804 + * Atomic operation helper include
806 + * Copyright (c) 2005 Fabrice Bellard
808 + * This library is free software; you can redistribute it and/or
809 + * modify it under the terms of the GNU Lesser General Public
810 + * License as published by the Free Software Foundation; either
811 + * version 2 of the License, or (at your option) any later version.
813 + * This library is distributed in the hope that it will be useful,
814 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
815 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
816 + * Lesser General Public License for more details.
818 + * You should have received a copy of the GNU Lesser General Public
819 + * License along with this library; if not, write to the Free Software
820 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
822 +#ifndef QEMU_SPINLOCK_H
823 +#define QEMU_SPINLOCK_H
826 +static inline int testandset (int *p)
829 + __asm__ __volatile__ (
830 + "0: lwarx %0,0,%1\n"
833 + " stwcx. %2,0,%1\n"
837 + : "r" (p), "r" (1), "r" (0)
838 + : "cr0", "memory");
844 +static inline int testandset (int *p)
846 + long int readval = 0;
848 + __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
849 + : "+m" (*p), "+a" (readval)
857 +static inline int testandset (int *p)
859 + long int readval = 0;
861 + __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
862 + : "+m" (*p), "+a" (readval)
870 +static inline int testandset (int *p)
874 + __asm__ __volatile__ ("0: cs %0,%1,0(%2)\n"
877 + : "r" (1), "a" (p), "0" (*p)
878 + : "cc", "memory" );
884 +static inline int testandset (int *p)
889 + __asm__ __volatile__ ("0: mov 1,%2\n"
896 + : "=r" (ret), "=m" (*p), "=r" (one)
903 +static inline int testandset (int *p)
907 + __asm__ __volatile__("ldstub [%1], %0"
912 + return (ret ? 1 : 0);
917 +static inline int testandset (int *spinlock)
919 + register unsigned int ret;
920 + __asm__ __volatile__("swp %0, %1, [%2]"
922 + : "0"(1), "r"(spinlock));
929 +static inline int testandset (int *p)
932 + __asm__ __volatile__("tas %1; sne %0"
941 +/* Because malloc only guarantees 8-byte alignment for malloc'd data,
942 + and GCC only guarantees 8-byte alignment for stack locals, we can't
943 + be assured of 16-byte alignment for atomic lock data even if we
944 + specify "__attribute ((aligned(16)))" in the type declaration. So,
945 + we use a struct containing an array of four ints for the atomic lock
946 + type and dynamically select the 16-byte aligned int from the array
947 + for the semaphore. */
948 +#define __PA_LDCW_ALIGNMENT 16
949 +static inline void *ldcw_align (void *p) {
950 + unsigned long a = (unsigned long)p;
951 + a = (a + __PA_LDCW_ALIGNMENT - 1) & ~(__PA_LDCW_ALIGNMENT - 1);
955 +static inline int testandset (spinlock_t *p)
959 + __asm__ __volatile__("ldcw 0(%1),%0"
968 +#include <ia64intrin.h>
970 +static inline int testandset (int *p)
972 + return __sync_lock_test_and_set (p, 1);
977 +static inline int testandset (int *p)
981 + __asm__ __volatile__ (
990 + : "=r" (ret), "+R" (*p)
998 +#if defined(__hppa__)
1000 +typedef int spinlock_t[4];
1002 +#define SPIN_LOCK_UNLOCKED { 1, 1, 1, 1 }
1004 +static inline void resetlock (spinlock_t *p)
1006 + (*p)[0] = (*p)[1] = (*p)[2] = (*p)[3] = 1;
1011 +typedef int spinlock_t;
1013 +#define SPIN_LOCK_UNLOCKED 0
1015 +static inline void resetlock (spinlock_t *p)
1017 + *p = SPIN_LOCK_UNLOCKED;
1022 +#if defined(CONFIG_USER_ONLY)
1023 +static inline void spin_lock(spinlock_t *lock)
1025 + while (testandset(lock));
1028 +static inline void spin_unlock(spinlock_t *lock)
1033 +static inline int spin_trylock(spinlock_t *lock)
1035 + return !testandset(lock);
1038 +static inline void spin_lock(spinlock_t *lock)
1042 +static inline void spin_unlock(spinlock_t *lock)
1046 +static inline int spin_trylock(spinlock_t *lock)
1053 Index: trunk/target-arm/cpu.h
1054 ===================================================================
1055 --- trunk.orig/target-arm/cpu.h 2008-04-24 20:16:41.000000000 +0100
1056 +++ trunk/target-arm/cpu.h 2008-04-24 20:16:53.000000000 +0100
1060 #define EXCP_EXCEPTION_EXIT 8 /* Return from v7M exception. */
1061 +#define EXCP_KERNEL_TRAP 9 /* Jumped to kernel code page. */
1063 #define ARMV7M_EXCP_RESET 1
1064 #define ARMV7M_EXCP_NMI 2
1065 @@ -218,6 +219,15 @@
1066 void cpu_lock(void);
1067 void cpu_unlock(void);
1069 +void cpu_lock(void);
1070 +void cpu_unlock(void);
1071 +#if defined(USE_NPTL)
1072 +static inline void cpu_set_tls(CPUARMState *env, void *newtls)
1074 + env->cp15.c13_tls2 = (uint32_t)(long)newtls;
1078 #define CPSR_M (0x1f)
1079 #define CPSR_T (1 << 5)
1080 #define CPSR_F (1 << 6)
1081 Index: trunk/target-arm/translate.c
1082 ===================================================================
1083 --- trunk.orig/target-arm/translate.c 2008-04-24 20:16:41.000000000 +0100
1084 +++ trunk/target-arm/translate.c 2008-04-24 20:16:53.000000000 +0100
1085 @@ -8606,7 +8606,14 @@
1086 gen_exception(EXCP_EXCEPTION_EXIT);
1090 +#ifdef CONFIG_USER_ONLY
1091 + /* Intercept jump to the magic kernel page. */
1092 + if (dc->pc > 0xffff0000) {
1093 + gen_exception(EXCP_KERNEL_TRAP);
1094 + dc->is_jmp = DISAS_UPDATE;
1098 if (env->nb_breakpoints > 0) {
1099 for(j = 0; j < env->nb_breakpoints; j++) {
1100 if (env->breakpoints[j] == dc->pc) {