diff options
author | Petr Machata <pmachata@redhat.com> | 2012-04-12 23:50:23 +0200 |
---|---|---|
committer | Petr Machata <pmachata@redhat.com> | 2012-04-12 23:50:23 +0200 |
commit | a266acb9c3bbde884a32268f164de62d03aa04d0 (patch) | |
tree | 5799ecec3005cf142df0d69e380569af75ee82d0 | |
parent | 1429874dee5758cc99c0dae73bd3f928109e53d7 (diff) | |
download | ltrace-a266acb9c3bbde884a32268f164de62d03aa04d0.tar.gz |
Add code for stepping over atomic instruction sequence on PPC
-rw-r--r-- | sysdeps/linux-gnu/ppc/arch.h | 1 | ||||
-rw-r--r-- | sysdeps/linux-gnu/ppc/trace.c | 82 | ||||
-rw-r--r-- | sysdeps/linux-gnu/trace.c | 109 | ||||
-rw-r--r-- | testsuite/ltrace.torture/ppc-lwarx.c | 44 | ||||
-rw-r--r-- | testsuite/ltrace.torture/ppc-lwarx.exp | 55 |
5 files changed, 275 insertions, 16 deletions
diff --git a/sysdeps/linux-gnu/ppc/arch.h b/sysdeps/linux-gnu/ppc/arch.h
index 711b4a3..64c1821 100644
--- a/sysdeps/linux-gnu/ppc/arch.h
+++ b/sysdeps/linux-gnu/ppc/arch.h
@@ -15,6 +15,7 @@
 /* Start of arch-specific functions. */
 #define ARCH_HAVE_UMOVELONG
+#define ARCH_HAVE_ATOMIC_SINGLESTEP
 
 #define PPC_NOP { 0x60, 0x00, 0x00, 0x00 }
 #define PPC_NOP_LENGTH 4
diff --git a/sysdeps/linux-gnu/ppc/trace.c b/sysdeps/linux-gnu/ppc/trace.c
index 8642157..05993de 100644
--- a/sysdeps/linux-gnu/ppc/trace.c
+++ b/sysdeps/linux-gnu/ppc/trace.c
@@ -197,3 +197,85 @@ arch_umovelong (Process *proc, void *addr, long *result, arg_type_info *info) {
 	*result = pointed_to;
 	return 0;
 }
+
+/* The atomic skip code is mostly taken from GDB. */
+
+/* Instruction masks used during single-stepping of atomic
+ * sequences.  This was lifted from GDB. */
+#define LWARX_MASK 0xfc0007fe
+#define LWARX_INSTRUCTION 0x7c000028
+#define LDARX_INSTRUCTION 0x7c0000A8
+#define STWCX_MASK 0xfc0007ff
+#define STWCX_INSTRUCTION 0x7c00012d
+#define STDCX_INSTRUCTION 0x7c0001ad
+#define BC_MASK 0xfc000000
+#define BC_INSTRUCTION 0x40000000
+
+int
+arch_atomic_singlestep(struct Process *proc, Breakpoint *sbp,
+		       int (*add_cb)(void *addr, void *data),
+		       void *add_cb_data)
+{
+	void *addr = sbp->addr;
+	debug(1, "pid=%d addr=%p", proc->pid, addr);
+
+	/* If the original instruction was lwarx/ldarx, we can't
+	 * single-step over it, instead we have to execute the whole
+	 * atomic block at once.  */
+	union {
+		uint32_t insn;
+		char buf[4];
+	} u;
+	memcpy(u.buf, sbp->orig_value, BREAKPOINT_LENGTH);
+
+	if ((u.insn & LWARX_MASK) != LWARX_INSTRUCTION
+	    && (u.insn & LWARX_MASK) != LDARX_INSTRUCTION)
+		return 1;
+
+	int insn_count;
+	for (insn_count = 0; ; ++insn_count) {
+		addr += 4;
+		unsigned long l = ptrace(PTRACE_PEEKTEXT, proc->pid, addr, 0);
+		if (l == (unsigned long)-1 && errno)
+			return -1;
+		uint32_t insn;
+#ifdef __powerpc64__
+		insn = l >> 32;
+#else
+		insn = l;
+#endif
+
+		/* If we hit a branch instruction, give up.  The
+		 * computation could escape that way and we'd have to
+		 * treat that case specially. */
+		if ((insn & BC_MASK) == BC_INSTRUCTION) {
+			debug(1, "pid=%d, found branch at %p, giving up",
+			      proc->pid, addr);
+			return -1;
+		}
+
+		if ((insn & STWCX_MASK) == STWCX_INSTRUCTION
+		    || (insn & STWCX_MASK) == STDCX_INSTRUCTION) {
+			debug(1, "pid=%d, found end of atomic block at %p",
+			      proc->pid, addr);
+			break;
+		}
+
+		/* Arbitrary cut-off.  If we didn't find the
+		 * terminating instruction by now, just give up. */
+		if (insn_count > 16) {
+			debug(1, "pid=%d, couldn't find end of atomic block",
+			      proc->pid);
+			return -1;
+		}
+	}
+
+	/* Put the breakpoint to the next instruction. */
+	addr += 4;
+	if (add_cb(addr, add_cb_data) < 0)
+		return -1;
+
+	debug(1, "PTRACE_CONT");
+	ptrace(PTRACE_CONT, proc->pid, 0, 0);
+	return 0;
+}
diff --git a/sysdeps/linux-gnu/trace.c b/sysdeps/linux-gnu/trace.c
index 9ecea1e..d962048 100644
--- a/sysdeps/linux-gnu/trace.c
+++ b/sysdeps/linux-gnu/trace.c
@@ -249,6 +249,9 @@ struct process_stopping_handler
 	/* The pointer being re-enabled. */
 	Breakpoint * breakpoint_being_enabled;
 
+	/* Artificial atomic skip breakpoint, if any needed. */
+	void *atomic_skip_bp_addr;
+
 	enum {
 		/* We are waiting for everyone to land in t/T.  */
 		psh_stopping = 0,
@@ -612,12 +615,84 @@ all_stops_accountable(struct pid_set * pids)
 	return 1;
 }
 
-static void
-singlestep(Process * proc)
+/* The protocol is: 0 for success, negative for failure, positive if
+ * default singlestep is to be used. */
+int arch_atomic_singlestep(struct Process *proc, Breakpoint *sbp,
+			   int (*add_cb)(void *addr, void *data),
+			   void *add_cb_data);
+
+#ifndef ARCH_HAVE_ATOMIC_SINGLESTEP
+int
+arch_atomic_singlestep(struct Process *proc, Breakpoint *sbp,
+		       int (*add_cb)(void *addr, void *data),
+		       void *add_cb_data)
+{
+	return 1;
+}
+#endif
+
+static int
+atomic_singlestep_add_bp(void *addr, void *data)
+{
+	struct process_stopping_handler *self = data;
+	struct Process *proc = self->task_enabling_breakpoint;
+
+	/* Only support single address as of now. */
+	assert(self->atomic_skip_bp_addr == NULL);
+
+	self->atomic_skip_bp_addr = addr + 4;
+	insert_breakpoint(proc->leader, self->atomic_skip_bp_addr, NULL, 1);
+
+	return 0;
+}
+
+static int
+singlestep(struct process_stopping_handler *self)
 {
+	struct Process *proc = self->task_enabling_breakpoint;
+
+	int status = arch_atomic_singlestep(self->task_enabling_breakpoint,
+					    self->breakpoint_being_enabled,
+					    &atomic_singlestep_add_bp, self);
+
+	/* Propagate failure and success. */
+	if (status <= 0)
+		return status;
+
+	/* Otherwise do the default action: singlestep.  */
 	debug(1, "PTRACE_SINGLESTEP");
-	if (ptrace(PTRACE_SINGLESTEP, proc->pid, 0, 0))
+	if (ptrace(PTRACE_SINGLESTEP, proc->pid, 0, 0)) {
 		perror("PTRACE_SINGLESTEP");
+		return -1;
+	}
+	return 0;
+}
+
+static void
+post_singlestep(struct process_stopping_handler *self, Event **eventp)
+{
+	continue_for_sigstop_delivery(&self->pids);
+
+	if ((*eventp)->type == EVENT_BREAKPOINT)
+		*eventp = NULL; // handled
+
+	if (self->atomic_skip_bp_addr != 0)
+		delete_breakpoint(self->task_enabling_breakpoint->leader,
+				  self->atomic_skip_bp_addr);
+
+	self->breakpoint_being_enabled = NULL;
+}
+
+static void
+singlestep_error(struct process_stopping_handler *self, Event **eventp)
+{
+	struct Process *teb = self->task_enabling_breakpoint;
+	Breakpoint *sbp = self->breakpoint_being_enabled;
+	fprintf(stderr, "%d couldn't singlestep over %s (%p)\n",
+		teb->pid, sbp->libsym != NULL ? sbp->libsym->name : NULL,
+		sbp->addr);
+	delete_breakpoint(teb->leader, sbp->addr);
+	post_singlestep(self, eventp);
 }
 
 /* This event handler is installed when we are in the process of
@@ -670,7 +745,11 @@ process_stopping_on_event(Event_Handler * super, Event * event)
 				teb->pid);
 			if (sbp->enabled)
 				disable_breakpoint(teb, sbp);
-			singlestep(teb);
+			if (singlestep(self) < 0) {
+				singlestep_error(self, &event);
+				goto psh_sinking;
+			}
+
 			self->state = state = psh_singlestep;
 		}
 		break;
@@ -682,7 +761,10 @@ process_stopping_on_event(Event_Handler * super, Event * event)
 		/* This is not the singlestep that we are
 		 * waiting for.  */
 		if (event->type == EVENT_SIGNAL) {
-			singlestep(task);
+			if (singlestep(self) < 0) {
+				singlestep_error(self, &event);
+				goto psh_sinking;
+			}
 			break;
 		}
@@ -692,18 +774,13 @@ process_stopping_on_event(Event_Handler * super, Event * event)
 			if (sbp->enabled)
 				enable_breakpoint(teb, sbp);
 
-			continue_for_sigstop_delivery(&self->pids);
-
-			self->breakpoint_being_enabled = NULL;
-			self->state = state = psh_sinking;
-
-			if (event->type == EVENT_BREAKPOINT)
-				event = NULL; // handled
-		} else
-			break;
-
-		/* fall-through */
+			post_singlestep(self, &event);
+			goto psh_sinking;
+		}
+		break;
 
+	psh_sinking:
+		state = self->state = psh_sinking;
 	case psh_sinking:
 		if (await_sigstop_delivery(&self->pids, task_info, event))
 			process_stopping_done(self, leader);
diff --git a/testsuite/ltrace.torture/ppc-lwarx.c b/testsuite/ltrace.torture/ppc-lwarx.c
new file mode 100644
index 0000000..0716407
--- /dev/null
+++ b/testsuite/ltrace.torture/ppc-lwarx.c
@@ -0,0 +1,44 @@
+/*
+ * This file is part of ltrace.
+ * Copyright (C) 2012 Petr Machata, Red Hat Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <stdint.h>
+
+__attribute__((noinline, optimize(3))) void
+atomic_add(uint32_t *a, uint32_t b)
+{
+	__asm__ volatile("lwarx 9,0,%0\n"
+			 "add 9,9,%2\n"
+			 "stwcx. 9,0,%0\n"
+			 "bne- atomic_add\n"
+			 : "=r"(a)
+			 : "0"(a), "r"(b)
+			 : "%r9");
+}
+
+uint32_t a = 0;
+
+__attribute__((optimize(0))) int
+main(int argc, char **argv)
+{
+	atomic_add(&a, 5);
+	atomic_add(&a, 10);
+	atomic_add(&a, 15);
+	return a;
+}
diff --git a/testsuite/ltrace.torture/ppc-lwarx.exp b/testsuite/ltrace.torture/ppc-lwarx.exp
new file mode 100644
index 0000000..bc2eba4
--- /dev/null
+++ b/testsuite/ltrace.torture/ppc-lwarx.exp
@@ -0,0 +1,55 @@
+# This file is part of ltrace.
+# Copyright (C) 2012 Petr Machata, Red Hat Inc.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; either version 2 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+
+set testfile "ppc-lwarx"
+set srcfile ${testfile}.c
+set binfile ${testfile}
+
+if [get_compiler_info $binfile] {
+    return -1
+}
+
+if { [istarget powerpc*-*] } then {
+    verbose "compiling source file now....."
+    if { [ltrace_compile $srcdir/$subdir/$srcfile $objdir/$subdir/$binfile executable {debug} ] != "" } {
+	send_user "Testcase compile failed, so all tests in this file will automatically fail\n."
+    }
+
+    # set options for ltrace.
+    ltrace_options "-x" "atomic_add" "-e" "!atoi"
+
+    # Run PUT for ltarce.
+    set exec_output [ltrace_runtest $objdir/$subdir $objdir/$subdir/$binfile]
+
+    # Check the output of this program.
+    verbose "ltrace runtest output: $exec_output\n"
+    if [regexp {ELF from incompatible architecture} $exec_output] {
+	fail "32-bit ltrace can not perform on 64-bit PUTs and rebuild ltrace in 64 bit mode!"
+	return
+    } elseif [ regexp {Couldn't get .hash data} $exec_output ] {
+	fail "Couldn't get .hash data!"
+	return
+    }
+
+    set pattern "atomic_add(.*, 5,.*)"
+    ltrace_verify_output ${objdir}/${subdir}/${testfile}.ltrace $pattern 1
+    set pattern "atomic_add(.*, 10,.*)"
+    ltrace_verify_output ${objdir}/${subdir}/${testfile}.ltrace $pattern 1
+    set pattern "atomic_add(.*, 15,.*)"
+    ltrace_verify_output ${objdir}/${subdir}/${testfile}.ltrace $pattern 1
+}