// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Produce stack trace
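//
// This file implements frame-pointer-based unwinding for x86 (i386 and
// x86_64).  It is #include'd into absl/debugging/stacktrace.cc rather than
// built as a standalone translation unit.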

#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_X86_INL_INC_
#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_X86_INL_INC_

#if defined(__linux__) && (defined(__i386__) || defined(__x86_64__))
#include <ucontext.h>  // for ucontext_t
#endif

#if !defined(_WIN32)
#include <unistd.h>
#endif

#include <cassert>
#include <cstdint>

#include "absl/base/macros.h"
#include "absl/base/port.h"
#include "absl/debugging/internal/address_is_readable.h"
#include "absl/debugging/internal/vdso_support.h"  // a no-op on non-elf or non-glibc systems
#include "absl/debugging/stacktrace.h"

#include "absl/base/internal/raw_logging.h"

using absl::debugging_internal::AddressIsReadable;

#if defined(__linux__) && defined(__i386__)
// Count "push %reg" instructions in VDSO __kernel_vsyscall(),
// preceeding "syscall" or "sysenter".
// If __kernel_vsyscall uses frame pointer, answer 0.
//
// kMaxBytes tells how many instruction bytes of __kernel_vsyscall
// to analyze before giving up. Up to kMaxBytes+1 bytes of
// instructions could be accessed.
//
// Here are known __kernel_vsyscall instruction sequences:
//
// SYSENTER (linux-2.6.26/arch/x86/vdso/vdso32/sysenter.S).
// Used on Intel.
//  0xffffe400 <__kernel_vsyscall+0>:       push   %ecx
//  0xffffe401 <__kernel_vsyscall+1>:       push   %edx
//  0xffffe402 <__kernel_vsyscall+2>:       push   %ebp
//  0xffffe403 <__kernel_vsyscall+3>:       mov    %esp,%ebp
//  0xffffe405 <__kernel_vsyscall+5>:       sysenter
//
// SYSCALL (see linux-2.6.26/arch/x86/vdso/vdso32/syscall.S).
// Used on AMD.
//  0xffffe400 <__kernel_vsyscall+0>:       push   %ebp
//  0xffffe401 <__kernel_vsyscall+1>:       mov    %ecx,%ebp
//  0xffffe403 <__kernel_vsyscall+3>:       syscall
//

// The sequence below isn't actually expected in practice and is listed
// here only for completeness.

// i386 (see linux-2.6.26/arch/x86/vdso/vdso32/int80.S)
//  0xffffe400 <__kernel_vsyscall+0>:       int $0x80
//  0xffffe401 <__kernel_vsyscall+1>:       ret
//
static const int kMaxBytes = 10;

// We use assert()s instead of DCHECK()s -- this is too low level
// for DCHECK().
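//
// For example, traced through CountPushInstructions() below: the SYSENTER
// sequence counts three pushes, then hits "mov %esp,%ebp" (0x89 0xE5) and
// returns 0 because a usable frame pointer is being set up, while the
// SYSCALL sequence counts one "push %ebp" (0x55), skips the ModRM byte of
// "mov %ecx,%ebp" (0x89 0xCD), and returns 1 at "syscall" (0x0F 0x05).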

static int CountPushInstructions(const unsigned char *const addr) {
  int result = 0;
  for (int i = 0; i < kMaxBytes; ++i) {
    if (addr[i] == 0x89) {
      // "mov reg,reg"
      if (addr[i + 1] == 0xE5) {
        // Found "mov %esp,%ebp".
        return 0;
      }
      ++i;  // Skip register encoding byte.
    } else if (addr[i] == 0x0F &&
               (addr[i + 1] == 0x34 || addr[i + 1] == 0x05)) {
      // Found "sysenter" or "syscall".
      return result;
    } else if ((addr[i] & 0xF0) == 0x50) {
      // Found "push %reg".
      ++result;
    } else if (addr[i] == 0xCD && addr[i + 1] == 0x80) {
      // Found "int $0x80"
      assert(result == 0);
      return 0;
    } else {
      // Unexpected instruction.
      assert(false && "unexpected instruction in __kernel_vsyscall");
      return 0;
    }
  }
  // Unexpected: didn't find SYSENTER or SYSCALL in
  // [__kernel_vsyscall, __kernel_vsyscall + kMaxBytes) interval.
  assert(false && "did not find SYSENTER or SYSCALL in __kernel_vsyscall");
  return 0;
}
#endif

// Assume stack frames larger than 100,000 bytes are bogus.
static const int kMaxFrameBytes = 100000;

// Returns the stack frame pointer from signal context, 0 if unknown.
// vuc is a ucontext_t *.  We use void* to avoid the use
// of ucontext_t on non-POSIX systems.
static uintptr_t GetFP(const void *vuc) {
#if !defined(__linux__)
  static_cast<void>(vuc);  // Avoid an unused argument compiler warning.
#else
  if (vuc != nullptr) {
    auto *uc = reinterpret_cast<const ucontext_t *>(vuc);
#if defined(__i386__)
    const auto bp = uc->uc_mcontext.gregs[REG_EBP];
    const auto sp = uc->uc_mcontext.gregs[REG_ESP];
#elif defined(__x86_64__)
    const auto bp = uc->uc_mcontext.gregs[REG_RBP];
    const auto sp = uc->uc_mcontext.gregs[REG_RSP];
#else
    const uintptr_t bp = 0;
    const uintptr_t sp = 0;
#endif
    // Sanity-check that the base pointer is valid.  It should be, as long as
    // SHRINK_WRAP_FRAME_POINTER is not set, but it's possible that some code
    // in the process is compiled with --copt=-fomit-frame-pointer or
    // --copt=-momit-leaf-frame-pointer.
    //
    // TODO(bcmills): -momit-leaf-frame-pointer is currently the default
    // behavior when building with clang.  Talk to the C++ toolchain team about
    // fixing that.
    if (bp >= sp && bp - sp <= kMaxFrameBytes) return bp;

    // If bp isn't a plausible frame pointer, return the stack pointer instead.
    // If we're lucky, it points to the start of a stack frame; otherwise, we'll
    // get one frame of garbage in the stack trace and fail the sanity check on
    // the next iteration.
    return sp;
  }
#endif
  return 0;
}

// Given a pointer to a stack frame, locate and return the calling
// stack frame, or return nullptr if no stack frame can be found. Perform
// sanity checks (the strictness of which is controlled by the boolean
// parameter "STRICT_UNWINDING") to reduce the chance that a bad pointer is
// returned.
template <bool STRICT_UNWINDING, bool WITH_CONTEXT>
ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS  // May read random elements from stack.
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY   // May read random elements from stack.
static void **NextStackFrame(void **old_fp, const void *uc) {
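  // On x86, the saved frame pointer at *old_fp links to the caller's frame,
  // and old_fp[1] holds the current frame's return address.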
  void **new_fp = (void **)*old_fp;

#if defined(__linux__) && defined(__i386__)
  if (WITH_CONTEXT && uc != nullptr) {
    // How many "push %reg" instructions are there at __kernel_vsyscall?
    // This is constant for a given kernel and processor, so compute
    // it only once.
    static int num_push_instructions = -1;  // Sentinel: not computed yet.
    // nullptr is a sentinel value: __kernel_rt_sigreturn cannot possibly be
    // at address zero.
    static const unsigned char *kernel_rt_sigreturn_address = nullptr;
    static const unsigned char *kernel_vsyscall_address = nullptr;
    if (num_push_instructions == -1) {
#ifdef ABSL_HAVE_VDSO_SUPPORT
      absl::debugging_internal::VDSOSupport vdso;
      if (vdso.IsPresent()) {
        absl::debugging_internal::VDSOSupport::SymbolInfo
            rt_sigreturn_symbol_info;
        absl::debugging_internal::VDSOSupport::SymbolInfo vsyscall_symbol_info;
        if (!vdso.LookupSymbol("__kernel_rt_sigreturn", "LINUX_2.5", STT_FUNC,
                               &rt_sigreturn_symbol_info) ||
            !vdso.LookupSymbol("__kernel_vsyscall", "LINUX_2.5", STT_FUNC,
                               &vsyscall_symbol_info) ||
            rt_sigreturn_symbol_info.address == nullptr ||
            vsyscall_symbol_info.address == nullptr) {
          // Unexpected: 32-bit VDSO is present, yet one of the expected
          // symbols is missing or null.
          assert(false && "VDSO is present, but doesn't have expected symbols");
          num_push_instructions = 0;
        } else {
          kernel_rt_sigreturn_address =
              reinterpret_cast<const unsigned char *>(
                  rt_sigreturn_symbol_info.address);
          kernel_vsyscall_address =
              reinterpret_cast<const unsigned char *>(
                  vsyscall_symbol_info.address);
          num_push_instructions =
              CountPushInstructions(kernel_vsyscall_address);
        }
      } else {
        num_push_instructions = 0;
      }
#else  // ABSL_HAVE_VDSO_SUPPORT
      num_push_instructions = 0;
#endif  // ABSL_HAVE_VDSO_SUPPORT
    }
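    // If this frame's return address is __kernel_rt_sigreturn, the frame was
    // set up by the kernel for a signal handler, and %ebp needs special care.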
    if (num_push_instructions != 0 && kernel_rt_sigreturn_address != nullptr &&
        old_fp[1] == kernel_rt_sigreturn_address) {
      const ucontext_t *ucv = static_cast<const ucontext_t *>(uc);
      // This kernel does not use a frame pointer in its VDSO code,
      // and so %ebp is not suitable for unwinding.
      void **const reg_ebp =
          reinterpret_cast<void **>(ucv->uc_mcontext.gregs[REG_EBP]);
      const unsigned char *const reg_eip =
          reinterpret_cast<unsigned char *>(ucv->uc_mcontext.gregs[REG_EIP]);
      if (new_fp == reg_ebp && kernel_vsyscall_address <= reg_eip &&
          reg_eip - kernel_vsyscall_address < kMaxBytes) {
        // We "stepped up" to __kernel_vsyscall, but %ebp is not usable.
        // Restore from 'ucv' instead.
        void **const reg_esp =
            reinterpret_cast<void **>(ucv->uc_mcontext.gregs[REG_ESP]);
        // Check that alleged %esp is not null and is reasonably aligned.
        if (reg_esp &&
            ((uintptr_t)reg_esp & (sizeof(reg_esp) - 1)) == 0) {
          // Check that alleged %esp is actually readable. This is to prevent
          // "double fault" in case we hit the first fault due to e.g. stack
          // corruption.
          void *const reg_esp2 = reg_esp[num_push_instructions - 1];
          if (AddressIsReadable(reg_esp2)) {
            // Alleged %esp is readable, use it for further unwinding.
            new_fp = reinterpret_cast<void **>(reg_esp2);
          }
        }
      }
    }
  }
#endif

  const uintptr_t old_fp_u = reinterpret_cast<uintptr_t>(old_fp);
  const uintptr_t new_fp_u = reinterpret_cast<uintptr_t>(new_fp);

  // Check that the transition from frame pointer old_fp to frame
  // pointer new_fp isn't clearly bogus.  Skip the checks if new_fp
  // matches the signal context, so that we don't skip out early when
  // using an alternate signal stack.
  //
  // TODO(bcmills): The GetFP call should be completely unnecessary when
  // SHRINK_WRAP_FRAME_POINTER is set (because we should be back in the thread's
  // stack by this point), but it is empirically still needed (e.g. when the
  // stack includes a call to abort).  unw_get_reg returns UNW_EBADREG for some
  // frames.  Figure out why GetValidFrameAddr and/or libunwind isn't doing what
  // it's supposed to.
  if (STRICT_UNWINDING &&
      (!WITH_CONTEXT || uc == nullptr || new_fp_u != GetFP(uc))) {
    // With the stack growing downwards, the older stack frame must be
    // at a greater address than the current one.
    if (new_fp_u <= old_fp_u) return nullptr;
    if (new_fp_u - old_fp_u > kMaxFrameBytes) return nullptr;
  } else {
    if (new_fp == nullptr) return nullptr;  // skip AddressIsReadable() below
    // In the non-strict mode, allow discontiguous stack frames.
    // (alternate signal stacks, for example).
    if (new_fp == old_fp) return nullptr;
  }

  // A valid frame pointer must be at least pointer-aligned.
  if (new_fp_u & (sizeof(void *) - 1)) return nullptr;
#ifdef __i386__
  // On 32-bit machines, the stack pointer can be very close to
  // 0xffffffff, so we explicitly check for a pointer into the
  // last two pages in the address space.
  if (new_fp_u >= 0xffffe000) return nullptr;
#endif
#if !defined(_WIN32)
  if (!STRICT_UNWINDING) {
    // Lax sanity checks cause a crash in 32-bit tcmalloc/crash_reason_test
    // on AMD-based machines with VDSO-enabled kernels.
    // Make an extra sanity check to ensure new_fp is readable.
    // Note: NextStackFrame<false>() is only called while the program
    //       is already on its last leg, so it's ok to be slow here.

    if (!AddressIsReadable(new_fp)) {
      return nullptr;
    }
  }
#endif
  return new_fp;
}

template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS  // May read random elements from stack.
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY   // May read random elements from stack.
ABSL_ATTRIBUTE_NOINLINE
static int UnwindImpl(void **result, int *sizes, int max_depth, int skip_count,
                      const void *ucp, int *min_dropped_frames) {
  int n = 0;
  void **fp = reinterpret_cast<void **>(__builtin_frame_address(0));
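  // __builtin_frame_address(0) yields UnwindImpl's own frame pointer; the
  // skip_count parameter lets callers drop these innermost frames.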

  while (fp && n < max_depth) {
    if (*(fp + 1) == nullptr) {
      // In 64-bit code, we often see a frame that
      // points to itself and has a return address of 0.
      break;
    }
    void **next_fp = NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(fp, ucp);
    if (skip_count > 0) {
      skip_count--;
    } else {
      result[n] = *(fp + 1);
      if (IS_STACK_FRAMES) {
        if (next_fp > fp) {
          sizes[n] = (uintptr_t)next_fp - (uintptr_t)fp;
        } else {
          // A frame-size of 0 is used to indicate unknown frame size.
          sizes[n] = 0;
        }
      }
      n++;
    }
    fp = next_fp;
  }
  if (min_dropped_frames != nullptr) {
    // Implementation detail: we clamp the number of frames we are willing to
    // count, so as not to spend too much time in the loop below.
    const int kMaxUnwind = 1000;
    int j = 0;
    for (; fp != nullptr && j < kMaxUnwind; j++) {
      fp = NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(fp, ucp);
    }
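    // j is only a lower bound on the number of dropped frames (hence the
    // "min" in min_dropped_frames), since the walk above stops at kMaxUnwind.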
    *min_dropped_frames = j;
  }
  return n;
}
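
// Usage sketch, for orientation (this file is an implementation detail
// reached through the public API in absl/debugging/stacktrace.h):
//
//   void* frames[32];
//   int depth = absl::GetStackTrace(frames, 32, /*skip_count=*/1);
//
// GetStackTrace() ultimately calls UnwindImpl() above.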

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace debugging_internal {
bool StackTraceWorksForTest() {
  return true;
}
}  // namespace debugging_internal
ABSL_NAMESPACE_END
}  // namespace absl

#endif  // ABSL_DEBUGGING_INTERNAL_STACKTRACE_X86_INL_INC_