
Commit 414b21be authored by Maciej Fijalkowski

completely backout vmprof merge

parent b8b236fde733
@@ -420,10 +420,3 @@ The gdbm module includes code from gdbm.h, which is distributed under
the terms of the GPL license version 2 or any later version. Thus the
gdbm module, provided in the file lib_pypy/gdbm.py, is redistributed
under the terms of the GPL license as well.

License for 'pypy/module/_vmprof/src'
--------------------------------------

The code is based on gperftools. You may see a copy of the License for it at
https://code.google.com/p/gperftools/source/browse/COPYING
@@ -38,9 +38,6 @@ working_modules.update([
    "_csv", "cppyy", "_pypyjson"
])

if sys.platform.startswith('linux') and sys.maxint > 2147483647:
    working_modules.add('_vmprof')

translation_modules = default_modules.copy()
translation_modules.update([
    "fcntl", "time", "select", "signal", "_rawffi", "zlib", "struct", "_md5",
@@ -102,7 +99,6 @@ module_import_dependencies = {
    "_hashlib" : ["pypy.module._ssl.interp_ssl"],
    "_minimal_curses": ["pypy.module._minimal_curses.fficurses"],
    "_continuation": ["rpython.rlib.rstacklet"],
    "_vmprof" : ["pypy.module._vmprof.interp_vmprof"],
    }

def get_module_validator(modname):
@@ -9,7 +9,6 @@ from rpython.rlib.objectmodel import (we_are_translated, newlist_hint,
from rpython.rlib.signature import signature
from rpython.rlib.rarithmetic import r_uint, SHRT_MIN, SHRT_MAX, \
    INT_MIN, INT_MAX, UINT_MAX, USHRT_MAX
from rpython.rlib.rweaklist import RWeakListMixin

from pypy.interpreter.executioncontext import (ExecutionContext, ActionFlag,
    UserDelAction)
@@ -367,10 +366,6 @@ class CannotHaveLock(Exception):

# ____________________________________________________________


class CodeObjWeakList(RWeakListMixin):
    def __init__(self):
        self.initialize()


class ObjSpace(object):
    """Base class for the interpreter-level implementations of object spaces.
    http://pypy.readthedocs.org/en/latest/objspace.html"""
@@ -394,7 +389,6 @@ class ObjSpace(object):
        self.check_signal_action = None # changed by the signal module
        self.user_del_action = UserDelAction(self)
        self._code_of_sys_exc_info = None
        self.all_code_objs = CodeObjWeakList()

        # can be overridden to a subclass
        self.initialize()
@@ -672,16 +666,6 @@ class ObjSpace(object):
        assert ec is not None
        return ec

    def register_code_object(self, pycode):
        callback = self.getexecutioncontext().register_code_callback
        if callback is not None:
            callback(self, pycode)
        self.all_code_objs.add_handle(pycode)

    def set_code_callback(self, callback):
        ec = self.getexecutioncontext()
        ec.register_code_callback = callback

    def _freeze_(self):
        return True
@@ -33,11 +33,6 @@ class ExecutionContext(object):
        self.profilefunc = None
        self.w_profilefuncarg = None
        self.thread_disappeared = False # might be set to True after os.fork()
        self.register_code_callback = None
        if sys.maxint == 2147483647:
            self._code_unique_id = 0 # XXX this is wrong, it won't work on 32bit
        else:
            self._code_unique_id = 0x7000000000000000

    @staticmethod
    def _mark_thread_disappeared(space):
@@ -14,10 +14,9 @@ from pypy.interpreter.astcompiler.consts import (
    CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS, CO_NESTED,
    CO_GENERATOR, CO_KILL_DOCSTRING, CO_YIELD_INSIDE_TRY)
from pypy.tool.stdlib_opcode import opcodedesc, HAVE_ARGUMENT
from rpython.rlib.rarithmetic import intmask, r_longlong
from rpython.rlib.rarithmetic import intmask
from rpython.rlib.objectmodel import compute_hash
from rpython.rlib import jit
from rpython.rlib.debug import debug_start, debug_stop, debug_print


class BytecodeCorruption(Exception):
@@ -55,9 +54,8 @@ class PyCode(eval.Code):
    "CPython-style code objects."
    _immutable_ = True
    _immutable_fields_ = ["co_consts_w[*]", "co_names_w[*]", "co_varnames[*]",
                          "co_freevars[*]", "co_cellvars[*]",
                          "_args_as_cellvars[*]"]
                          "co_freevars[*]", "co_cellvars[*]", "_args_as_cellvars[*]"]

    def __init__(self, space, argcount, nlocals, stacksize, flags,
                 code, consts, names, varnames, filename,
                 name, firstlineno, lnotab, freevars, cellvars,
@@ -85,7 +83,6 @@ class PyCode(eval.Code):
        self.magic = magic
        self._signature = cpython_code_signature(self)
        self._initialize()
        space.register_code_object(self)

    def _initialize(self):
        if self.co_cellvars:
@@ -127,15 +124,6 @@ class PyCode(eval.Code):
            from pypy.objspace.std.mapdict import init_mapdict_cache
            init_mapdict_cache(self)

        ec = self.space.getexecutioncontext()
        self._unique_id = ec._code_unique_id
        ec._code_unique_id += 2 # so we have one bit that we can mark stuff
                                # with

    def _get_full_name(self):
        return "py:%s:%d:%s" % (self.co_name, self.co_firstlineno,
                                self.co_filename)

    def _cleanup_(self):
        if (self.magic == cpython_magic and
            '__pypy__' not in sys.builtin_module_names):
from pypy.interpreter.mixedmodule import MixedModule

class Module(MixedModule):
    """
    Write me :)
    """
    appleveldefs = {
    }

    interpleveldefs = {
        'enable': 'interp_vmprof.enable',
        'disable': 'interp_vmprof.disable',
    }

    def setup_after_space_initialization(self):
        # force the __extend__ hacks to occur early
        from pypy.module._vmprof.interp_vmprof import VMProf
        self.vmprof = VMProf()
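
For context, the module deleted by this backout exposed only the two interp-level entry points listed in interpleveldefs above. A minimal app-level usage sketch (hypothetical, not from the tree: the file name, open flags and run_workload() are made up for illustration):

import os
import _vmprof

fd = os.open('pypy-profile.log', os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0644)
_vmprof.enable(fd, -1)    # period=-1 picks the default sampling period (100 Hz)
run_workload()            # placeholder for whatever code is being profiled
_vmprof.disable()
os.close(fd)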
import py, os, sys
from rpython.rtyper.lltypesystem import lltype, rffi, llmemory
from rpython.translator.tool.cbuild import ExternalCompilationInfo
from rpython.rtyper.annlowlevel import cast_instance_to_gcref, cast_base_ptr_to_instance
from rpython.rlib.objectmodel import we_are_translated
from rpython.rlib import jit, rposix, entrypoint
from rpython.rtyper.tool import rffi_platform as platform
from rpython.rlib.rstring import StringBuilder
from pypy.interpreter.baseobjspace import W_Root
from pypy.interpreter.error import oefmt, wrap_oserror, OperationError
from pypy.interpreter.gateway import unwrap_spec
from pypy.interpreter.pyframe import PyFrame
ROOT = py.path.local(__file__).join('..')
SRC = ROOT.join('src')
# by default, we statically link vmprof.c into pypy; however, if you set
# DYNAMIC_VMPROF to True, it will be dynamically linked to the libvmprof.so
# which is expected to be inside pypy/module/_vmprof/src: this is very useful
# during development. Note that you have to manually build libvmprof by
# running make inside the src dir
DYNAMIC_VMPROF = False
eci_kwds = dict(
    include_dirs = [SRC],
    includes = ['vmprof.h', 'trampoline.h'],
    separate_module_files = [SRC.join('trampoline.asmgcc.s')],
    libraries = ['unwind'],

    post_include_bits=["""
        void* pypy_vmprof_get_virtual_ip(void*);
        void pypy_vmprof_init(void);
        """],

    separate_module_sources=["""
        void pypy_vmprof_init(void) {
            vmprof_set_mainloop(pypy_execute_frame_trampoline, 0,
                                pypy_vmprof_get_virtual_ip);
        }
        """],
    )

if DYNAMIC_VMPROF:
    eci_kwds['libraries'] += ['vmprof']
    eci_kwds['link_extra'] = ['-Wl,-rpath,%s' % SRC, '-L%s' % SRC]
else:
    eci_kwds['separate_module_files'] += [SRC.join('vmprof.c')]

eci = ExternalCompilationInfo(**eci_kwds)

check_eci = eci.merge(ExternalCompilationInfo(separate_module_files=[
    SRC.join('fake_pypy_api.c')]))

platform.verify_eci(check_eci)
pypy_execute_frame_trampoline = rffi.llexternal(
    "pypy_execute_frame_trampoline",
    [llmemory.GCREF, llmemory.GCREF, llmemory.GCREF],
    llmemory.GCREF,
    compilation_info=eci,
    _nowrapper=True, sandboxsafe=True,
    random_effects_on_gcobjs=True)

pypy_vmprof_init = rffi.llexternal("pypy_vmprof_init", [], lltype.Void,
                                   compilation_info=eci)

vmprof_enable = rffi.llexternal("vmprof_enable",
                                [rffi.INT, rffi.LONG, rffi.INT,
                                 rffi.CCHARP, rffi.INT],
                                rffi.INT, compilation_info=eci,
                                save_err=rffi.RFFI_SAVE_ERRNO)

vmprof_disable = rffi.llexternal("vmprof_disable", [], rffi.INT,
                                 compilation_info=eci,
                                 save_err=rffi.RFFI_SAVE_ERRNO)

vmprof_register_virtual_function = rffi.llexternal(
    "vmprof_register_virtual_function",
    [rffi.CCHARP, rffi.VOIDP, rffi.VOIDP], lltype.Void,
    compilation_info=eci, _nowrapper=True)

original_execute_frame = PyFrame.execute_frame.im_func
original_execute_frame.c_name = 'pypy_pyframe_execute_frame'
original_execute_frame._dont_inline_ = True
class __extend__(PyFrame):
    def execute_frame(frame, w_inputvalue=None, operr=None):
        # go through the asm trampoline ONLY if we are translated but not
        # being JITted.
        #
        # If we are not translated, we obviously don't want to go through the
        # trampoline because there is no C function it can call.
        #
        # If we are being JITted, we want to skip the trampoline, else the JIT
        # cannot see through it.
        if we_are_translated() and not jit.we_are_jitted():
            # if we are translated, call the trampoline
            gc_frame = cast_instance_to_gcref(frame)
            gc_inputvalue = cast_instance_to_gcref(w_inputvalue)
            gc_operr = cast_instance_to_gcref(operr)
            gc_result = pypy_execute_frame_trampoline(gc_frame, gc_inputvalue, gc_operr)
            return cast_base_ptr_to_instance(W_Root, gc_result)
        else:
            return original_execute_frame(frame, w_inputvalue, operr)
@entrypoint.entrypoint_lowlevel('main', [llmemory.GCREF],
                                'pypy_vmprof_get_virtual_ip', True)
def get_virtual_ip(gc_frame):
    frame = cast_base_ptr_to_instance(PyFrame, gc_frame)
    if jit._get_virtualizable_token(frame):
        return rffi.cast(rffi.VOIDP, 0)
    virtual_ip = do_get_virtual_ip(frame)
    return rffi.cast(rffi.VOIDP, virtual_ip)

def do_get_virtual_ip(frame):
    return frame.pycode._unique_id
def write_long_to_string_builder(l, b):
    if sys.maxint == 2147483647:
        b.append(chr(l & 0xff))
        b.append(chr((l >> 8) & 0xff))
        b.append(chr((l >> 16) & 0xff))
        b.append(chr((l >> 24) & 0xff))
    else:
        b.append(chr(l & 0xff))
        b.append(chr((l >> 8) & 0xff))
        b.append(chr((l >> 16) & 0xff))
        b.append(chr((l >> 24) & 0xff))
        b.append(chr((l >> 32) & 0xff))
        b.append(chr((l >> 40) & 0xff))
        b.append(chr((l >> 48) & 0xff))
        b.append(chr((l >> 56) & 0xff))
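
Both branches above emit the value as a little-endian machine word, 4 bytes on a 32-bit build and 8 bytes on a 64-bit one; only the width differs. Outside RPython the same byte layout could be produced with struct (a sketch for documentation only; struct.pack is not available to RPython code such as the function above, and write_long_via_struct is a made-up name):

import struct, sys

def write_long_via_struct(l, b):
    # b is anything with an append(str) method, e.g. a list
    if sys.maxint == 2147483647:
        b.append(struct.pack('<I', l & 0xffffffff))            # 32-bit word
    else:
        b.append(struct.pack('<Q', l & 0xffffffffffffffff))    # 64-bit word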
class VMProf(object):
    def __init__(self):
        self.is_enabled = False
        self.ever_enabled = False
        self.mapping_so_far = [] # stored mapping in between runs
        self.fileno = -1

    def enable(self, space, fileno, period):
        if self.is_enabled:
            raise oefmt(space.w_ValueError, "_vmprof already enabled")
        self.fileno = fileno
        self.is_enabled = True
        self.write_header(fileno, period)
        if not self.ever_enabled:
            if we_are_translated():
                pypy_vmprof_init()
            self.ever_enabled = True
        for weakcode in space.all_code_objs.get_all_handles():
            code = weakcode()
            if code:
                self.register_code(space, code)
        space.set_code_callback(vmprof_register_code)
        if we_are_translated():
            # does not work untranslated
            res = vmprof_enable(fileno, period, 0,
                                lltype.nullptr(rffi.CCHARP.TO), 0)
        else:
            res = 0
        if res == -1:
            raise wrap_oserror(space, OSError(rposix.get_saved_errno(),
                                              "_vmprof.enable"))

    def write_header(self, fileno, period):
        if period == -1:
            period_usec = 1000000 / 100 # 100hz
        else:
            period_usec = period
        b = StringBuilder()
        write_long_to_string_builder(0, b)
        write_long_to_string_builder(3, b)
        write_long_to_string_builder(0, b)
        write_long_to_string_builder(period_usec, b)
        write_long_to_string_builder(0, b)
        os.write(fileno, b.build())

    def register_code(self, space, code):
        if self.fileno == -1:
            raise OperationError(space.w_RuntimeError,
                                 space.wrap("vmprof not running"))
        name = code._get_full_name()
        b = StringBuilder()
        b.append('\x02')
        write_long_to_string_builder(code._unique_id, b)
        write_long_to_string_builder(len(name), b)
        b.append(name)
        os.write(self.fileno, b.build())

    def disable(self, space):
        if not self.is_enabled:
            raise oefmt(space.w_ValueError, "_vmprof not enabled")
        self.is_enabled = False
        self.fileno = -1
        if we_are_translated():
            # does not work untranslated
            res = vmprof_disable()
        else:
            res = 0
        space.set_code_callback(None)
        if res == -1:
            raise wrap_oserror(space, OSError(rposix.get_saved_errno(),
                                              "_vmprof.disable"))

def vmprof_register_code(space, code):
    from pypy.module._vmprof import Module
    mod_vmprof = space.getbuiltinmodule('_vmprof')
    assert isinstance(mod_vmprof, Module)
    mod_vmprof.vmprof.register_code(space, code)

@unwrap_spec(fileno=int, period=int)
def enable(space, fileno, period=-1):
    from pypy.module._vmprof import Module
    mod_vmprof = space.getbuiltinmodule('_vmprof')
    assert isinstance(mod_vmprof, Module)
    mod_vmprof.vmprof.enable(space, fileno, period)

def disable(space):
    from pypy.module._vmprof import Module
    mod_vmprof = space.getbuiltinmodule('_vmprof')
    assert isinstance(mod_vmprof, Module)
    mod_vmprof.vmprof.disable(space)
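
The profile stream produced above is easy to decode: write_header() emits five machine-word values (0, 3, 0, period_usec, 0), and register_code() emits a '\x02' marker followed by the code object's _unique_id, the length of its "py:name:line:file" string, and the string itself. A minimal reader sketch (hypothetical, not part of the tree; it assumes a 64-bit build, Python 2 byte strings, and a file object opened in binary mode, and only handles these two record kinds):

import struct

WORD = 8     # assumption: 64-bit build; would be 4 on 32-bit

def read_word(f):
    # one little-endian machine word, mirroring write_long_to_string_builder
    return struct.unpack('<q', f.read(WORD))[0]

def read_prologue(f):
    # mirrors VMProf.write_header: 0, 3, 0, period_usec, 0
    assert read_word(f) == 0
    assert read_word(f) == 3
    assert read_word(f) == 0
    period_usec = read_word(f)
    assert read_word(f) == 0
    return period_usec

def read_code_record(f):
    # mirrors VMProf.register_code: '\x02', _unique_id, len(name), name
    assert f.read(1) == '\x02'
    unique_id = read_word(f)
    namelen = read_word(f)
    name = f.read(namelen)
    return unique_id, name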
#define HAVE_SYS_UCONTEXT_H
#define PC_FROM_UCONTEXT uc_mcontext.gregs[REG_RIP]
long pypy_jit_start_addr(void)
{
    return 3;
}

long pypy_jit_end_addr(void)
{
    return 3;
}

long pypy_jit_stack_depth_at_loc(long x)
{
    return 0;
}

long pypy_find_codemap_at_addr(long x)
{
    return 0;
}

long pypy_yield_codemap_at_addr(long x, long y, long *a)
{
    return 0;
}

void pypy_pyframe_execute_frame(void)
{
}
long pypy_jit_start_addr();
long pypy_jit_end_addr();
long pypy_jit_stack_depth_at_loc(long);
long pypy_find_codemap_at_addr(long);
long pypy_yield_codemap_at_addr(long, long, long*);
void vmprof_set_tramp_range(void* start, void* end)
{
}
static ptrdiff_t vmprof_unw_get_custom_offset(void* ip, unw_cursor_t *cp) {
    intptr_t ip_l = (intptr_t)ip;

    if (ip_l < pypy_jit_start_addr() || ip_l > pypy_jit_end_addr()) {
        return -1;
    }
    return (ptrdiff_t)pypy_jit_stack_depth_at_loc(ip_l);
}

static long vmprof_write_header_for_jit_addr(void **result, long n,
                                             void *ip, int max_depth)
{
    long codemap_pos;
    long current_pos = 0;
    intptr_t id;
    intptr_t addr = (intptr_t)ip;

    if (addr < pypy_jit_start_addr() || addr > pypy_jit_end_addr()) {
        return n;
    }
    codemap_pos = pypy_find_codemap_at_addr(addr);
    if (codemap_pos == -1) {
        return n;
    }
    while (1) {
        id = pypy_yield_codemap_at_addr(codemap_pos, addr, &current_pos);
        if (id == 0) {
            return n;
        }
        result[n++] = (void *)id;
        if (n >= max_depth) {
            return n;
        }
    }
}
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2005, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Craig Silverstein
//
// This is an internal header file used by profiler.cc. It defines
// the single (inline) function GetPC. GetPC is used in a signal
// handler to figure out the instruction that was being executed when
// the signal-handler was triggered.
//
// To get this, we use the ucontext_t argument to the signal-handler
// callback, which holds the full context of what was going on when
// the signal triggered. How to get from a ucontext_t to a Program
// Counter is OS-dependent.
#ifndef BASE_GETPC_H_
#define BASE_GETPC_H_
#include "config.h"
// On many linux systems, we may need _GNU_SOURCE to get access to
// the defined constants that define the register we want to see (eg
// REG_EIP). Note this #define must come first!
#define _GNU_SOURCE 1
// If #define _GNU_SOURCE causes problems, this might work instead.
// It will cause problems for FreeBSD though!, because it turns off
// the needed __BSD_VISIBLE.
//#define _XOPEN_SOURCE 500
#include <string.h> // for memcmp
#if defined(HAVE_SYS_UCONTEXT_H)
#include <sys/ucontext.h>
#elif defined(HAVE_UCONTEXT_H)
#include <ucontext.h> // for ucontext_t (and also mcontext_t)
#elif defined(HAVE_CYGWIN_SIGNAL_H)
#include <cygwin/signal.h>
typedef ucontext ucontext_t;
#endif
// Take the example where function Foo() calls function Bar(). For
// many architectures, Bar() is responsible for setting up and tearing
// down its own stack frame. In that case, it's possible for the
// interrupt to happen when execution is in Bar(), but the stack frame
// is not properly set up (either before it's done being set up, or
// after it's been torn down but before Bar() returns). In those
// cases, the stack trace cannot see the caller function anymore.
//
// GetPC can try to identify this situation, on architectures where it
// might occur, and unwind the current function call in that case to
// avoid false edges in the profile graph (that is, edges that appear
// to show a call skipping over a function). To do this, we hard-code
// in the asm instructions we might see when setting up or tearing
// down a stack frame.
//
// This is difficult to get right: the instructions depend on the
// processor, the compiler ABI, and even the optimization level. This
// is a best effort patch -- if we fail to detect such a situation, or
// mess up the PC, nothing happens; the returned PC is not used for
// any further processing.
struct CallUnrollInfo {
  // Offset from (e)ip register where this instruction sequence
  // should be matched. Interpreted as bytes. Offset 0 is the next
  // instruction to execute. Be extra careful with negative offsets in
  // architectures of variable instruction length (like x86) - it is
  // not that easy as taking an offset to step one instruction back!
  int pc_offset;
  // The actual instruction bytes. Feel free to make it larger if you
  // need a longer sequence.
  unsigned char ins[16];
  // How many bytes to match from ins array?
  int ins_size;
  // The offset from the stack pointer (e)sp where to look for the
  // call return address. Interpreted as bytes.
  int return_sp_offset;
};
// The dereferences needed to get the PC from a struct ucontext were
// determined at configure time, and stored in the macro
// PC_FROM_UCONTEXT in config.h. The only thing we need to do here,
// then, is to do the magic call-unrolling for systems that support it.
// -- Special case 1: linux x86, for which we have CallUnrollInfo
#if defined(__linux) && defined(__i386) && defined(__GNUC__)
static const CallUnrollInfo callunrollinfo[] = {
  // Entry to a function: push %ebp; mov %esp,%ebp
  // Top-of-stack contains the caller IP.
  { 0,
    {0x55, 0x89, 0xe5}, 3,
    0
  },
  // Entry to a function, second instruction: push %ebp; mov %esp,%ebp
  // Top-of-stack contains the old frame, caller IP is +4.
  { -1,
    {0x55, 0x89, 0xe5}, 3,
    4
  },
  // Return from a function: RET.
  // Top-of-stack contains the caller IP.
  { 0,
    {0xc3}, 1,
    0
  }
};

inline void* GetPC(ucontext_t *signal_ucontext) {
  // See comment above struct CallUnrollInfo. Only try instruction
  // flow matching if both eip and esp looks reasonable.
  const int eip = signal_ucontext->uc_mcontext.gregs[REG_EIP];
  const int esp = signal_ucontext->uc_mcontext.gregs[REG_ESP];
  if ((eip & 0xffff0000) != 0 && (~eip & 0xffff0000) != 0 &&
      (esp & 0xffff0000) != 0) {
    char* eip_char = reinterpret_cast<char*>(eip);
    for (int i = 0; i < sizeof(callunrollinfo)/sizeof(*callunrollinfo); ++i) {
      if (!memcmp(eip_char + callunrollinfo[i].pc_offset,
                  callunrollinfo[i].ins, callunrollinfo[i].ins_size)) {
        // We have a match.
        void **retaddr = (void**)(esp + callunrollinfo[i].return_sp_offset);