Commit 54ba673c authored by Carl Friedrich Bolz-Tereick

merge py3.7

--HG--
branch : py3.7-newtext-const-arg-caching
Pipeline #21792 passed in 8 minutes and 41 seconds
......@@ -107,3 +107,7 @@ db1e853f94de42ad711bd930222bd2434e0f900d release-pypy3.6-v7.3.3
51efa818fd9b24f625078c65e8e2f6a5ac24d572 release-pypy3.7-v7.3.4rc2
63df5ef41012b07fa6f9eaba93f05de0eb540f88 release-pypy2.7-v7.3.4
51efa818fd9b24f625078c65e8e2f6a5ac24d572 release-pypy3.7-v7.3.4
e02eba563ef8ad7b5097acd72a81ae1f8ddda796 release-pypy2.7-v7.3.5rc1
d770377ff27b810daef22c770e669ec9542363fe release-pypy3.7-v7.3.5rc1
cc3e122f7896d959fe0d21eb74bab75085c48fdb release-pypy2.7-v7.3.5rc2
cd00c77c619f02c43c23e6a2514e5b685230ef02 release-pypy3.7-v7.3.5rc2
......@@ -19,6 +19,18 @@ from site import USER_BASE
from site import USER_SITE
HAS_USER_SITE = True
# See also: site.py/sysconfig.py
def _get_implementation():
if '__pypy__' in sys.builtin_module_names:
return 'PyPy'
return 'Python'
IMPLEMENTATION = _get_implementation()
IMPLEMENTATION_LOWER = IMPLEMENTATION.lower()
WINDOWS_SCHEME = {
'purelib': '$base/Lib/site-packages',
'platlib': '$base/Lib/site-packages',
......@@ -29,16 +41,16 @@ WINDOWS_SCHEME = {
INSTALL_SCHEMES = {
'unix_prefix': {
'purelib': '$base/lib/$implementation_lower$py_version_short/site-packages',
'platlib': '$platbase/lib/$implementation_lower$py_version_short/site-packages',
'headers': '$base/include/$implementation_lower$py_version_short$abiflags/$dist_name',
'purelib': '$base/lib/' + IMPLEMENTATION_LOWER + '$py_version_short/site-packages',
'platlib': '$platbase/lib/' + IMPLEMENTATION_LOWER + '$py_version_short/site-packages',
'headers': '$base/include/' + IMPLEMENTATION_LOWER + '$py_version_short$abiflags/$dist_name',
'scripts': '$base/bin',
'data' : '$base',
},
'unix_home': {
'purelib': '$base/lib/$implementation_lower',
'platlib': '$base/lib/$implementation_lower',
'headers': '$base/include/$implementation_lower/$dist_name',
'purelib': '$base/lib/' + IMPLEMENTATION_LOWER,
'platlib': '$base/lib/' + IMPLEMENTATION_LOWER,
'headers': '$base/include/' + IMPLEMENTATION_LOWER + '/$dist_name',
'scripts': '$base/bin',
'data' : '$base',
},
......@@ -64,8 +76,8 @@ if HAS_USER_SITE:
INSTALL_SCHEMES['nt_user'] = {
'purelib': '$usersite',
'platlib': '$usersite',
'headers': '$userbase/$implementation$py_version_nodot/Include/$dist_name',
'scripts': '$userbase/$implementation$py_version_nodot/Scripts',
'headers': '$userbase/' + IMPLEMENTATION + '$py_version_nodot/Include/$dist_name',
'scripts': '$userbase/' + IMPLEMENTATION + '$py_version_nodot/Scripts',
'data' : '$userbase',
}
......@@ -73,7 +85,7 @@ if HAS_USER_SITE:
'purelib': '$usersite',
'platlib': '$usersite',
'headers':
'$userbase/include/$implementation_lower$py_version_short$abiflags/$dist_name',
'$userbase/include/' + IMPLEMENTATION_LOWER + '$py_version_short$abiflags/$dist_name',
'scripts': '$userbase/bin',
'data' : '$userbase',
}
......@@ -83,11 +95,6 @@ if HAS_USER_SITE:
# and to SCHEME_KEYS here.
SCHEME_KEYS = ('purelib', 'platlib', 'headers', 'scripts', 'data')
def _get_implementation():
if '__pypy__' in sys.builtin_module_names:
return 'PyPy'
return 'Python'
class install(Command):
......@@ -317,8 +324,6 @@ class install(Command):
'sys_exec_prefix': exec_prefix,
'exec_prefix': exec_prefix,
'abiflags': abiflags,
'implementation_lower': _get_implementation().lower(),
'implementation': _get_implementation(),
}
if HAS_USER_SITE:
......
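The net effect of the ``install.py`` hunks above is that the implementation name is baked into the install schemes at import time instead of being substituted through the ``implementation``/``implementation_lower`` config variables. A minimal editorial sketch (not part of the commit; ``string.Template`` stands in for distutils' own variable substitution, and the paths are illustrative):

# Editor's sketch: what the new 'unix_prefix' purelib template expands to.
import sys
from string import Template

def _get_implementation():
    if '__pypy__' in sys.builtin_module_names:
        return 'PyPy'
    return 'Python'

IMPLEMENTATION_LOWER = _get_implementation().lower()
purelib = '$base/lib/' + IMPLEMENTATION_LOWER + '$py_version_short/site-packages'

# '$base' and '$py_version_short' are still substituted later by distutils;
# here we fill them in by hand to show the result.
print(Template(purelib).substitute(base='/usr', py_version_short='3.7'))
# CPython: /usr/lib/python3.7/site-packages
# PyPy:    /usr/lib/pypy3.7/site-packages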
......@@ -612,6 +612,35 @@ class UrlParseTestCase(unittest.TestCase):
with self.assertRaisesRegex(ValueError, "out of range"):
p.port
def test_urlsplit_remove_unsafe_bytes(self):
# Remove ASCII tabs and newlines from input
url = "http://www.python.org/java\nscript:\talert('msg\r\n')/#frag"
p = urllib.parse.urlsplit(url)
self.assertEqual(p.scheme, "http")
self.assertEqual(p.netloc, "www.python.org")
self.assertEqual(p.path, "/javascript:alert('msg')/")
self.assertEqual(p.query, "")
self.assertEqual(p.fragment, "frag")
self.assertEqual(p.username, None)
self.assertEqual(p.password, None)
self.assertEqual(p.hostname, "www.python.org")
self.assertEqual(p.port, None)
self.assertEqual(p.geturl(), "http://www.python.org/javascript:alert('msg')/#frag")
# Remove ASCII tabs and newlines from input as bytes.
url = b"http://www.python.org/java\nscript:\talert('msg\r\n')/#frag"
p = urllib.parse.urlsplit(url)
self.assertEqual(p.scheme, b"http")
self.assertEqual(p.netloc, b"www.python.org")
self.assertEqual(p.path, b"/javascript:alert('msg')/")
self.assertEqual(p.query, b"")
self.assertEqual(p.fragment, b"frag")
self.assertEqual(p.username, None)
self.assertEqual(p.password, None)
self.assertEqual(p.hostname, b"www.python.org")
self.assertEqual(p.port, None)
self.assertEqual(p.geturl(), b"http://www.python.org/javascript:alert('msg')/#frag")
def test_attributes_bad_port(self):
"""Check handling of invalid ports."""
for bytes in (False, True):
......
......@@ -76,6 +76,9 @@ scheme_chars = ('abcdefghijklmnopqrstuvwxyz'
'0123456789'
'+-.')
# Unsafe bytes to be removed per WHATWG spec
_UNSAFE_URL_BYTES_TO_REMOVE = ['\t', '\r', '\n']
# XXX: Consider replacing with functools.lru_cache
MAX_CACHE_SIZE = 20
_parse_cache = {}
......@@ -424,6 +427,10 @@ def urlsplit(url, scheme='', allow_fragments=True):
if len(_parse_cache) >= MAX_CACHE_SIZE: # avoid runaway growth
clear_cache()
netloc = query = fragment = ''
for b in _UNSAFE_URL_BYTES_TO_REMOVE:
url = url.replace(b, "")
i = url.find(':')
if i > 0:
if url[:i] == 'http': # optimize the common case
......
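For reference, the user-visible effect of the sanitizing loop above, assuming the patched ``urllib.parse`` (editorial sketch, not part of the commit):

# Editor's sketch: ASCII tabs, carriage returns and newlines are stripped
# from the URL before parsing, per the WHATWG URL spec (bpo-43882).
from urllib.parse import urlsplit

parts = urlsplit("http://www.python.org/java\nscript:\talert('msg\r\n')/#frag")
print(parts.path)      # "/javascript:alert('msg')/"  (control characters removed)
print(parts.geturl())  # "http://www.python.org/javascript:alert('msg')/#frag"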
......@@ -946,7 +946,7 @@ class AbstractBasicAuthHandler:
# (single quotes are a violation of the RFC, but appear in the wild)
rx = re.compile('(?:^|,)' # start of the string or ','
'[ \t]*' # optional whitespaces
'([^ \t]+)' # scheme like "Basic"
'([^ \t,]+)' # scheme like "Basic"
'[ \t]+' # mandatory whitespaces
# realm=xxx
# realm='xxx'
......
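The one-character change above (adding ``,`` to the negated character class) is the bpo-43075 hardening: a scheme token can no longer run across the commas that the leading ``(?:^|,)`` alternation also matches, which is what allowed pathological backtracking on crafted ``WWW-Authenticate`` headers. An editorial sketch for intuition; the ``realm=`` part is reconstructed from the comments above and may differ in detail from the real ``urllib.request`` source:

# Editor's sketch: the hardened pattern still parses a normal header,
# but the scheme group cannot swallow commas any more.
import re

rx = re.compile('(?:^|,)'       # start of the string or ','
                '[ \t]*'        # optional whitespaces
                '([^ \t,]+)'    # scheme like "Basic" -- note the added ','
                '[ \t]+'        # mandatory whitespaces
                'realm=(["\']?)([^"\']*)\\2',  # realm=xxx / realm='xxx' / realm="xxx" (assumed form)
                re.I)

print(rx.findall('Basic realm="example", Digest realm="other"'))
# [('Basic', '"', 'example'), ('Digest', '"', 'other')]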
......@@ -12,11 +12,21 @@ We are releasing a PyPy 7.3.5 with bugfixes for PyPy 7.3.4, released April 4.
- The new Windows 64-bit builds improperly named c-extension modules
with the same extension as the 32-bit build (issue 3443_)
- Use the Windows-specific ``PC/pyconfig.h`` rather than the POSIX one
- A change to the Python 3.7 ``sysconfig.get_config_var('LIBDIR')`` was wrong,
leading to problems finding ``libpypy3-c.so`` for embedded PyPy (issue 3442_).
- Two upstream (CPython) security patches were applied: `BPO 42988`_ to remove
``pydoc.getfile`` and `BPO 43285`_ to not trust the ``PASV`` response in
``ftplib``.
- Instantiate the ``distutils.command.install`` schemes with the PyPy-specific
implementation name (``implementation_lower``)
- Four upstream (CPython) security patches were applied:
- `BPO 42988`_ to remove ``pydoc.getfile``
- `BPO 43285`_ to not trust the ``PASV`` response in ``ftplib``
- `BPO 43075`_ to remove a possible ReDoS in ``urllib``'s ``AbstractBasicAuthHandler``
- `BPO 43882`_ to sanitize URLs containing ASCII newlines and tabs in
``urllib.parse``
- When assigning the full slice of a list, evaluate the right-hand side before
clearing the list (issue 3440_); a short illustration follows this list
- On Python 2, ``PyUnicode_Contains`` accepts bytes as well as unicode.
- Update the packaged sqlite3 to 3.35.5 on Windows. While not a bugfix, this
seems like an easy win.
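As promised in the slice-assignment bullet above, an editorial illustration (not part of the commit) of the intended semantics of issue 3440: the right-hand side is consumed before the target list is cleared, so an rhs that reads from the same list still sees the original elements.

# Editor's sketch: issue 3440 semantics.
lst = [1, 2, 3]
lst[:] = (x * 10 for x in lst)   # the generator is drained before lst is cleared
print(lst)                       # [10, 20, 30] -- the rhs saw the original elements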
......@@ -81,6 +91,9 @@ PyPy does support ARM 32 bit processors, but does not release binaries.
.. _3443: https://foss.heptapod.net/pypy/pypy/-/issues/3443
.. _3442: https://foss.heptapod.net/pypy/pypy/-/issues/3442
.. _3440: https://foss.heptapod.net/pypy/pypy/-/issues/3440
.. _`BPO 42988`: https://bugs.python.org/issue42988
.. _`BPO 43285`: https://bugs.python.org/issue43285
.. _`BPO 43075`: https://bugs.python.org/issue43075
.. _`BPO 43882`: https://bugs.python.org/issue43882
......@@ -16,3 +16,10 @@ formatting of big numbers.
Optimize dictionary operations in the JIT a bit more, making it possible to
completely optimize away the creation of dictionaries in more situations (such
as calling the ``dict.update`` method on known dicts).
.. branch: bpo-35714
Add a special error message for ``'\0'`` in ``rstruct.formatiterator``
(bpo-35714)
.. minor branches not worth documenting
.. branch: fix-checkmodule-2
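For context on the bpo-35714 entry above, a hedged editorial sketch (not part of the commit): a null byte inside a struct format string is now rejected with an explicit error instead of being silently misinterpreted; the exact message is implementation-specific.

# Editor's sketch: interpreters with the bpo-35714 fix raise struct.error here;
# older ones silently stopped parsing the format at the null byte.
import struct

try:
    struct.calcsize('i\0i')
except struct.error as e:
    print('rejected:', e)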
......@@ -16,3 +16,10 @@ Produce better error messages for IndentationErrors (showing the statement and line
that opened the block that is missing), AttributeErrors and NameErrors (showing
suggestions for which name could have been meant instead in case of typos). This
follows the upcoming CPython 3.10 features.
.. branch: distutils-implementation
Instantiate the ``distutils.command.install`` schemes for the Python
implementation (issue 3436)
.. branch: py3.7-bpo-30245
Avoid overflow in ``struct.pack_into`` error message (BPO 30245)
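Similarly, for the bpo-30245 entry above, an editorial sketch (not part of the commit): a clearly out-of-range offset passed to ``struct.pack_into`` is rejected with a ``struct.error``, and building that error message no longer risks an internal integer overflow.

# Editor's sketch: the offset is far outside the 4-byte buffer, so pack_into
# reports it via struct.error; the exact wording is implementation-specific.
import struct

buf = bytearray(4)
try:
    struct.pack_into('<I', buf, -2**31, 42)
except struct.error as e:
    print('rejected:', e)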
......@@ -15,7 +15,8 @@ from rpython.rlib.rstring import StringBuilder
from rpython.rlib.rutf8 import (check_utf8, next_codepoint_pos,
codepoints_in_utf8,
Utf8StringBuilder)
from rpython.rlib import rlocale
from rpython.rlib import rlocale, jit
from rpython.rlib.objectmodel import always_inline
STATE_ZERO, STATE_OK, STATE_DETACHED = range(3)
......@@ -27,30 +28,33 @@ SEEN_ALL = SEEN_CR | SEEN_LF | SEEN_CRLF
_WINDOWS = sys.platform == 'win32'
def make_newlines_dict(space):
return {
SEEN_CR: space.newtext("\r", 1),
SEEN_LF: space.newtext("\n", 1),
SEEN_CRLF: space.newtext("\r\n", 2),
SEEN_CR | SEEN_LF: space.newtuple(
[space.newtext("\r", 1),
space.newtext("\n", 1)]),
SEEN_CR | SEEN_CRLF: space.newtuple(
[space.newtext("\r", 1),
space.newtext("\r\n", 2)]),
SEEN_LF | SEEN_CRLF: space.newtuple(
[space.newtext("\n", 1),
space.newtext("\r\n", 2)]),
SEEN_CR | SEEN_LF | SEEN_CRLF: space.newtuple(
[space.newtext("\r", 1),
space.newtext("\n", 1),
space.newtext("\r\n", 2)]),
}
class W_IncrementalNewlineDecoder(W_Root):
seennl = 0
pendingcr = False
w_decoder = None
def __init__(self, space):
self.w_newlines_dict = {
SEEN_CR: space.newutf8("\r", 1),
SEEN_LF: space.newutf8("\n", 1),
SEEN_CRLF: space.newutf8("\r\n", 2),
SEEN_CR | SEEN_LF: space.newtuple(
[space.newutf8("\r", 1),
space.newutf8("\n", 1)]),
SEEN_CR | SEEN_CRLF: space.newtuple(
[space.newutf8("\r", 1),
space.newutf8("\r\n", 2)]),
SEEN_LF | SEEN_CRLF: space.newtuple(
[space.newutf8("\n", 1),
space.newutf8("\r\n", 2)]),
SEEN_CR | SEEN_LF | SEEN_CRLF: space.newtuple(
[space.newutf8("\r", 1),
space.newutf8("\n", 1),
space.newutf8("\r\n", 2)]),
}
pass
@unwrap_spec(translate=int)
def descr_init(self, space, w_decoder, translate, w_errors=None):
......@@ -64,7 +68,7 @@ class W_IncrementalNewlineDecoder(W_Root):
self.seennl = 0
def newlines_get_w(self, space):
return self.w_newlines_dict.get(self.seennl, space.w_None)
return space.fromcache(make_newlines_dict).get(self.seennl, space.w_None)
@unwrap_spec(final=int)
def decode_w(self, space, w_input, final=False):
......@@ -84,7 +88,6 @@ class W_IncrementalNewlineDecoder(W_Root):
"decoder should return a string result")
output, output_len = space.utf8_len_w(w_output)
output_len = len(output)
if self.pendingcr and (final or output_len):
output = '\r' + output
self.pendingcr = False
......@@ -107,7 +110,8 @@ class W_IncrementalNewlineDecoder(W_Root):
# desired, all in one pass.
seennl = self.seennl
if output.find('\r') < 0:
rpos = output.find('\r')
if rpos < 0:
# If no \r, quick scan for a possible "\n" character.
# (there's nothing else to be done, even when in translation mode)
if output.find('\n') >= 0:
......@@ -129,11 +133,13 @@ class W_IncrementalNewlineDecoder(W_Root):
i += 1
else:
seennl |= SEEN_CR
elif output.find('\r') >= 0:
else:
assert rpos >= 0
# Translate!
builder = StringBuilder(len(output))
i = 0
while i < output_len:
builder.append_slice(output, 0, rpos)
i = rpos
while i < len(output):
c = output[i]
i += 1
if c == '\n':
......@@ -142,6 +148,7 @@ class W_IncrementalNewlineDecoder(W_Root):
if i < len(output) and output[i] == '\n':
seennl |= SEEN_CRLF
i += 1
output_len -= 1
else:
seennl |= SEEN_CR
builder.append('\n')
......@@ -150,8 +157,7 @@ class W_IncrementalNewlineDecoder(W_Root):
output = builder.build()
self.seennl |= seennl
lgt = check_utf8(output, True)
return space.newutf8(output, lgt)
return space.newutf8(output, output_len)
def reset_w(self, space):
self.seennl = 0
......@@ -159,7 +165,7 @@ class W_IncrementalNewlineDecoder(W_Root):
if self.w_decoder and not space.is_w(self.w_decoder, space.w_None):
space.call_method(self.w_decoder, "reset")
def getstate_w(self, space):
def getstate_u(self, space):
if self.w_decoder and not space.is_w(self.w_decoder, space.w_None):
w_state = space.call_method(self.w_decoder, "getstate")
w_buffer, w_flag = space.unpackiterable(w_state, 2)
......@@ -170,6 +176,10 @@ class W_IncrementalNewlineDecoder(W_Root):
flag <<= 1
if self.pendingcr:
flag |= 1
return w_buffer, flag
def getstate_w(self, space):
w_buffer, flag = self.getstate_u(space)
return space.newtuple([w_buffer, space.newint(flag)])
def setstate_w(self, space, w_state):
......@@ -327,8 +337,7 @@ class DecodeBuffer(object):
def set(self, space, w_decoded):
check_decoded(space, w_decoded)
self.ulen = space.len_w(w_decoded)
self.text = space.utf8_w(w_decoded)
self.text, self.ulen = space.utf8_len_w(w_decoded)
self.pos = 0
self.upos = 0
......@@ -417,7 +426,8 @@ class DecodeBuffer(object):
return False
ch = self.text[self.pos]
if ch == '\n':
self._advance_codepoint()
self.pos += 1
self.upos += 1
return True
else:
return True
......@@ -431,9 +441,10 @@ class DecodeBuffer(object):
if self.exhausted():
return False
ch = self.text[self.pos]
self._advance_codepoint()
scanned += 1
if ch == '\r':
self.pos += 1
self.upos += 1
if scanned >= limit:
return False
if self.exhausted():
......@@ -443,8 +454,11 @@ class DecodeBuffer(object):
self.upos -= 1
return False
if self.text[self.pos] == '\n':
self._advance_codepoint()
self.pos += 1
self.upos += 1
return True
else:
self._advance_codepoint()
return False
def find_char(self, marker, limit):
......@@ -477,12 +491,15 @@ class DecodeBuffer(object):
return False
# this is never true if self.text[pos] is part of a larger char
found = self.text[self.pos] == marker
self._advance_codepoint()
if found:
self.pos += 1
self.upos += 1
return True
self._advance_codepoint()
scanned += 1
return False
@always_inline
def _advance_codepoint(self):
# must only be called after checking self.exhausted()!
self.pos = next_codepoint_pos(self.text, self.pos)
......@@ -677,7 +694,7 @@ class W_TextIOWrapper(W_TextIOBase):
"I/O operation on uninitialized object")
def _check_attached(self, space):
if self.state == STATE_DETACHED:
if jit.promote(self.state) == STATE_DETACHED:
raise oefmt(space.w_ValueError,
"underlying buffer has been detached")
self._check_init(space)
......@@ -785,19 +802,25 @@ class W_TextIOWrapper(W_TextIOBase):
if self.telling:
# To prepare for tell(), we need to snapshot a point in the file
# where the decoder's input buffer is empty.
w_state = space.call_method(self.w_decoder, "getstate")
if (not space.isinstance_w(w_state, space.w_tuple)
or space.len_w(w_state) != 2):
raise oefmt(space.w_TypeError, "illegal decoder state")
# Given this, we know there was a valid snapshot point
# len(dec_buffer) bytes ago with decoder state (b'', dec_flags).
w_dec_buffer, w_dec_flags = space.unpackiterable(w_state, 2)
if not space.isinstance_w(w_dec_buffer, space.w_bytes):
msg = ("illegal decoder state: the first value should be a "
"bytes object not '%T'")
raise oefmt(space.w_TypeError, msg, w_dec_buffer)
w_decoder = self.w_decoder
# fast path for the common case of decoder being
# W_IncrementalNewlineDecoder. avoids (un)wrapping the tuple too
if type(w_decoder) is W_IncrementalNewlineDecoder:
w_dec_buffer, dec_flags = w_decoder.getstate_u(space)
else:
w_state = space.call_method(self.w_decoder, "getstate")
if (not space.isinstance_w(w_state, space.w_tuple)
or space.len_w(w_state) != 2):
raise oefmt(space.w_TypeError, "illegal decoder state")
# Given this, we know there was a valid snapshot point
# len(dec_buffer) bytes ago with decoder state (b'', dec_flags).
w_dec_buffer, w_dec_flags = space.unpackiterable(w_state, 2)
if not space.isinstance_w(w_dec_buffer, space.w_bytes):
msg = ("illegal decoder state: the first value should be a "
"bytes object not '%T'")
raise oefmt(space.w_TypeError, msg, w_dec_buffer)
dec_flags = space.int_w(w_dec_flags)
dec_buffer = space.bytes_w(w_dec_buffer)
dec_flags = space.int_w(w_dec_flags)
else:
dec_buffer = None
dec_flags = 0
......@@ -815,8 +838,12 @@ class W_TextIOWrapper(W_TextIOBase):
raise oefmt(space.w_TypeError, msg, func_name, w_input)
eof = input_buf.getlength() == 0
w_decoded = space.call_method(self.w_decoder, "decode",
w_input, space.newbool(eof))
w_decoder = self.w_decoder
if type(w_decoder) is W_IncrementalNewlineDecoder:
w_decoded = w_decoder.decode_w(space, w_input, eof)
else:
w_decoded = space.call_method(w_decoder, "decode",
w_input, space.newbool(eof))
self.decoded.set(space, w_decoded)
if space.len_w(w_decoded) > 0:
eof = False
......@@ -891,15 +918,14 @@ class W_TextIOWrapper(W_TextIOBase):
return space.newutf8(builder.build(), builder.getlength())
def _scan_line_ending(self, limit):
if self.readtranslate:
# Newlines are already translated, only search for \n
return self.decoded.find_char('\n', limit)
if self.readuniversal:
return self.decoded.find_newline_universal(limit)
else:
if self.readtranslate:
# Newlines are already translated, only search for \n
newline = '\n'
else:
# Non-universal mode.
newline = self.readnl
# Non-universal mode.
newline = self.readnl
if newline == '\r\n':
return self.decoded.find_crlf(limit)
else:
......
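A note on the restructured ``_scan_line_ending`` above: the ``readtranslate`` / ``readuniversal`` / ``readnl`` branches correspond to the ``newline`` argument of the Python-level text wrapper, and the cached newlines dict backs the ``newlines`` attribute. An editorial sketch (not part of the commit) of the three behaviours, using the stdlib ``io`` module rather than the RPython implementation:

# Editor's sketch: the three newline-handling modes _scan_line_ending distinguishes.
import io

raw = b"a\r\nb\rc\n"
print(io.TextIOWrapper(io.BytesIO(raw), encoding='ascii', newline=None).readlines())
# translated (universal): ['a\n', 'b\n', 'c\n']
print(io.TextIOWrapper(io.BytesIO(raw), encoding='ascii', newline='').readlines())
# universal, untranslated: ['a\r\n', 'b\r', 'c\n']
print(io.TextIOWrapper(io.BytesIO(raw), encoding='ascii', newline='\r\n').readlines())
# only '\r\n' ends a line: ['a\r\n', 'b\rc\n']

# The newlines attribute reports which endings were seen -- the tuple that
# make_newlines_dict caches per-space in the hunk above.
f = io.TextIOWrapper(io.BytesIO(raw), encoding='ascii', newline=None)
f.read()
print(f.newlines)   # ('\r', '\n', '\r\n')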
......@@ -57,6 +57,31 @@ def test_readline(space, data, mode):
break
assert txt.startswith(u''.join(lines))
@given(data=st_readline())
@settings(deadline=None, database=None)
@example(data=(u'\n\r\n', [0, -1, 2, -1, 0, -1]))
def test_readline_none(space, data):
txt, limits = data
w_stream = W_BytesIO(space)
w_stream.descr_init(space, space.newbytes(txt.encode('utf-8')))
w_textio = W_TextIOWrapper(space)
w_textio.descr_init(
space, w_stream,
encoding='utf-8', w_errors=space.newtext('surrogatepass'),
w_newline=space.w_None)
lines = []
for limit in limits:
w_line = w_textio.readline_w(space, space.newint(limit))
line = space.utf8_w(w_line).decode('utf-8')
if limit >= 0:
assert len(line) <= limit
if line:
lines.append(line)
elif limit:
break
output = txt.replace("\r\n", "\n").replace("\r", "\n")
assert output.startswith(u''.join(lines))
@given(st.text())
def test_read_buffer(text):
buf = DecodeBuffer(text.encode('utf8'), len(text))
......
import sys
from pypy.interpreter.mixedmodule import MixedModule
class Module(MixedModule):
......
......@@ -47,9 +47,16 @@ from pypy.objspace.std.typeobject import W_TypeObject, find_best_base
from rpython.tool.cparser import CTypeSpace
DEBUG_WRAPPER = True
if sys.platform == 'win32':
dash = '_'
WIN32 = True
else:
dash = ''
WIN32 = False
pypydir = py.path.local(pypydir)
include_dir = pypydir / 'module' / 'cpyext' / 'include'
pc_dir = pypydir / 'module' / 'cpyext' / 'PC'
parse_dir = pypydir / 'module' / 'cpyext' / 'parse'
source_dir = pypydir / 'module' / 'cpyext' / 'src'
translator_c_dir = py.path.local(cdir)
......@@ -59,6 +66,8 @@ include_dirs = [
translator_c_dir,
udir,
]
if WIN32:
include_dirs.insert(0, pc_dir)
configure_eci = ExternalCompilationInfo(
include_dirs=include_dirs,
......@@ -92,14 +101,6 @@ assert CONST_WSTRING == rffi.CWCHARP
# FILE* interface
if sys.platform == 'win32':
dash = '_'
WIN32 = True
else:
dash = ''
WIN32 = False
def fclose(fp):
try:
with FdValidator(c_fileno(fp)):
......@@ -139,6 +140,7 @@ Py_TPFLAGS_HEAPTYPE
Py_LT Py_LE Py_EQ Py_NE Py_GT Py_GE Py_MAX_NDIMS
Py_CLEANUP_SUPPORTED PyBUF_READ
PyBUF_FORMAT PyBUF_ND PyBUF_STRIDES PyBUF_WRITABLE PyBUF_SIMPLE PyBUF_WRITE
PY_SSIZE_T_MAX PY_SSIZE_T_MIN
""".split()
for name in ('LONG', 'LIST', 'TUPLE', 'UNICODE', 'DICT', 'BASE_EXC',
......@@ -190,6 +192,11 @@ def copy_header_files(cts, dstdir, copy_numpy_headers):
numpy_include_dir = include_dir / '_numpypy' / 'numpy'
numpy_headers = numpy_include_dir.listdir('*.h') + numpy_include_dir.listdir('*.inl')
_copy_header_files(numpy_headers, numpy_dstdir)
if WIN32:
# Override pyconfig.h with the one for windows
PC_dir = pypydir / 'module' / 'cpyext' / 'PC'
headers = PC_dir.listdir('*.h')
_copy_header_files(headers, dstdir)
class NotSpecified(object):
......