Skip to content

Instantly share code, notes, and snippets.

@ichard26
Created December 26, 2021 02:04
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save ichard26/ca6c6ad4bd1de5152d95418c8645354b to your computer and use it in GitHub Desktop.
[21:02:18] Loaded first analysis: /home/ichard26/programming/oss/black/more-consistent-spacing-compiled.json (cached)
Loaded second analysis: /home/ichard26/programming/oss/black/hug-power-op-compiled-4.json (cached)
╭───────────────────────── Summary ──────────────────────────╮
│ 11 projects & 230 files changed / 1586 changes [+793/-793] │
│                                                            │
│ ... out of 2 020 758 lines, 9650 files & 23 projects │
╰────────────────────────────────────────────────────────────╯
[django - https://github.com/django/django.git]
╰─> revision 569a33579c3cca5f801c544d9b52a34e3c779424
--- a/django:django/contrib/auth/hashers.py
+++ b/django:django/contrib/auth/hashers.py
@@ -552,11 +552,11 @@
algorithm = 'scrypt'
block_size = 8
maxmem = 0
parallelism = 1
- work_factor = 2 ** 14
+ work_factor = 2**14
def encode(self, password, salt, n=None, r=None, p=None):
self._check_encode_args(password, salt)
n = n or self.work_factor
r = r or self.block_size
--- a/django:django/contrib/gis/measure.py
+++ b/django:django/contrib/gis/measure.py
@@ -347,11 +347,11 @@
class Area(MeasureBase):
STANDARD_UNIT = AREA_PREFIX + Distance.STANDARD_UNIT
# Getting the square units values and the alias dictionary.
- UNITS = {'%s%s' % (AREA_PREFIX, k): v ** 2 for k, v in Distance.UNITS.items()}
+ UNITS = {'%s%s' % (AREA_PREFIX, k): v**2 for k, v in Distance.UNITS.items()}
ALIAS = {k: '%s%s' % (AREA_PREFIX, v) for k, v in Distance.ALIAS.items()}
LALIAS = {k.lower(): v for k, v in ALIAS.items()}
def __truediv__(self, other):
if isinstance(other, NUMERIC_TYPES):
--- a/django:django/contrib/humanize/templatetags/humanize.py
+++ b/django:django/contrib/humanize/templatetags/humanize.py
@@ -130,11 +130,11 @@
abs_value = abs(value)
if abs_value < 1000000:
return value
for exponent, converter in intword_converters:
- large_number = 10 ** exponent
+ large_number = 10**exponent
if abs_value < large_number * 1000:
new_value = value / large_number
rounded_value = round_away_from_one(new_value)
return converter(abs(rounded_value)) % {
'value': defaultfilters.floatformat(new_value, 1),
--- a/django:django/core/files/base.py
+++ b/django:django/core/files/base.py
@@ -4,11 +4,11 @@
from django.core.files.utils import FileProxyMixin
from django.utils.functional import cached_property
class File(FileProxyMixin):
- DEFAULT_CHUNK_SIZE = 64 * 2 ** 10
+ DEFAULT_CHUNK_SIZE = 64 * 2**10
def __init__(self, file, name=None):
self.file = file
if name is None:
name = getattr(file, 'name', None)
--- a/django:django/core/files/uploadhandler.py
+++ b/django:django/core/files/uploadhandler.py
@@ -71,11 +71,11 @@
class FileUploadHandler:
"""
Base class for streaming upload handlers.
"""
- chunk_size = 64 * 2 ** 10 # : The default chunk size is 64 KB.
+ chunk_size = 64 * 2**10 # : The default chunk size is 64 KB.
def __init__(self, request=None):
self.file_name = None
self.content_type = None
self.content_length = None
--- a/django:django/core/handlers/asgi.py
+++ b/django:django/core/handlers/asgi.py
@@ -132,11 +132,11 @@
class ASGIHandler(base.BaseHandler):
"""Handler for ASGI requests."""
request_class = ASGIRequest
# Size to chunk response bodies into for multiple response messages.
- chunk_size = 2 ** 16
+ chunk_size = 2**16
def __init__(self):
super().__init__()
self.load_middleware(is_async=True)
--- a/django:django/db/backends/oracle/features.py
+++ b/django:django/db/backends/oracle/features.py
@@ -56,11 +56,11 @@
"""
supports_callproc_kwargs = True
supports_over_clause = True
supports_frame_range_fixed_distance = True
supports_ignore_conflicts = False
- max_query_params = 2 ** 16 - 1
+ max_query_params = 2**16 - 1
supports_partial_indexes = False
supports_slicing_ordering_in_compound = True
allows_multiple_constraints_on_same_fields = False
supports_boolean_expr_in_select_clause = False
supports_primitives_in_json_field = False
--- a/django:django/http/multipartparser.py
+++ b/django:django/http/multipartparser.py
@@ -103,11 +103,11 @@
self._input_data = input_data
# For compatibility with low-level network APIs (with 32-bit integers),
# the chunk size should be < 2^31, but still divisible by 4.
possible_sizes = [x.chunk_size for x in upload_handlers if x.chunk_size]
- self._chunk_size = min([2 ** 31 - 4] + possible_sizes)
+ self._chunk_size = min([2**31 - 4] + possible_sizes)
self._meta = META
self._encoding = encoding or settings.DEFAULT_CHARSET
self._content_length = content_length
self._upload_handlers = upload_handlers
--- a/django:django/test/runner.py
+++ b/django:django/test/runner.py
@@ -539,11 +539,11 @@
return h.hexdigest()
def __init__(self, seed=None):
if seed is None:
# Limit seeds to 10 digits for simpler output.
- seed = random.randint(0, 10 ** 10 - 1)
+ seed = random.randint(0, 10**10 - 1)
seed_source = 'generated'
else:
seed_source = 'given'
self.seed = seed
self.seed_source = seed_source
--- a/django:tests/admin_views/tests.py
+++ b/django:tests/admin_views/tests.py
@@ -4901,11 +4901,11 @@
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username='super', password='secret', email='super@example.com'
)
file1 = tempfile.NamedTemporaryFile(suffix=".file1")
- file1.write(b'a' * (2 ** 21))
+ file1.write(b'a' * (2**21))
filename = file1.name
file1.close()
cls.gallery = Gallery.objects.create(name='Test Gallery')
cls.picture = Picture.objects.create(
name='Test Picture',
--- a/django:tests/auth_tests/test_hashers.py
+++ b/django:tests/auth_tests/test_hashers.py
@@ -819,11 +819,11 @@
finally:
setattr(hasher, attr, old_value)
def test_scrypt_upgrade(self):
tests = [
- ('work_factor', 'work factor', 2 ** 11),
+ ('work_factor', 'work factor', 2**11),
('block_size', 'block size', 10),
('parallelism', 'parallelism', 2),
]
for attr, summary_key, new_value in tests:
with self.subTest(attr=attr):
--- a/django:tests/backends/oracle/test_operations.py
+++ b/django:tests/backends/oracle/test_operations.py
@@ -17,11 +17,11 @@
)
self.assertEqual(seq_name, 'SCHEMA_AUTHORWITHEVENLOB0B8_SQ')
def test_bulk_batch_size(self):
# Oracle restricts the number of parameters in a query.
- objects = range(2 ** 16)
+ objects = range(2**16)
self.assertEqual(connection.ops.bulk_batch_size([], objects), len(objects))
# Each field is a parameter for each object.
self.assertEqual(
connection.ops.bulk_batch_size(['id'], objects),
connection.features.max_query_params,
--- a/django:tests/backends/tests.py
+++ b/django:tests/backends/tests.py
@@ -287,30 +287,30 @@
else:
cursor.execute(query, args)
def test_cursor_executemany(self):
# Test cursor.executemany #4896
- args = [(i, i ** 2) for i in range(-5, 6)]
+ args = [(i, i**2) for i in range(-5, 6)]
self.create_squares_with_executemany(args)
self.assertEqual(Square.objects.count(), 11)
for i in range(-5, 6):
square = Square.objects.get(root=i)
- self.assertEqual(square.square, i ** 2)
+ self.assertEqual(square.square, i**2)
def test_cursor_executemany_with_empty_params_list(self):
# Test executemany with params=[] does nothing #4765
args = []
self.create_squares_with_executemany(args)
self.assertEqual(Square.objects.count(), 0)
def test_cursor_executemany_with_iterator(self):
# Test executemany accepts iterators #10320
- args = ((i, i ** 2) for i in range(-3, 2))
+ args = ((i, i**2) for i in range(-3, 2))
self.create_squares_with_executemany(args)
self.assertEqual(Square.objects.count(), 5)
- args = ((i, i ** 2) for i in range(3, 7))
+ args = ((i, i**2) for i in range(3, 7))
with override_settings(DEBUG=True):
# same test for DebugCursorWrapper
self.create_squares_with_executemany(args)
self.assertEqual(Square.objects.count(), 9)
@@ -322,24 +322,24 @@
self.assertEqual(Square.objects.count(), 1)
@skipUnlessDBFeature('supports_paramstyle_pyformat')
def test_cursor_executemany_with_pyformat(self):
# Support pyformat style passing of parameters #10070
- args = [{'root': i, 'square': i ** 2} for i in range(-5, 6)]
+ args = [{'root': i, 'square': i**2} for i in range(-5, 6)]
self.create_squares(args, 'pyformat', multiple=True)
self.assertEqual(Square.objects.count(), 11)
for i in range(-5, 6):
square = Square.objects.get(root=i)
- self.assertEqual(square.square, i ** 2)
+ self.assertEqual(square.square, i**2)
@skipUnlessDBFeature('supports_paramstyle_pyformat')
def test_cursor_executemany_with_pyformat_iterator(self):
- args = ({'root': i, 'square': i ** 2} for i in range(-3, 2))
+ args = ({'root': i, 'square': i**2} for i in range(-3, 2))
self.create_squares(args, 'pyformat', multiple=True)
self.assertEqual(Square.objects.count(), 5)
- args = ({'root': i, 'square': i ** 2} for i in range(3, 7))
+ args = ({'root': i, 'square': i**2} for i in range(3, 7))
with override_settings(DEBUG=True):
# same test for DebugCursorWrapper
self.create_squares(args, 'pyformat', multiple=True)
self.assertEqual(Square.objects.count(), 9)
--- a/django:tests/bulk_create/models.py
+++ b/django:tests/bulk_create/models.py
@@ -84,11 +84,11 @@
duration_field = models.DurationField(null=True, default=datetime.timedelta(1))
float_field = models.FloatField(null=True, default=3.2)
integer_field = models.IntegerField(null=True, default=2)
null_boolean_field = models.BooleanField(null=True, default=False)
positive_big_integer_field = models.PositiveBigIntegerField(
- null=True, default=2 ** 63 - 1
+ null=True, default=2**63 - 1
)
positive_integer_field = models.PositiveIntegerField(null=True, default=3)
positive_small_integer_field = models.PositiveSmallIntegerField(
null=True, default=4
)
--- a/django:tests/cache/tests.py
+++ b/django:tests/cache/tests.py
@@ -1516,11 +1516,11 @@
def test_memcached_deletes_key_on_failed_set(self):
# By default memcached allows objects up to 1MB. For the cache_db session
# backend to always use the current session, memcached needs to delete
# the old key if it fails to set.
- max_value_length = 2 ** 20
+ max_value_length = 2**20
cache.set('small_value', 'a')
self.assertEqual(cache.get('small_value'), 'a')
large_value = 'a' * (max_value_length + 1)
--- a/django:tests/db_functions/comparison/test_cast.py
+++ b/django:tests/db_functions/comparison/test_cast.py
@@ -99,11 +99,11 @@
DTModel.objects.create(start_datetime=dt_value)
dtm = DTModel.objects.annotate(
start_datetime_as_time=Cast('start_datetime', models.TimeField())
).first()
rounded_ms = int(
- round(0.234567, connection.features.time_cast_precision) * 10 ** 6
+ round(0.234567, connection.features.time_cast_precision) * 10**6
)
self.assertEqual(
dtm.start_datetime_as_time, datetime.time(12, 42, 10, rounded_ms)
)
--- a/django:tests/db_functions/math/test_power.py
+++ b/django:tests/db_functions/math/test_power.py
@@ -18,17 +18,17 @@
def test_decimal(self):
DecimalModel.objects.create(n1=Decimal('1.0'), n2=Decimal('-0.6'))
obj = DecimalModel.objects.annotate(n_power=Power('n1', 'n2')).first()
self.assertIsInstance(obj.n_power, Decimal)
- self.assertAlmostEqual(obj.n_power, Decimal(obj.n1 ** obj.n2))
+ self.assertAlmostEqual(obj.n_power, Decimal(obj.n1**obj.n2))
def test_float(self):
FloatModel.objects.create(f1=2.3, f2=1.1)
obj = FloatModel.objects.annotate(f_power=Power('f1', 'f2')).first()
self.assertIsInstance(obj.f_power, float)
- self.assertAlmostEqual(obj.f_power, obj.f1 ** obj.f2)
+ self.assertAlmostEqual(obj.f_power, obj.f1**obj.f2)
def test_integer(self):
IntegerModel.objects.create(small=-1, normal=20, big=3)
obj = IntegerModel.objects.annotate(
small_power=Power('small', 'normal'),
@@ -36,8 +36,8 @@
big_power=Power('big', 'small'),
).first()
self.assertIsInstance(obj.small_power, float)
self.assertIsInstance(obj.normal_power, float)
self.assertIsInstance(obj.big_power, float)
- self.assertAlmostEqual(obj.small_power, obj.small ** obj.normal)
- self.assertAlmostEqual(obj.normal_power, obj.normal ** obj.big)
- self.assertAlmostEqual(obj.big_power, obj.big ** obj.small)
+ self.assertAlmostEqual(obj.small_power, obj.small**obj.normal)
+ self.assertAlmostEqual(obj.normal_power, obj.normal**obj.big)
+ self.assertAlmostEqual(obj.big_power, obj.big**obj.small)
--- a/django:tests/file_uploads/tests.py
+++ b/django:tests/file_uploads/tests.py
@@ -93,14 +93,14 @@
self.assertEqual(response.status_code, 200)
def test_large_upload(self):
file = tempfile.NamedTemporaryFile
with file(suffix=".file1") as file1, file(suffix=".file2") as file2:
- file1.write(b'a' * (2 ** 21))
+ file1.write(b'a' * (2**21))
file1.seek(0)
- file2.write(b'a' * (10 * 2 ** 20))
+ file2.write(b'a' * (10 * 2**20))
file2.seek(0)
post_data = {
'name': 'Ringo',
'file_field1': file1,
@@ -157,11 +157,11 @@
def test_unicode_file_name(self):
with sys_tempfile.TemporaryDirectory() as temp_dir:
# This file contains Chinese symbols and an accented char in the name.
with open(os.path.join(temp_dir, UNICODE_FILENAME), 'w+b') as file1:
- file1.write(b'b' * (2 ** 10))
+ file1.write(b'b' * (2**10))
file1.seek(0)
response = self.client.post('/unicode_name/', {'file_unicode': file1})
self.assertEqual(response.status_code, 200)
def test_unicode_file_name_rfc2231(self):
@@ -497,26 +497,26 @@
def test_custom_upload_handler(self):
file = tempfile.NamedTemporaryFile
with file() as smallfile, file() as bigfile:
# A small file (under the 5M quota)
- smallfile.write(b'a' * (2 ** 21))
+ smallfile.write(b'a' * (2**21))
smallfile.seek(0)
# A big file (over the quota)
- bigfile.write(b'a' * (10 * 2 ** 20))
+ bigfile.write(b'a' * (10 * 2**20))
bigfile.seek(0)
# Small file posting should work.
self.assertIn('f', self.client.post('/quota/', {'f': smallfile}).json())
# Large files don't go through.
self.assertNotIn('f', self.client.post("/quota/", {'f': bigfile}).json())
def test_broken_custom_upload_handler(self):
with tempfile.NamedTemporaryFile() as file:
- file.write(b'a' * (2 ** 21))
+ file.write(b'a' * (2**21))
file.seek(0)
msg = (
'You cannot alter upload handlers after the upload has been processed.'
)
@@ -556,17 +556,17 @@
self.assertIs(os.path.exists(temp_path), False)
def test_fileupload_getlist(self):
file = tempfile.NamedTemporaryFile
with file() as file1, file() as file2, file() as file2a:
- file1.write(b'a' * (2 ** 23))
+ file1.write(b'a' * (2**23))
file1.seek(0)
- file2.write(b'a' * (2 * 2 ** 18))
+ file2.write(b'a' * (2 * 2**18))
file2.seek(0)
- file2a.write(b'a' * (5 * 2 ** 20))
+ file2a.write(b'a' * (5 * 2**20))
file2a.seek(0)
response = self.client.post(
'/getlist_count/',
{
--- a/django:tests/file_uploads/uploadhandler.py
+++ b/django:tests/file_uploads/uploadhandler.py
@@ -15,11 +15,11 @@
"""
This test upload handler terminates the connection if more than a quota
(5MB) is uploaded.
"""
- QUOTA = 5 * 2 ** 20 # 5 MB
+ QUOTA = 5 * 2**20 # 5 MB
def __init__(self, request=None):
super().__init__(request)
self.total_upload = 0
--- a/django:tests/gis_tests/geoapp/test_expressions.py
+++ b/django:tests/gis_tests/geoapp/test_expressions.py
@@ -17,11 +17,11 @@
@skipUnlessDBFeature('supports_transform')
def test_geometry_value_annotation_different_srid(self):
p = Point(1, 1, srid=32140)
point = City.objects.annotate(p=Value(p, GeometryField(srid=4326))).first().p
- self.assertTrue(point.equals_exact(p.transform(4326, clone=True), 10 ** -5))
+ self.assertTrue(point.equals_exact(p.transform(4326, clone=True), 10**-5))
self.assertEqual(point.srid, 4326)
@skipUnlessDBFeature('supports_geography')
def test_geography_value(self):
p = Polygon(((1, 1), (1, 2), (2, 2), (2, 1), (1, 1)))
--- a/django:tests/many_to_one/tests.py
+++ b/django:tests/many_to_one/tests.py
@@ -686,20 +686,20 @@
def test_fk_to_bigautofield(self):
ch = City.objects.create(name='Chicago')
District.objects.create(city=ch, name='Far South')
District.objects.create(city=ch, name='North')
- ny = City.objects.create(name='New York', id=2 ** 33)
+ ny = City.objects.create(name='New York', id=2**33)
District.objects.create(city=ny, name='Brooklyn')
District.objects.create(city=ny, name='Manhattan')
def test_fk_to_smallautofield(self):
us = Country.objects.create(name='United States')
City.objects.create(country=us, name='Chicago')
City.objects.create(country=us, name='New York')
- uk = Country.objects.create(name='United Kingdom', id=2 ** 11)
+ uk = Country.objects.create(name='United Kingdom', id=2**11)
City.objects.create(country=uk, name='London')
City.objects.create(country=uk, name='Edinburgh')
def test_multiple_foreignkeys(self):
# Test of multiple ForeignKeys to the same model (bug #7125).
--- a/django:tests/migrations/test_operations.py
+++ b/django:tests/migrations/test_operations.py
@@ -4316,13 +4316,13 @@
def create_data(models, schema_editor):
Author = models.get_model("test_author", "Author")
Book = models.get_model("test_book", "Book")
author1 = Author.objects.create(name="Hemingway")
Book.objects.create(title="Old Man and The Sea", author=author1)
- Book.objects.create(id=2 ** 33, title="A farewell to arms", author=author1)
-
- author2 = Author.objects.create(id=2 ** 33, name="Remarque")
+ Book.objects.create(id=2**33, title="A farewell to arms", author=author1)
+
+ author2 = Author.objects.create(id=2**33, name="Remarque")
Book.objects.create(title="All quiet on the western front", author=author2)
Book.objects.create(title="Arc de Triomphe", author=author2)
create_author = migrations.CreateModel(
"Author",
@@ -4483,27 +4483,27 @@
def test_autofield__bigautofield_foreignfield_growth(self):
"""A field may be migrated from AutoField to BigAutoField."""
self._test_autofield_foreignfield_growth(
models.AutoField,
models.BigAutoField,
- 2 ** 33,
+ 2**33,
)
def test_smallfield_autofield_foreignfield_growth(self):
"""A field may be migrated from SmallAutoField to AutoField."""
self._test_autofield_foreignfield_growth(
models.SmallAutoField,
models.AutoField,
- 2 ** 22,
+ 2**22,
)
def test_smallfield_bigautofield_foreignfield_growth(self):
"""A field may be migrated from SmallAutoField to BigAutoField."""
self._test_autofield_foreignfield_growth(
models.SmallAutoField,
models.BigAutoField,
- 2 ** 33,
+ 2**33,
)
def test_run_python_noop(self):
"""
#24098 - Tests no-op RunPython operations.
--- a/django:tests/model_fields/test_uuid.py
+++ b/django:tests/model_fields/test_uuid.py
@@ -84,18 +84,18 @@
models.UUIDField().to_python(0),
uuid.UUID('00000000-0000-0000-0000-000000000000'),
)
# Works for integers less than 128 bits.
self.assertEqual(
- models.UUIDField().to_python((2 ** 128) - 1),
+ models.UUIDField().to_python((2**128) - 1),
uuid.UUID('ffffffff-ffff-ffff-ffff-ffffffffffff'),
)
def test_to_python_int_too_large(self):
# Fails for integers larger than 128 bits.
with self.assertRaises(exceptions.ValidationError):
- models.UUIDField().to_python(2 ** 128)
+ models.UUIDField().to_python(2**128)
class TestQuerying(TestCase):
@classmethod
def setUpTestData(cls):
--- a/django:tests/queries/test_db_returning.py
+++ b/django:tests/queries/test_db_returning.py
@@ -45,11 +45,11 @@
self.assertTrue(obj.pk)
self.assertIsInstance(obj.created, datetime.datetime)
@skipUnlessDBFeature('can_return_rows_from_bulk_insert')
def test_bulk_insert(self):
- objs = [ReturningModel(), ReturningModel(pk=2 ** 11), ReturningModel()]
+ objs = [ReturningModel(), ReturningModel(pk=2**11), ReturningModel()]
ReturningModel.objects.bulk_create(objs)
for obj in objs:
with self.subTest(obj=obj):
self.assertTrue(obj.pk)
self.assertIsInstance(obj.created, datetime.datetime)
--- a/django:tests/signing/tests.py
+++ b/django:tests/signing/tests.py
@@ -200,8 +200,8 @@
signer.unsign(ts, max_age=10)
class TestBase62(SimpleTestCase):
def test_base62(self):
- tests = [-(10 ** 10), 10 ** 10, 1620378259, *range(-100, 100)]
+ tests = [-(10**10), 10**10, 1620378259, *range(-100, 100)]
for i in tests:
self.assertEqual(i, signing.b62_decode(signing.b62_encode(i)))
--- a/django:tests/utils_tests/test_baseconv.py
+++ b/django:tests/utils_tests/test_baseconv.py
@@ -16,11 +16,11 @@
# RemovedInDjango50Warning
class TestBaseConv(TestCase):
def test_baseconv(self):
- nums = [-(10 ** 10), 10 ** 10, *range(-100, 100)]
+ nums = [-(10**10), 10**10, *range(-100, 100)]
for converter in [base2, base16, base36, base56, base62, base64]:
for i in nums:
self.assertEqual(i, converter.decode(converter.encode(i)))
def test_base11(self):
[flake8-bugbear - https://github.com/PyCQA/flake8-bugbear.git]
╰─> revision a29a583e7a46edd49b8caf68b61e0ddfed4cd82c
--- a/flake8-bugbear:tests/b006_b008.py
+++ b/flake8-bugbear:tests/b006_b008.py
@@ -119,15 +119,15 @@
v=attrgetter("foo"), v2=itemgetter("foo"), v3=methodcaller("foo")
):
pass
-def list_comprehension_also_not_okay(default=[i ** 2 for i in range(3)]):
+def list_comprehension_also_not_okay(default=[i**2 for i in range(3)]):
pass
-def dict_comprehension_also_not_okay(default={i: i ** 2 for i in range(3)}):
+def dict_comprehension_also_not_okay(default={i: i**2 for i in range(3)}):
pass
-def set_comprehension_also_not_okay(default={i ** 2 for i in range(3)}):
+def set_comprehension_also_not_okay(default={i**2 for i in range(3)}):
pass
[hypothesis - https://github.com/HypothesisWorks/hypothesis.git]
╰─> revision 5cb5d5435dcc7481f5870d37786bb4ecc3bae8b2
--- a/hypothesis:hypothesis-python/src/hypothesis/extra/django/_fields.py
+++ b/hypothesis:hypothesis-python/src/hypothesis/extra/django/_fields.py
@@ -138,11 +138,11 @@
@register_for(dm.DurationField)
def _for_duration(field):
# SQLite stores timedeltas as six bytes of microseconds
if using_sqlite():
- delta = timedelta(microseconds=2 ** 47 - 1)
+ delta = timedelta(microseconds=2**47 - 1)
return st.timedeltas(-delta, delta)
return st.timedeltas()
@register_for(dm.SlugField)
@@ -183,11 +183,11 @@
@register_for(dm.DecimalField)
@register_for(df.DecimalField)
def _for_decimal(field):
min_value, max_value = numeric_bounds_from_validators(field)
- bound = Decimal(10 ** field.max_digits - 1) / (10 ** field.decimal_places)
+ bound = Decimal(10**field.max_digits - 1) / (10**field.decimal_places)
return st.decimals(
min_value=max(min_value, -bound),
max_value=min(max_value, bound),
places=field.decimal_places,
)
--- a/hypothesis:hypothesis-python/src/hypothesis/extra/numpy.py
+++ b/hypothesis:hypothesis-python/src/hypothesis/extra/numpy.py
@@ -169,11 +169,11 @@
else:
# Note that this case isn't valid to pass to arrays(), but we support
# it here because we'd have to guard against equivalents in arrays()
# regardless and drawing scalars is a valid use-case.
res = st.sampled_from(TIME_RESOLUTIONS)
- result = st.builds(dtype.type, st.integers(-(2 ** 63), 2 ** 63 - 1), res)
+ result = st.builds(dtype.type, st.integers(-(2**63), 2**63 - 1), res)
else:
raise InvalidArgument(f"No strategy inference for {dtype}")
return result.map(dtype.type)
--- a/hypothesis:hypothesis-python/src/hypothesis/extra/pandas/impl.py
+++ b/hypothesis:hypothesis-python/src/hypothesis/extra/pandas/impl.py
@@ -164,11 +164,11 @@
it will default to some suitable value based on min_size.
"""
check_valid_size(min_size, "min_size")
check_valid_size(max_size, "max_size")
if max_size is None:
- max_size = min([min_size + DEFAULT_MAX_SIZE, 2 ** 63 - 1])
+ max_size = min([min_size + DEFAULT_MAX_SIZE, 2**63 - 1])
check_valid_interval(min_size, max_size, "min_size", "max_size")
return st.integers(min_size, max_size).map(pandas.RangeIndex)
@cacheable
--- a/hypothesis:hypothesis-python/src/hypothesis/internal/conjecture/datatree.py
+++ b/hypothesis:hypothesis-python/src/hypothesis/internal/conjecture/datatree.py
@@ -253,11 +253,11 @@
# We don't expect this assertion to ever fire, but coverage
# wants the loop inside to run if you have branch checking
# on, hence the pragma.
assert ( # pragma: no cover
check_counter != 1000
- or len(branch.children) < (2 ** n_bits)
+ or len(branch.children) < (2**n_bits)
or any(not v.is_exhausted for v in branch.children.values())
)
def rewrite(self, buffer):
"""Use previously seen ConjectureData objects to return a tuple of
--- a/hypothesis:hypothesis-python/src/hypothesis/internal/conjecture/shrinking/floats.py
+++ b/hypothesis:hypothesis-python/src/hypothesis/internal/conjecture/shrinking/floats.py
@@ -18,11 +18,11 @@
from hypothesis.internal.conjecture.floats import float_to_lex
from hypothesis.internal.conjecture.shrinking.common import Shrinker
from hypothesis.internal.conjecture.shrinking.integer import Integer
-MAX_PRECISE_INTEGER = 2 ** 53
+MAX_PRECISE_INTEGER = 2**53
class Float(Shrinker):
def setup(self):
self.NAN = math.nan
--- a/hypothesis:hypothesis-python/src/hypothesis/internal/conjecture/utils.py
+++ b/hypothesis:hypothesis-python/src/hypothesis/internal/conjecture/utils.py
@@ -22,11 +22,11 @@
from hypothesis.errors import InvalidArgument
from hypothesis.internal.compat import floor, int_from_bytes
from hypothesis.internal.floats import int_to_float
-LABEL_MASK = 2 ** 64 - 1
+LABEL_MASK = 2**64 - 1
def calc_label_from_name(name: str) -> int:
hashed = hashlib.sha384(name.encode()).digest()
return int_from_bytes(hashed[:8])
@@ -183,11 +183,11 @@
# There isn't enough precision near one for this to occur for values
# far from 0.
p = 0.0
bits = 1
- size = 2 ** bits
+ size = 2**bits
data.start_example(BIASED_COIN_LABEL)
while True:
# The logic here is a bit complicated and special cased to make it
# play better with the shrinker.
--- a/hypothesis:hypothesis-python/src/hypothesis/internal/entropy.py
+++ b/hypothesis:hypothesis-python/src/hypothesis/internal/entropy.py
@@ -61,11 +61,11 @@
via `register_random`. We support registration of additional random.Random
instances (or other objects with seed, getstate, and setstate methods)
to force determinism on simulation or scheduling frameworks which avoid
using the global random state. See e.g. #1709.
"""
- assert isinstance(seed, int) and 0 <= seed < 2 ** 32
+ assert isinstance(seed, int) and 0 <= seed < 2**32
states: list = []
if "numpy" in sys.modules and not any(
isinstance(x, NumpyRandomWrapper) for x in RANDOMS_TO_MANAGE
):
--- a/hypothesis:hypothesis-python/src/hypothesis/provisional.py
+++ b/hypothesis:hypothesis-python/src/hypothesis/provisional.py
@@ -165,11 +165,11 @@
def url_encode(s):
return "".join(c if c in URL_SAFE_CHARACTERS else "%%%02X" % ord(c) for c in s)
schemes = st.sampled_from(["http", "https"])
- ports = st.integers(min_value=0, max_value=2 ** 16 - 1).map(":{}".format)
+ ports = st.integers(min_value=0, max_value=2**16 - 1).map(":{}".format)
paths = st.lists(st.text(string.printable).map(url_encode)).map("/".join)
return st.builds(
"{}://{}{}/{}{}".format,
schemes,
--- a/hypothesis:hypothesis-python/src/hypothesis/strategies/_internal/core.py
+++ b/hypothesis:hypothesis-python/src/hypothesis/strategies/_internal/core.py
@@ -748,11 +748,11 @@
return f"RandomSeeder({self.seed!r})"
class RandomModule(SearchStrategy):
def do_draw(self, data):
- seed = data.draw(integers(0, 2 ** 32 - 1))
+ seed = data.draw(integers(0, 2**32 - 1))
seed_all, restore_all = get_seeder_and_restorer(seed)
seed_all()
cleanup(restore_all)
return RandomSeeder(seed)
--- a/hypothesis:hypothesis-python/src/hypothesis/strategies/_internal/random.py
+++ b/hypothesis:hypothesis-python/src/hypothesis/strategies/_internal/random.py
@@ -185,11 +185,11 @@
else:
return f
class ArtificialRandom(HypothesisRandom):
- VERSION = 10 ** 6
+ VERSION = 10**6
def __init__(self, note_method_calls, data):
super().__init__(note_method_calls=note_method_calls)
self.__data = data
self.__state = RandomState()
--- a/hypothesis:hypothesis-python/src/hypothesis/strategies/_internal/types.py
+++ b/hypothesis:hypothesis-python/src/hypothesis/strategies/_internal/types.py
@@ -364,11 +364,11 @@
except Exception:
return False
def _networks(bits):
- return st.tuples(st.integers(0, 2 ** bits - 1), st.integers(-bits, 0).map(abs))
+ return st.tuples(st.integers(0, 2**bits - 1), st.integers(-bits, 0).map(abs))
utc_offsets = st.builds(
datetime.timedelta, minutes=st.integers(0, 59), hours=st.integers(-23, 23)
)
--- a/hypothesis:hypothesis-python/tests/array_api/test_arrays.py
+++ b/hypothesis:hypothesis-python/tests/array_api/test_arrays.py
@@ -323,12 +323,12 @@
@pytest.mark.parametrize("fill", [False, True])
@pytest.mark.parametrize(
"dtype, strat",
[
- (xp.float32, st.floats(min_value=10 ** 40, allow_infinity=False)),
- (xp.float64, st.floats(min_value=10 ** 40, allow_infinity=False)),
+ (xp.float32, st.floats(min_value=10**40, allow_infinity=False)),
+ (xp.float64, st.floats(min_value=10**40, allow_infinity=False)),
],
)
@fails_with(InvalidArgument)
@given(st.data())
def test_may_not_use_unrepresentable_elements(fill, dtype, strat, data):
@@ -469,11 +469,11 @@
@st.composite
def distinct_integers(draw):
used = draw(st.shared(st.builds(set), key="distinct_integers.used"))
- i = draw(st.integers(0, 2 ** 64 - 1).filter(lambda x: x not in used))
+ i = draw(st.integers(0, 2**64 - 1).filter(lambda x: x not in used))
used.add(i)
return i
@needs_xp_unique
--- a/hypothesis:hypothesis-python/tests/array_api/test_indices.py
+++ b/hypothesis:hypothesis-python/tests/array_api/test_indices.py
@@ -99,11 +99,11 @@
if 0 in shape:
# If there's a zero in the shape, the array will have no elements.
array = xp.zeros(shape)
assert array.size == 0
- elif math.prod(shape) <= 10 ** 5:
+ elif math.prod(shape) <= 10**5:
# If it's small enough to instantiate, do so with distinct elements.
array = xp.reshape(xp.arange(math.prod(shape)), shape)
else:
# We can't cheat on this one, so just try another.
assume(False)
--- a/hypothesis:hypothesis-python/tests/common/__init__.py
+++ b/hypothesis:hypothesis-python/tests/common/__init__.py
@@ -79,11 +79,11 @@
sampled_from(range(10)),
one_of(just("a"), just("b"), just("c")),
sampled_from(("a", "b", "c")),
integers(),
integers(min_value=3),
- integers(min_value=(-(2 ** 32)), max_value=(2 ** 64)),
+ integers(min_value=(-(2**32)), max_value=(2**64)),
floats(),
floats(min_value=-2.0, max_value=3.0),
floats(),
floats(min_value=-2.0),
floats(),
--- a/hypothesis:hypothesis-python/tests/conftest.py
+++ b/hypothesis:hypothesis-python/tests/conftest.py
@@ -105,11 +105,11 @@
yield
else:
# We start by peturbing the state of the PRNG, because repeatedly
# leaking PRNG state resets state_after to the (previously leaked)
# state_before, and that just shows as "no use of random".
- random.seed(independent_random.randrange(2 ** 32))
+ random.seed(independent_random.randrange(2**32))
before = random.getstate()
yield
after = random.getstate()
if before != after:
if after in random_states_after_tests:
--- a/hypothesis:hypothesis-python/tests/conjecture/test_engine.py
+++ b/hypothesis:hypothesis-python/tests/conjecture/test_engine.py
@@ -254,11 +254,11 @@
bad = [False, False]
def f(data):
seen.append(data.draw_bits(32))
# Rare, potentially multi-error conditions
- if seen[-1] > 2 ** 31:
+ if seen[-1] > 2**31:
bad[0] = True
raise ValueError
bad[1] = True
raise Exception
@@ -423,18 +423,18 @@
def test_fails_health_check_for_large_base():
@fails_health_check(HealthCheck.large_base_example)
def _(data):
- data.draw_bytes(10 ** 6)
+ data.draw_bytes(10**6)
def test_fails_health_check_for_large_non_base():
@fails_health_check(HealthCheck.data_too_large)
def _(data):
if data.draw_bits(8):
- data.draw_bytes(10 ** 6)
+ data.draw_bytes(10**6)
def test_fails_health_check_for_slow_draws():
@fails_health_check(HealthCheck.too_slow)
def _(data):
@@ -810,11 +810,11 @@
def fast_time():
val[0] += 1000
return val[0]
def f(data):
- if data.draw_bits(64) > 2 ** 33:
+ if data.draw_bits(64) > 2**33:
data.mark_interesting()
monkeypatch.setattr(time, "perf_counter", fast_time)
runner = ConjectureRunner(f, settings=settings(database=None))
runner.run()
@@ -1230,11 +1230,11 @@
database_key=b"stuff",
)
runner.run()
- assert len(runner.pareto_front) == 2 ** 4
+ assert len(runner.pareto_front) == 2**4
def test_pareto_front_contains_smallest_valid_when_not_targeting():
with deterministic_PRNG():
@@ -1272,11 +1272,11 @@
database_key=b"stuff",
)
runner.run()
- assert len(runner.pareto_front) == 2 ** 4
+ assert len(runner.pareto_front) == 2**4
def test_database_contains_only_pareto_front():
with deterministic_PRNG():
@@ -1452,11 +1452,11 @@
runner.cached_test_function(bytes(2))
runner.run()
- assert runner.best_observed_targets["n"] == (2 ** 16) - 1
+ assert runner.best_observed_targets["n"] == (2**16) - 1
def test_runs_optimisation_once_when_generating():
def test(data):
data.target_observations["n"] = data.draw_bits(16)
--- a/hypothesis:hypothesis-python/tests/conjecture/test_float_encoding.py
+++ b/hypothesis:hypothesis-python/tests/conjecture/test_float_encoding.py
@@ -23,11 +23,11 @@
from hypothesis.internal.conjecture.data import ConjectureData
from hypothesis.internal.conjecture.engine import ConjectureRunner
from hypothesis.internal.floats import float_to_int
EXPONENTS = list(range(0, flt.MAX_EXPONENT + 1))
-assert len(EXPONENTS) == 2 ** 11
+assert len(EXPONENTS) == 2**11
def assert_reordered_exponents(res):
res = list(res)
assert len(res) == len(EXPONENTS)
@@ -55,16 +55,16 @@
@given(st.data())
def test_double_reverse_bounded(data):
n = data.draw(st.integers(1, 64))
- i = data.draw(st.integers(0, 2 ** n - 1))
+ i = data.draw(st.integers(0, 2**n - 1))
j = flt.reverse_bits(i, n)
assert flt.reverse_bits(j, n) == i
-@given(st.integers(0, 2 ** 64 - 1))
+@given(st.integers(0, 2**64 - 1))
def test_double_reverse(i):
j = flt.reverse64(i)
assert flt.reverse64(j) == i
@@ -101,11 +101,11 @@
assert float_to_int(f) == float_to_int(g)
@example(1, 0.5)
-@given(st.integers(1, 2 ** 53), st.floats(0, 1).filter(lambda x: x not in (0, 1)))
+@given(st.integers(1, 2**53), st.floats(0, 1).filter(lambda x: x not in (0, 1)))
def test_floats_order_worse_than_their_integral_part(n, g):
f = n + g
assume(int(f) != f)
assume(int(f) != 0)
i = flt.float_to_lex(f)
--- a/hypothesis:hypothesis-python/tests/conjecture/test_intlist.py
+++ b/hypothesis:hypothesis-python/tests/conjecture/test_intlist.py
@@ -16,11 +16,11 @@
import pytest
from hypothesis import assume, given, strategies as st
from hypothesis.internal.conjecture.junkdrawer import IntList
-non_neg_lists = st.lists(st.integers(min_value=0, max_value=2 ** 63 - 1))
+non_neg_lists = st.lists(st.integers(min_value=0, max_value=2**63 - 1))
@given(non_neg_lists)
def test_intlist_is_equal_to_itself(ls):
assert IntList(ls) == IntList(ls)
@@ -48,8 +48,8 @@
IntList([-1])
def test_extend_by_too_large():
x = IntList()
- ls = [1, 10 ** 6]
+ ls = [1, 10**6]
x.extend(ls)
assert list(x) == ls
--- a/hypothesis:hypothesis-python/tests/conjecture/test_junkdrawer.py
+++ b/hypothesis:hypothesis-python/tests/conjecture/test_junkdrawer.py
@@ -120,11 +120,11 @@
with pytest.raises(ValueError):
IntList([-1])
def test_int_list_can_contain_arbitrary_size():
- n = 2 ** 65
+ n = 2**65
assert list(IntList([n])) == [n]
def test_int_list_of_length():
assert list(IntList.of_length(10)) == [0] * 10
@@ -142,11 +142,11 @@
assert x == y
def test_int_list_extend():
x = IntList.of_length(3)
- n = 2 ** 64 - 1
+ n = 2**64 - 1
x.extend([n])
assert list(x) == [0, 0, 0, n]
@pytest.mark.parametrize("n", [0, 1, 30, 70])
--- a/hypothesis:hypothesis-python/tests/conjecture/test_lstar.py
+++ b/hypothesis:hypothesis-python/tests/conjecture/test_lstar.py
@@ -218,11 +218,11 @@
def varint(draw):
result = bytearray()
result.append(draw(st.integers(1, 255)))
n = result[0] & 15
assume(n > 0)
- value = draw(st.integers(10, 256 ** n - 1))
+ value = draw(st.integers(10, 256**n - 1))
result.extend(value.to_bytes(n, "big"))
return bytes(result)
@example([b"\x02\x01\n"])
--- a/hypothesis:hypothesis-python/tests/conjecture/test_optimiser.py
+++ b/hypothesis:hypothesis-python/tests/conjecture/test_optimiser.py
@@ -104,11 +104,11 @@
with pytest.raises(RunIsComplete):
runner.optimise_targets()
assert runner.best_observed_targets[""] == 255
-@pytest.mark.parametrize("lower, upper", [(0, 1000), (13, 100), (1000, 2 ** 16 - 1)])
+@pytest.mark.parametrize("lower, upper", [(0, 1000), (13, 100), (1000, 2**16 - 1)])
@pytest.mark.parametrize("score_up", [False, True])
def test_can_find_endpoints_of_a_range(lower, upper, score_up):
with deterministic_PRNG():
def test(data):
--- a/hypothesis:hypothesis-python/tests/conjecture/test_pareto.py
+++ b/hypothesis:hypothesis-python/tests/conjecture/test_pareto.py
@@ -39,11 +39,11 @@
database_key=b"stuff",
)
runner.run()
- assert len(runner.pareto_front) == 2 ** 4
+ assert len(runner.pareto_front) == 2**4
def test_database_contains_only_pareto_front():
with deterministic_PRNG():
@@ -234,11 +234,11 @@
assert runner.interesting_examples
def test_stops_optimising_once_interesting():
- hi = 2 ** 16 - 1
+ hi = 2**16 - 1
def test(data):
n = data.draw_bits(16)
data.target_observations[""] = n
if n < hi:
--- a/hypothesis:hypothesis-python/tests/conjecture/test_test_data.py
+++ b/hypothesis:hypothesis-python/tests/conjecture/test_test_data.py
@@ -109,11 +109,11 @@
assert not any(eg.end is None for eg in x.examples)
class BigStrategy(SearchStrategy):
def do_draw(self, data):
- data.draw_bytes(10 ** 6)
+ data.draw_bytes(10**6)
def test_does_not_double_freeze_in_interval_close():
x = ConjectureData.for_buffer(b"hi")
with pytest.raises(StopTest):
--- a/hypothesis:hypothesis-python/tests/conjecture/test_utils.py
+++ b/hypothesis:hypothesis-python/tests/conjecture/test_utils.py
@@ -110,11 +110,11 @@
count += 1
assert p == Fraction(count, total)
def test_too_small_to_be_useful_coin():
- assert not cu.biased_coin(ConjectureData.for_buffer([1]), 0.5 ** 65)
+ assert not cu.biased_coin(ConjectureData.for_buffer([1]), 0.5**65)
@example([Fraction(1, 3), Fraction(1, 3), Fraction(1, 3)])
@example([Fraction(1, 1), Fraction(1, 2)])
@example([Fraction(1, 2), Fraction(4, 10)])
@@ -198,11 +198,11 @@
def test_restricted_bits():
assert (
cu.integer_range(
- ConjectureData.for_buffer([1, 0, 0, 0, 0]), lower=0, upper=2 ** 64 - 1
+ ConjectureData.for_buffer([1, 0, 0, 0, 0]), lower=0, upper=2**64 - 1
)
== 0
)
@@ -316,7 +316,7 @@
except StopTest:
reject()
def test_samples_from_a_range_directly():
- s = cu.check_sample(range(10 ** 1000), "")
+ s = cu.check_sample(range(10**1000), "")
assert isinstance(s, range)
--- a/hypothesis:hypothesis-python/tests/cover/test_complex_numbers.py
+++ b/hypothesis:hypothesis-python/tests/cover/test_complex_numbers.py
@@ -48,22 +48,22 @@
def test_minimal_quadrant4():
assert minimal(complex_numbers(), lambda x: x.imag < 0 and x.real > 0) == 1 - 1j
-@given(st.data(), st.integers(-5, 5).map(lambda x: 10 ** x))
+@given(st.data(), st.integers(-5, 5).map(lambda x: 10**x))
def test_max_magnitude_respected(data, mag):
c = data.draw(complex_numbers(max_magnitude=mag))
assert abs(c) <= mag * (1 + sys.float_info.epsilon)
@given(complex_numbers(max_magnitude=0))
def test_max_magnitude_zero(val):
assert val == 0
-@given(st.data(), st.integers(-5, 5).map(lambda x: 10 ** x))
+@given(st.data(), st.integers(-5, 5).map(lambda x: 10**x))
def test_min_magnitude_respected(data, mag):
c = data.draw(complex_numbers(min_magnitude=mag))
assert (
abs(c.real) >= mag
or abs(c.imag) >= mag
--- a/hypothesis:hypothesis-python/tests/cover/test_custom_reprs.py
+++ b/hypothesis:hypothesis-python/tests/cover/test_custom_reprs.py
@@ -22,11 +22,11 @@
assert repr(st.integers()) == "integers()"
assert repr(st.integers(min_value=1)) == "integers(min_value=1)"
def test_sampled_repr_leaves_range_as_range():
- huge = 10 ** 100
+ huge = 10**100
assert repr(st.sampled_from(range(huge))) == f"sampled_from(range(0, {huge}))"
def hi(there, stuff):
return there
--- a/hypothesis:hypothesis-python/tests/cover/test_datetimes.py
+++ b/hypothesis:hypothesis-python/tests/cover/test_datetimes.py
@@ -28,11 +28,11 @@
assert minimal(timedeltas(), lambda x: x.days > 0) == dt.timedelta(1)
def test_can_find_negative_delta():
assert minimal(
- timedeltas(max_value=dt.timedelta(10 ** 6)), lambda x: x.days < 0
+ timedeltas(max_value=dt.timedelta(10**6)), lambda x: x.days < 0
) == dt.timedelta(-1)
def test_can_find_on_the_second():
find_any(timedeltas(), lambda x: x.seconds == 0)
@@ -91,11 +91,11 @@
assert minimal(dates(), lambda x: x.year < 2000).year == 1999
@pytest.mark.parametrize("month", range(1, 13))
def test_can_find_each_month(month):
- find_any(dates(), lambda x: x.month == month, settings(max_examples=10 ** 6))
+ find_any(dates(), lambda x: x.month == month, settings(max_examples=10**6))
def test_min_year_is_respected():
assert minimal(dates(min_value=dt.date.min.replace(2003))).year == 2003
--- a/hypothesis:hypothesis-python/tests/cover/test_filter_rewriting.py
+++ b/hypothesis:hypothesis-python/tests/cover/test_filter_rewriting.py
@@ -135,11 +135,11 @@
def mod2(x):
return x % 2
-Y = 2 ** 20
+Y = 2**20
@given(
data=st.data(),
predicates=st.permutations(
--- a/hypothesis:hypothesis-python/tests/cover/test_float_nastiness.py
+++ b/hypothesis:hypothesis-python/tests/cover/test_float_nastiness.py
@@ -175,18 +175,18 @@
@pytest.mark.parametrize(
"kwargs",
[
- {"min_value": 10 ** 5, "width": 16},
- {"max_value": 10 ** 5, "width": 16},
- {"min_value": 10 ** 40, "width": 32},
- {"max_value": 10 ** 40, "width": 32},
- {"min_value": 10 ** 400, "width": 64},
- {"max_value": 10 ** 400, "width": 64},
- {"min_value": 10 ** 400},
- {"max_value": 10 ** 400},
+ {"min_value": 10**5, "width": 16},
+ {"max_value": 10**5, "width": 16},
+ {"min_value": 10**40, "width": 32},
+ {"max_value": 10**40, "width": 32},
+ {"min_value": 10**400, "width": 64},
+ {"max_value": 10**400, "width": 64},
+ {"min_value": 10**400},
+ {"max_value": 10**400},
],
)
def test_out_of_range(kwargs):
with pytest.raises(OverflowError):
st.floats(**kwargs).validate()
@@ -196,11 +196,11 @@
with pytest.raises(InvalidArgument):
st.floats(width=128).validate()
def test_no_single_floats_in_range():
- low = 2.0 ** 25 + 1
+ low = 2.0**25 + 1
high = low + 2
st.floats(low, high).validate() # Note: OK for 64bit floats
with pytest.raises(InvalidArgument):
"""Unrepresentable bounds are deprecated; but we're not testing that
here."""
--- a/hypothesis:hypothesis-python/tests/cover/test_health_checks.py
+++ b/hypothesis:hypothesis-python/tests/cover/test_health_checks.py
@@ -125,11 +125,11 @@
test()
assert "filter" in e.value.args[0]
def test_large_data_will_fail_a_health_check():
- @given(st.none() | st.binary(min_size=10 ** 5, max_size=10 ** 5))
+ @given(st.none() | st.binary(min_size=10**5, max_size=10**5))
@settings(database=None)
def test(x):
pass
with raises(FailedHealthCheck) as e:
--- a/hypothesis:hypothesis-python/tests/cover/test_lookup.py
+++ b/hypothesis:hypothesis-python/tests/cover/test_lookup.py
@@ -706,11 +706,11 @@
# Decimal("snan") is not hashable; we should be able to generate it.
# See https://github.com/HypothesisWorks/hypothesis/issues/2320
find_any(
from_type(typing.Hashable),
lambda x: not types._can_hash(x),
- settings(max_examples=10 ** 5),
+ settings(max_examples=10**5),
)
@pytest.mark.parametrize(
"typ,repr_",
--- a/hypothesis:hypothesis-python/tests/cover/test_numerics.py
+++ b/hypothesis:hypothesis-python/tests/cover/test_numerics.py
@@ -128,11 +128,11 @@
def test_decimals_include_nan():
find_any(decimals(), lambda x: x.is_nan())
def test_decimals_include_inf():
- find_any(decimals(), lambda x: x.is_infinite(), settings(max_examples=10 ** 6))
+ find_any(decimals(), lambda x: x.is_infinite(), settings(max_examples=10**6))
@given(decimals(allow_nan=False))
def test_decimals_can_disallow_nan(x):
assert not x.is_nan()
--- a/hypothesis:hypothesis-python/tests/cover/test_randoms.py
+++ b/hypothesis:hypothesis-python/tests/cover/test_randoms.py
@@ -59,11 +59,11 @@
define_method_strategy("gammavariate", alpha=beta_param, beta=beta_param)
define_method_strategy("weibullvariate", alpha=beta_param, beta=beta_param)
define_method_strategy("choice", seq=seq_param)
define_method_strategy("choices", population=seq_param, k=st.integers(1, 100))
define_method_strategy("expovariate", lambd=beta_param)
-define_method_strategy("_randbelow", n=st.integers(1, 2 ** 64))
+define_method_strategy("_randbelow", n=st.integers(1, 2**64))
define_method_strategy("random")
define_method_strategy("getrandbits", n=st.integers(1, 128))
define_method_strategy("gauss", mu=st.floats(-1000, 1000), sigma=beta_param)
define_method_strategy("normalvariate", mu=st.floats(-1000, 1000), sigma=beta_param)
# the standard library lognormalvariate is weirdly bad at handling large floats
@@ -77,11 +77,11 @@
define_method_strategy("paretovariate", alpha=st.floats(min_value=1.0))
define_method_strategy("shuffle", x=st.lists(st.integers()))
define_method_strategy("randbytes", n=st.integers(0, 100))
-INT64 = st.integers(-(2 ** 63), 2 ** 63 - 1)
+INT64 = st.integers(-(2**63), 2**63 - 1)
@st.composite
def any_call_of_method(draw, method):
if method == "sample":
@@ -337,15 +337,15 @@
assert len(rnd.randbytes(n)) == n
@given(any_random)
def test_can_manage_very_long_ranges_with_step(rnd):
- i = rnd.randrange(0, 2 ** 256, 3)
+ i = rnd.randrange(0, 2**256, 3)
assert i % 3 == 0
- assert 0 <= i < 2 ** 256
- assert i in range(0, 2 ** 256, 3)
+ assert 0 <= i < 2**256
+ assert i in range(0, 2**256, 3)
@given(any_random, st.data())
def test_range_with_arbitrary_step_is_in_range(rnd, data):
endpoints = st.integers(-100, 100)
--- a/hypothesis:hypothesis-python/tests/cover/test_reflection.py
+++ b/hypothesis:hypothesis-python/tests/cover/test_reflection.py
@@ -507,11 +507,11 @@
@pytest.mark.parametrize(
"func,args,expected",
[
(lambda: None, (), None),
- (lambda a: a ** 2, (2,), 4),
+ (lambda a: a**2, (2,), 4),
(lambda *a: a, [1, 2, 3], (1, 2, 3)),
],
)
def test_can_proxy_lambdas(func, args, expected):
@proxies(func)
--- a/hypothesis:hypothesis-python/tests/cover/test_regex.py
+++ b/hypothesis:hypothesis-python/tests/cover/test_regex.py
@@ -181,18 +181,18 @@
@pytest.mark.parametrize("pattern", [re.compile("\\A.\\Z", re.DOTALL), "(?s)\\A.\\Z"])
def test_any_with_dotall_generate_newline(pattern):
find_any(
- st.from_regex(pattern), lambda s: s == "\n", settings(max_examples=10 ** 6)
+ st.from_regex(pattern), lambda s: s == "\n", settings(max_examples=10**6)
)
@pytest.mark.parametrize("pattern", [re.compile(b"\\A.\\Z", re.DOTALL), b"(?s)\\A.\\Z"])
def test_any_with_dotall_generate_newline_binary(pattern):
find_any(
- st.from_regex(pattern), lambda s: s == b"\n", settings(max_examples=10 ** 6)
+ st.from_regex(pattern), lambda s: s == b"\n", settings(max_examples=10**6)
)
@pytest.mark.parametrize(
"pattern",
@@ -427,11 +427,11 @@
)
def test_fullmatch_generates_example(pattern, matching_str):
find_any(
st.from_regex(pattern, fullmatch=True),
lambda s: s == matching_str,
- settings(max_examples=10 ** 6),
+ settings(max_examples=10**6),
)
@pytest.mark.parametrize(
"pattern,eqiv_pattern",
--- a/hypothesis:hypothesis-python/tests/cover/test_simple_strings.py
+++ b/hypothesis:hypothesis-python/tests/cover/test_simple_strings.py
@@ -109,8 +109,8 @@
x = ConjectureData.for_buffer(b"foo")
assert x.draw(binary(min_size=3, max_size=3)) == b"foo"
-@given(text(max_size=10 ** 6))
+@given(text(max_size=10**6))
def test_can_set_max_size_large(s):
pass
--- a/hypothesis:hypothesis-python/tests/cover/test_slices.py
+++ b/hypothesis:hypothesis-python/tests/cover/test_slices.py
@@ -69,11 +69,11 @@
@given(st.integers(1, 1000))
@settings(deadline=None)
def test_step_will_be_negative(size):
find_any(
- st.slices(size), lambda x: (x.step or 1) < 0, settings(max_examples=10 ** 6)
+ st.slices(size), lambda x: (x.step or 1) < 0, settings(max_examples=10**6)
)
@given(st.integers(1, 1000))
@settings(deadline=None)
@@ -81,17 +81,17 @@
find_any(st.slices(size), lambda x: (x.step or 1) > 0)
@pytest.mark.parametrize("size", [1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
def test_stop_will_equal_size(size):
- find_any(st.slices(size), lambda x: x.stop == size, settings(max_examples=10 ** 6))
+ find_any(st.slices(size), lambda x: x.stop == size, settings(max_examples=10**6))
@pytest.mark.parametrize("size", [1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
def test_start_will_equal_size(size):
find_any(
- st.slices(size), lambda x: x.start == size - 1, settings(max_examples=10 ** 6)
+ st.slices(size), lambda x: x.start == size - 1, settings(max_examples=10**6)
)
@given(st.integers(1, 1000))
@settings(deadline=None)
--- a/hypothesis:hypothesis-python/tests/nocover/test_compat.py
+++ b/hypothesis:hypothesis-python/tests/nocover/test_compat.py
@@ -32,11 +32,11 @@
def test_to_int_in_big_endian_order(x, y):
x, y = sorted((x, y))
assert 0 <= int_from_bytes(x) <= int_from_bytes(y)
-ints8 = st.integers(min_value=0, max_value=2 ** 63 - 1)
+ints8 = st.integers(min_value=0, max_value=2**63 - 1)
@given(ints8, ints8)
def test_to_bytes_in_big_endian_order(x, y):
x, y = sorted((x, y))
--- a/hypothesis:hypothesis-python/tests/nocover/test_conjecture_int_list.py
+++ b/hypothesis:hypothesis-python/tests/nocover/test_conjecture_int_list.py
@@ -15,11 +15,11 @@
from hypothesis import strategies as st
from hypothesis.internal.conjecture.junkdrawer import IntList
from hypothesis.stateful import RuleBasedStateMachine, initialize, invariant, rule
-INTEGERS = st.integers(0, 2 ** 68)
+INTEGERS = st.integers(0, 2**68)
@st.composite
def valid_index(draw):
machine = draw(st.runner())
--- a/hypothesis:hypothesis-python/tests/nocover/test_conjecture_utils.py
+++ b/hypothesis:hypothesis-python/tests/nocover/test_conjecture_utils.py
@@ -30,16 +30,16 @@
assert cu.Sampler(weights).table == sampler.table
counts = [0] * len(weights)
i = 0
- while i < 2 ** 16:
+ while i < 2**16:
data = ConjectureData.for_buffer(int_to_bytes(i, 2))
try:
c = sampler.sample(data)
counts[c] += 1
- assert probabilities[c] >= Fraction(counts[c], 2 ** 16)
+ assert probabilities[c] >= Fraction(counts[c], 2**16)
except StopTest:
pass
if 1 in data.forced_indices:
i += 256
else:
--- a/hypothesis:hypothesis-python/tests/nocover/test_explore_arbitrary_languages.py
+++ b/hypothesis:hypothesis-python/tests/nocover/test_explore_arbitrary_languages.py
@@ -131,11 +131,11 @@
phases=set(settings.default.phases) - {Phase.shrink},
)
@given(st.data())
def test_explore_an_arbitrary_language(data):
root = data.draw(writes | branches)
- seed = data.draw(st.integers(0, 2 ** 64 - 1))
+ seed = data.draw(st.integers(0, 2**64 - 1))
run_language_test_for(root, data, seed)
@pytest.mark.parametrize("seed, language", [])
def test_run_specific_example(seed, language):
--- a/hypothesis:hypothesis-python/tests/nocover/test_sampled_from.py
+++ b/hypothesis:hypothesis-python/tests/nocover/test_sampled_from.py
@@ -21,11 +21,11 @@
from hypothesis.errors import InvalidArgument
from tests.common.utils import counts_calls, fails_with
-@pytest.mark.parametrize("n", [100, 10 ** 5, 10 ** 6, 2 ** 25])
+@pytest.mark.parametrize("n", [100, 10**5, 10**6, 2**25])
def test_filter_large_lists(n):
filter_limit = 100 * 10000
@counts_calls
def cond(x):
--- a/hypothesis:hypothesis-python/tests/nocover/test_simple_numbers.py
+++ b/hypothesis:hypothesis-python/tests/nocover/test_simple_numbers.py
@@ -35,14 +35,14 @@
boundaries = pytest.mark.parametrize(
"boundary",
sorted(
- [2 ** i for i in range(10)]
- + [2 ** i - 1 for i in range(10)]
- + [2 ** i + 1 for i in range(10)]
- + [10 ** i for i in range(6)]
+ [2**i for i in range(10)]
+ + [2**i - 1 for i in range(10)]
+ + [2**i + 1 for i in range(10)]
+ + [10**i for i in range(6)]
),
)
@boundaries
@@ -78,11 +78,11 @@
def test_single_integer_range_is_range():
assert minimal(integers(1, 1), lambda x: True) == 1
def test_minimal_small_number_in_large_range():
- assert minimal(integers((-(2 ** 32)), 2 ** 32), lambda x: x >= 101) == 101
+ assert minimal(integers((-(2**32)), 2**32), lambda x: x >= 101) == 101
def test_minimal_small_sum_float_list():
xs = minimal(lists(floats(), min_size=10), lambda x: sum(x) >= 1.0)
assert sum(xs) <= 2.0
@@ -136,11 +136,11 @@
except (OverflowError, ValueError):
return False
def test_can_minimal_float_far_from_integral():
- minimal(floats(), lambda x: math.isfinite(x) and not is_integral(x * (2 ** 32)))
+ minimal(floats(), lambda x: math.isfinite(x) and not is_integral(x * (2**32)))
def test_list_of_fractional_float():
assert set(
minimal(
@@ -181,11 +181,11 @@
assert minimal(floats(max_value=-1.0), lambda x: True) == -1.0
@pytest.mark.parametrize("k", range(10))
def test_floats_from_zero_have_reasonable_range(k):
- n = 10 ** k
+ n = 10**k
assert minimal(floats(min_value=0.0), lambda x: x >= n) == float(n)
assert minimal(floats(max_value=0.0), lambda x: x <= -n) == float(-n)
def test_explicit_allow_nan():
@@ -210,12 +210,12 @@
class TestFloatsAreFloats:
@given(floats())
def test_unbounded(self, arg):
assert isinstance(arg, float)
- @given(floats(min_value=0, max_value=float(2 ** 64 - 1)))
+ @given(floats(min_value=0, max_value=float(2**64 - 1)))
def test_int_float(self, arg):
assert isinstance(arg, float)
- @given(floats(min_value=float(0), max_value=float(2 ** 64 - 1)))
+ @given(floats(min_value=float(0), max_value=float(2**64 - 1)))
def test_float_float(self, arg):
assert isinstance(arg, float)
--- a/hypothesis:hypothesis-python/tests/nocover/test_strategy_state.py
+++ b/hypothesis:hypothesis-python/tests/nocover/test_strategy_state.py
@@ -126,11 +126,11 @@
def float(self, source):
return source
@rule(target=varied_floats, source=varied_floats, offset=integers(-100, 100))
def adjust_float(self, source, offset):
- return int_to_float(clamp(0, float_to_int(source) + offset, 2 ** 64 - 1))
+ return int_to_float(clamp(0, float_to_int(source) + offset, 2**64 - 1))
@rule(target=strategies, left=varied_floats, right=varied_floats)
def float_range(self, left, right):
assume(math.isfinite(left) and math.isfinite(right))
left, right = sorted((left, right))
--- a/hypothesis:hypothesis-python/tests/numpy/test_fill_values.py
+++ b/hypothesis:hypothesis-python/tests/numpy/test_fill_values.py
@@ -25,11 +25,11 @@
@st.composite
def distinct_integers(draw):
used = draw(st.shared(st.builds(set), key="distinct_integers.used"))
- i = draw(st.integers(0, 2 ** 64 - 1).filter(lambda x: x not in used))
+ i = draw(st.integers(0, 2**64 - 1).filter(lambda x: x not in used))
used.add(i)
return i
@given(arrays("uint64", 10, elements=distinct_integers()))
--- a/hypothesis:hypothesis-python/tests/numpy/test_gen_data.py
+++ b/hypothesis:hypothesis-python/tests/numpy/test_gen_data.py
@@ -351,14 +351,14 @@
@pytest.mark.parametrize("fill", [False, True])
@pytest.mark.parametrize(
"dtype,strat",
[
("float16", st.floats(min_value=65520, allow_infinity=False)),
- ("float32", st.floats(min_value=10 ** 40, allow_infinity=False)),
+ ("float32", st.floats(min_value=10**40, allow_infinity=False)),
(
"complex64",
- st.complex_numbers(min_magnitude=10 ** 300, allow_infinity=False),
+ st.complex_numbers(min_magnitude=10**300, allow_infinity=False),
),
("U1", st.text(min_size=2, max_size=2)),
("S1", st.binary(min_size=2, max_size=2)),
],
)
@@ -928,11 +928,11 @@
# check default arg behavior too
kwargs = {"min_dims": min_dims} if min_dims is not None else {}
find_any(
nps.broadcastable_shapes(shape, min_side=0, max_dims=max_dims, **kwargs),
lambda x: len(x) == desired_ndim,
- settings(max_examples=10 ** 6),
+ settings(max_examples=10**6),
)
@settings(deadline=None)
@given(
@@ -961,11 +961,11 @@
min_side=0,
max_dims=max_dims,
**kwargs,
),
lambda x: {len(s) for s in x.input_shapes} == set(desired_ndims),
- settings(max_examples=10 ** 6),
+ settings(max_examples=10**6),
)
@settings(deadline=None)
@given(
@@ -1059,11 +1059,11 @@
find_any(
nps.integer_array_indices(
shape, result_shape=st.just(target.shape), dtype=np.dtype("int8")
),
lambda index: np.all(target == x[index]),
- settings(max_examples=10 ** 6),
+ settings(max_examples=10**6),
)
@pytest.mark.parametrize(
"condition",
@@ -1152,11 +1152,11 @@
if 0 in shape:
# If there's a zero in the shape, the array will have no elements.
array = np.zeros(shape)
assert array.size == 0
- elif np.prod(shape) <= 10 ** 5:
+ elif np.prod(shape) <= 10**5:
# If it's small enough to instantiate, do so with distinct elements.
array = np.arange(np.prod(shape)).reshape(shape)
else:
# We can't cheat on this one, so just try another.
assume(False)
--- a/hypothesis:hypothesis-python/tests/numpy/test_gufunc.py
+++ b/hypothesis:hypothesis-python/tests/numpy/test_gufunc.py
@@ -131,11 +131,11 @@
find_any(
nps.mutually_broadcastable_shapes(
signature="(m?,n),(n,p?)->(m?,p?)", max_dims=0
),
lambda shapes: shapes == target_shapes,
- settings(max_examples=10 ** 6),
+ settings(max_examples=10**6),
)
@settings(deadline=None, max_examples=50)
@given(
--- a/hypothesis:hypothesis-python/tests/pandas/test_indexes.py
+++ b/hypothesis:hypothesis-python/tests/pandas/test_indexes.py
@@ -27,11 +27,11 @@
@given(pdst.indexes(dtype=int, max_size=0))
def test_gets_right_dtype_for_empty_indices(ix):
assert ix.dtype == np.dtype("int64")
-@given(pdst.indexes(elements=st.integers(0, 2 ** 63 - 1), max_size=0))
+@given(pdst.indexes(elements=st.integers(0, 2**63 - 1), max_size=0))
def test_gets_right_dtype_for_empty_indices_with_elements(ix):
assert ix.dtype == np.dtype("int64")
def test_does_not_generate_impossible_conditions():
@@ -50,11 +50,11 @@
assert len(ix) == 2
assert len(set(ix)) == len(ix)
# Sizes that fit into an int64 without overflow
-range_sizes = st.integers(0, 2 ** 63 - 1)
+range_sizes = st.integers(0, 2**63 - 1)
@given(range_sizes, range_sizes | st.none(), st.data())
def test_arbitrary_range_index(i, j, data):
if j is not None:
--- a/hypothesis:hypothesis-python/tests/quality/test_discovery_ability.py
+++ b/hypothesis:hypothesis-python/tests/quality/test_discovery_ability.py
@@ -212,11 +212,11 @@
test_mostly_largish_floats = define_test(
floats(), lambda t: t + 1 > 1, condition=lambda x: x > 0
)
test_ints_can_occasionally_be_really_large = define_test(
- integers(), lambda t: t >= 2 ** 63
+ integers(), lambda t: t >= 2**63
)
test_mixing_is_sometimes_distorted = define_test(
lists(booleans() | tuples()),
distorted,
--- a/hypothesis:hypothesis-python/tests/quality/test_float_shrinking.py
+++ b/hypothesis:hypothesis-python/tests/quality/test_float_shrinking.py
@@ -50,14 +50,14 @@
g = minimal(st.floats(), lambda x: x >= f, settings(verbosity=Verbosity.quiet))
assert g == ceil(f)
@example(1)
-@given(st.integers(1, 2 ** 16 - 1))
+@given(st.integers(1, 2**16 - 1))
@settings(deadline=None, suppress_health_check=HealthCheck.all(), max_examples=10)
def test_shrinks_downwards_to_integers_when_fractional(b):
g = minimal(
st.floats(),
- lambda x: assume((0 < x < (2 ** 53)) and int(x) != x) and x >= b,
- settings=settings(verbosity=Verbosity.quiet, max_examples=10 ** 6),
+ lambda x: assume((0 < x < (2**53)) and int(x) != x) and x >= b,
+ settings=settings(verbosity=Verbosity.quiet, max_examples=10**6),
)
assert g == b + 0.5
--- a/hypothesis:hypothesis-python/tests/quality/test_poisoned_lists.py
+++ b/hypothesis:hypothesis-python/tests/quality/test_poisoned_lists.py
@@ -50,21 +50,21 @@
class Matrices(SearchStrategy):
def __init__(self, elements, size):
super().__init__()
- self.__length = st.integers(0, ceil(size ** 0.5))
+ self.__length = st.integers(0, ceil(size**0.5))
self.__elements = elements
def do_draw(self, data):
n = data.draw(self.__length)
m = data.draw(self.__length)
return [data.draw(self.__elements) for _ in range(n * m)]
-LOTS = 10 ** 6
+LOTS = 10**6
TRIAL_SETTINGS = settings(max_examples=LOTS, database=None)
@pytest.mark.parametrize(
--- a/hypothesis:hypothesis-python/tests/quality/test_poisoned_trees.py
+++ b/hypothesis:hypothesis-python/tests/quality/test_poisoned_trees.py
@@ -22,11 +22,11 @@
from hypothesis.internal.conjecture.engine import ConjectureData, ConjectureRunner
from hypothesis.strategies._internal import SearchStrategy
POISON = "POISON"
-MAX_INT = 2 ** 32 - 1
+MAX_INT = 2**32 - 1
class PoisonedTree(SearchStrategy):
"""Generates variable sized tuples with an implicit tree structure.
@@ -51,11 +51,11 @@
return (POISON,)
else:
return (None,)
-LOTS = 10 ** 6
+LOTS = 10**6
TEST_SETTINGS = settings(
database=None,
suppress_health_check=HealthCheck.all(),
--- a/hypothesis:hypothesis-python/tests/quality/test_shrink_quality.py
+++ b/hypothesis:hypothesis-python/tests/quality/test_shrink_quality.py
@@ -176,24 +176,24 @@
if k > 0:
assert k - 1 in x
def test_minimize_single_element_in_silly_large_int_range():
- ir = integers(-(2 ** 256), 2 ** 256)
- assert minimal(ir, lambda x: x >= -(2 ** 255)) == 0
+ ir = integers(-(2**256), 2**256)
+ assert minimal(ir, lambda x: x >= -(2**255)) == 0
def test_minimize_multiple_elements_in_silly_large_int_range():
desired_result = [0] * 20
- ir = integers(-(2 ** 256), 2 ** 256)
+ ir = integers(-(2**256), 2**256)
x = minimal(lists(ir), lambda x: len(x) >= 20, timeout_after=20)
assert x == desired_result
def test_minimize_multiple_elements_in_silly_large_int_range_min_is_not_dupe():
- ir = integers(0, 2 ** 256)
+ ir = integers(0, 2**256)
target = list(range(20))
x = minimal(
lists(ir),
lambda x: (assume(len(x) >= 20) and all(x[i] >= target[i] for i in target)),
[pandas - https://github.com/pandas-dev/pandas.git]
╰─> revision 776329fdd7fef357cc7ef3a37ffcdda3df8e6fc1
--- a/pandas:asv_bench/benchmarks/algorithms.py
+++ b/pandas:asv_bench/benchmarks/algorithms.py
@@ -32,11 +32,11 @@
],
]
param_names = ["unique", "sort", "dtype"]
def setup(self, unique, sort, dtype):
- N = 10 ** 5
+ N = 10**5
string_index = tm.makeStringIndex(N)
string_arrow = None
if dtype == "string[pyarrow]":
try:
string_arrow = pd.array(string_index, dtype="string[pyarrow]")
@@ -72,11 +72,11 @@
["int", "uint", "float", "string", "datetime64[ns]", "datetime64[ns, tz]"],
]
param_names = ["unique", "keep", "dtype"]
def setup(self, unique, keep, dtype):
- N = 10 ** 5
+ N = 10**5
data = {
"int": pd.Index(np.arange(N), dtype="int64"),
"uint": pd.Index(np.arange(N), dtype="uint64"),
"float": pd.Index(np.random.randn(N), dtype="float64"),
"string": tm.makeStringIndex(N),
@@ -95,11 +95,11 @@
self.idx.duplicated(keep=keep)
class Hashing:
def setup_cache(self):
- N = 10 ** 5
+ N = 10**5
df = pd.DataFrame(
{
"strings": pd.Series(
tm.makeStringIndex(10000).take(np.random.randint(0, 10000, size=N))
@@ -143,11 +143,11 @@
["float", "int", "uint"],
]
param_names = ["quantile", "interpolation", "dtype"]
def setup(self, quantile, interpolation, dtype):
- N = 10 ** 5
+ N = 10**5
data = {
"int": np.arange(N),
"uint": np.arange(N).astype(np.uint64),
"float": np.random.randn(N),
}
@@ -156,11 +156,11 @@
def time_quantile(self, quantile, interpolation, dtype):
self.idx.quantile(quantile, interpolation=interpolation)
class SortIntegerArray:
- params = [10 ** 3, 10 ** 5]
+ params = [10**3, 10**5]
def setup(self, N):
data = np.arange(N, dtype=float)
data[40] = np.nan
self.array = pd.array(data, dtype="Int64")
--- a/pandas:asv_bench/benchmarks/algos/isin.py
+++ b/pandas:asv_bench/benchmarks/algos/isin.py
@@ -46,11 +46,11 @@
self.values = self.series._values[::3]
self.mismatched = [1, 2]
elif dtype in ["category[object]", "category[int]"]:
# Note: sizes are different in this case than others
- n = 5 * 10 ** 5
+ n = 5 * 10**5
sample_size = 100
arr = list(np.random.randint(0, n // 10, size=n))
if dtype == "category[object]":
arr = [f"s{i:04d}" for i in arr]
@@ -171,11 +171,11 @@
]
param_names = ["dtype", "M", "offset_factor"]
def setup(self, dtype, M, offset_factor):
offset = int(M * offset_factor)
- tmp = Series(np.random.randint(offset, M + offset, 10 ** 6))
+ tmp = Series(np.random.randint(offset, M + offset, 10**6))
self.series = tmp.astype(dtype)
self.values = np.arange(M).astype(dtype)
def time_isin(self, dtype, M, offset_factor):
self.series.isin(self.values)
@@ -188,12 +188,12 @@
["many_different_values", "few_different_values", "only_nans_values"],
]
param_names = ["dtype", "title"]
def setup(self, dtype, title):
- N_many = 10 ** 5
- N_few = 10 ** 6
+ N_many = 10**5
+ N_few = 10**6
self.series = Series([1, 2], dtype=dtype)
if title == "many_different_values":
# runtime is dominated by creation of the lookup-table
self.values = np.arange(N_many, dtype=np.float64)
@@ -237,25 +237,25 @@
params = [variants, variants]
param_names = ["series_type", "vals_type"]
def setup(self, series_type, vals_type):
- N_many = 10 ** 5
+ N_many = 10**5
if series_type == "nans":
- ser_vals = np.full(10 ** 4, np.nan)
+ ser_vals = np.full(10**4, np.nan)
elif series_type == "short":
ser_vals = np.arange(2)
elif series_type == "long":
ser_vals = np.arange(N_many)
elif series_type == "long_floats":
ser_vals = np.arange(N_many, dtype=np.float_)
self.series = Series(ser_vals).astype(object)
if vals_type == "nans":
- values = np.full(10 ** 4, np.nan)
+ values = np.full(10**4, np.nan)
elif vals_type == "short":
values = np.arange(2)
elif vals_type == "long":
values = np.arange(N_many)
elif vals_type == "long_floats":
@@ -274,11 +274,11 @@
["random_hits", "random_misses", "monotone_hits", "monotone_misses"],
]
param_names = ["dtype", "MaxNumber", "series_type"]
def setup(self, dtype, MaxNumber, series_type):
- N = 10 ** 7
+ N = 10**7
if series_type == "random_hits":
array = np.random.randint(0, MaxNumber, N)
if series_type == "random_misses":
array = np.random.randint(0, MaxNumber, N) + MaxNumber
@@ -301,19 +301,19 @@
["random", "monotone"],
]
param_names = ["dtype", "series_type"]
def setup(self, dtype, series_type):
- N = 10 ** 7
+ N = 10**7
if series_type == "random":
vals = np.random.randint(0, 10 * N, N)
if series_type == "monotone":
vals = np.arange(N)
self.values = vals.astype(dtype.lower())
- M = 10 ** 6 + 1
+ M = 10**6 + 1
self.series = Series(np.arange(M)).astype(dtype)
def time_isin(self, dtypes, series_type):
self.series.isin(self.values)
--- a/pandas:asv_bench/benchmarks/arithmetic.py
+++ b/pandas:asv_bench/benchmarks/arithmetic.py
@@ -57,11 +57,11 @@
class OpWithFillValue:
def setup(self):
# GH#31300
- arr = np.arange(10 ** 6)
+ arr = np.arange(10**6)
df = DataFrame({"A": arr})
ser = df["A"]
self.df = df
self.ser = ser
@@ -91,11 +91,11 @@
]
]
param_names = ["opname"]
def setup(self, opname):
- arr = np.arange(10 ** 6).reshape(1000, -1)
+ arr = np.arange(10**6).reshape(1000, -1)
df = DataFrame(arr)
df["C"] = 1.0
self.df = df
self.ser = df[0]
self.row = df.iloc[0]
@@ -199,11 +199,11 @@
expr.set_numexpr_threads()
class Ops2:
def setup(self):
- N = 10 ** 3
+ N = 10**3
self.df = DataFrame(np.random.randn(N, N))
self.df2 = DataFrame(np.random.randn(N, N))
self.df_int = DataFrame(
np.random.randint(
@@ -256,11 +256,11 @@
params = [None, "US/Eastern"]
param_names = ["tz"]
def setup(self, tz):
- N = 10 ** 6
+ N = 10**6
halfway = (N // 2) - 1
self.s = Series(date_range("20010101", periods=N, freq="T", tz=tz))
self.ts = self.s[halfway]
self.s2 = Series(date_range("20010101", periods=N, freq="s", tz=tz))
@@ -278,11 +278,11 @@
self.s - self.s.shift()
class IrregularOps:
def setup(self):
- N = 10 ** 5
+ N = 10**5
idx = date_range(start="1/1/2000", periods=N, freq="s")
s = Series(np.random.randn(N), index=idx)
self.left = s.sample(frac=1)
self.right = s.sample(frac=1)
@@ -302,11 +302,11 @@
class CategoricalComparisons:
params = ["__lt__", "__le__", "__eq__", "__ne__", "__ge__", "__gt__"]
param_names = ["op"]
def setup(self, op):
- N = 10 ** 5
+ N = 10**5
self.cat = pd.Categorical(list("aabbcd") * N, ordered=True)
def time_categorical_op(self, op):
getattr(self.cat, op)("b")
@@ -315,11 +315,11 @@
params = ["float", "int"]
param_names = ["dtype"]
def setup(self, dtype):
- N = 10 ** 6
+ N = 10**6
indexes = {"int": "makeIntIndex", "float": "makeFloatIndex"}
self.index = getattr(tm, indexes[dtype])(N)
def time_add(self, dtype):
self.index + 2
@@ -341,11 +341,11 @@
# from GH 7332
params = numeric_dtypes
param_names = ["dtype"]
def setup(self, dtype):
- N = 5 * 10 ** 5
+ N = 5 * 10**5
self.df = DataFrame(
{"A": np.arange(N).astype(dtype), "B": np.arange(N).astype(dtype)}
)
def time_add(self, dtype):
@@ -365,11 +365,11 @@
class DateInferOps:
# from GH 7332
def setup_cache(self):
- N = 5 * 10 ** 5
+ N = 5 * 10**5
df = DataFrame({"datetime64": np.arange(N).astype("datetime64[ms]")})
df["timedelta"] = df["datetime64"] - df["datetime64"]
return df
def time_subtract_datetimes(self, df):
@@ -386,20 +386,20 @@
params = [1, -1, 0]
param_names = ["scalar"]
def setup(self, scalar):
- N = 10 ** 6
+ N = 10**6
self.arr = np.arange(N)
def time_add_overflow_scalar(self, scalar):
checked_add_with_arr(self.arr, scalar)
class AddOverflowArray:
def setup(self):
- N = 10 ** 6
+ N = 10**6
self.arr = np.arange(N)
self.arr_rev = np.arange(-N, 0)
self.arr_mixed = np.array([1, -1]).repeat(N / 2)
self.arr_nan_1 = np.random.choice([True, False], size=N)
self.arr_nan_2 = np.random.choice([True, False], size=N)
--- a/pandas:asv_bench/benchmarks/categoricals.py
+++ b/pandas:asv_bench/benchmarks/categoricals.py
@@ -17,11 +17,11 @@
pass
class Constructor:
def setup(self):
- N = 10 ** 5
+ N = 10**5
self.categories = list("abcde")
self.cat_idx = pd.Index(self.categories)
self.values = np.tile(self.categories, N)
self.codes = np.tile(range(len(self.categories)), N)
@@ -69,20 +69,20 @@
pd.Categorical(self.series)
class AsType:
def setup(self):
- N = 10 ** 5
+ N = 10**5
random_pick = np.random.default_rng().choice
categories = {
"str": list(string.ascii_letters),
- "int": np.random.randint(2 ** 16, size=154),
+ "int": np.random.randint(2**16, size=154),
"float": sys.maxsize * np.random.random((38,)),
"timestamp": [
- pd.Timestamp(x, unit="s") for x in np.random.randint(2 ** 18, size=578)
+ pd.Timestamp(x, unit="s") for x in np.random.randint(2**18, size=578)
],
}
self.df = pd.DataFrame(
{col: random_pick(cats, N) for col, cats in categories.items()}
@@ -110,11 +110,11 @@
self.df["float"].astype(pd.DatetimeTZDtype(tz="US/Pacific"))
class Concat:
def setup(self):
- N = 10 ** 5
+ N = 10**5
self.s = pd.Series(list("aabbcd") * N).astype("category")
self.a = pd.Categorical(list("aabbcd") * N)
self.b = pd.Categorical(list("bbcdjk") * N)
@@ -146,11 +146,11 @@
params = [True, False]
param_names = ["dropna"]
def setup(self, dropna):
- n = 5 * 10 ** 5
+ n = 5 * 10**5
arr = [f"s{i:04d}" for i in np.random.randint(0, n // 10, size=n)]
self.ts = pd.Series(arr).astype("category")
def time_value_counts(self, dropna):
self.ts.value_counts(dropna=dropna)
@@ -164,31 +164,31 @@
str(self.sel)
class SetCategories:
def setup(self):
- n = 5 * 10 ** 5
+ n = 5 * 10**5
arr = [f"s{i:04d}" for i in np.random.randint(0, n // 10, size=n)]
self.ts = pd.Series(arr).astype("category")
def time_set_categories(self):
self.ts.cat.set_categories(self.ts.cat.categories[::2])
class RemoveCategories:
def setup(self):
- n = 5 * 10 ** 5
+ n = 5 * 10**5
arr = [f"s{i:04d}" for i in np.random.randint(0, n // 10, size=n)]
self.ts = pd.Series(arr).astype("category")
def time_remove_categories(self):
self.ts.cat.remove_categories(self.ts.cat.categories[::2])
class Rank:
def setup(self):
- N = 10 ** 5
+ N = 10**5
ncats = 100
self.s_str = pd.Series(tm.makeCategoricalIndex(N, ncats)).astype(str)
self.s_str_cat = pd.Series(self.s_str, dtype="category")
with warnings.catch_warnings(record=True):
@@ -239,11 +239,11 @@
self.s.is_monotonic_decreasing
class Contains:
def setup(self):
- N = 10 ** 5
+ N = 10**5
self.ci = tm.makeCategoricalIndex(N)
self.c = self.ci.values
self.key = self.ci.categories[0]
def time_categorical_index_contains(self):
@@ -257,11 +257,11 @@
params = ["monotonic_incr", "monotonic_decr", "non_monotonic"]
param_names = ["index"]
def setup(self, index):
- N = 10 ** 6
+ N = 10**6
categories = ["a", "b", "c"]
values = [0] * N + [1] * N + [2] * N
if index == "monotonic_incr":
self.data = pd.Categorical.from_codes(values, categories=categories)
elif index == "monotonic_decr":
@@ -293,11 +293,11 @@
self.data[self.data == self.cat_scalar]
class Indexing:
def setup(self):
- N = 10 ** 5
+ N = 10**5
self.index = pd.CategoricalIndex(range(N), range(N))
self.series = pd.Series(range(N), index=self.index).sort_index()
self.category = self.index[500]
def time_get_loc(self):
@@ -325,11 +325,11 @@
self.index.sort_values(ascending=False)
class SearchSorted:
def setup(self):
- N = 10 ** 5
+ N = 10**5
self.ci = tm.makeCategoricalIndex(N).sort_values()
self.c = self.ci.values
self.key = self.ci.categories[1]
def time_categorical_index_contains(self):
--- a/pandas:asv_bench/benchmarks/ctors.py
+++ b/pandas:asv_bench/benchmarks/ctors.py
@@ -74,11 +74,11 @@
def setup(self, data_fmt, with_index, dtype):
if data_fmt in (gen_of_str, gen_of_tuples) and with_index:
raise NotImplementedError(
"Series constructors do not support using generators with indexes"
)
- N = 10 ** 4
+ N = 10**4
if dtype == "float":
arr = np.random.randn(N)
else:
arr = np.arange(N)
self.data = data_fmt(arr)
@@ -88,11 +88,11 @@
Series(self.data, index=self.index)
class SeriesDtypesConstructors:
def setup(self):
- N = 10 ** 4
+ N = 10**4
self.arr = np.random.randn(N)
self.arr_str = np.array(["foo", "bar", "baz"], dtype=object)
self.s = Series(
[Timestamp("20110101"), Timestamp("20120101"), Timestamp("20130101")]
* N
@@ -112,11 +112,11 @@
Index(self.s)
class MultiIndexConstructor:
def setup(self):
- N = 10 ** 4
+ N = 10**4
self.iterables = [tm.makeStringIndex(N), range(20)]
def time_multiindex_from_iterables(self):
MultiIndex.from_product(self.iterables)
--- a/pandas:asv_bench/benchmarks/eval.py
+++ b/pandas:asv_bench/benchmarks/eval.py
@@ -41,11 +41,11 @@
expr.set_numexpr_threads()
class Query:
def setup(self):
- N = 10 ** 6
+ N = 10**6
halfway = (N // 2) - 1
index = pd.date_range("20010101", periods=N, freq="T")
s = pd.Series(index)
self.ts = s.iloc[halfway]
self.df = pd.DataFrame({"a": np.random.randn(N), "dates": index}, index=index)
--- a/pandas:asv_bench/benchmarks/frame_ctor.py
+++ b/pandas:asv_bench/benchmarks/frame_ctor.py
@@ -72,11 +72,11 @@
params = [Nano(1), Hour(1)]
param_names = ["offset"]
def setup(self, offset):
- N = 10 ** 3
+ N = 10**3
idx = date_range(Timestamp("1/1/1900"), freq=offset, periods=N)
df = DataFrame(np.random.randn(N, 10), index=idx)
self.d = df.to_dict()
def time_dict_with_timestamp_offsets(self, offset):
--- a/pandas:asv_bench/benchmarks/frame_methods.py
+++ b/pandas:asv_bench/benchmarks/frame_methods.py
@@ -48,11 +48,11 @@
self.df.lookup(self.row_labels_all, self.col_labels_all)
class Reindex:
def setup(self):
- N = 10 ** 3
+ N = 10**3
self.df = DataFrame(np.random.randn(N * 10, N))
self.idx = np.arange(4 * N, 7 * N)
self.idx_cols = np.random.randint(0, N, N)
self.df2 = DataFrame(
{
@@ -82,11 +82,11 @@
self.df2.reindex(np.random.permutation(range(1200)))
class Rename:
def setup(self):
- N = 10 ** 3
+ N = 10**3
self.df = DataFrame(np.random.randn(N * 10, N))
self.idx = np.arange(4 * N, 7 * N)
self.dict_idx = {k: k for k in self.idx}
self.df2 = DataFrame(
{
@@ -327,11 +327,11 @@
self.bools.astype(float).mask(self.mask)
class Isnull:
def setup(self):
- N = 10 ** 3
+ N = 10**3
self.df_no_null = DataFrame(np.random.randn(N, N))
sample = np.array([np.nan, 1.0])
data = np.random.choice(sample, (N, N))
self.df = DataFrame(data)
@@ -495,11 +495,11 @@
self.df.dtypes
class Equals:
def setup(self):
- N = 10 ** 3
+ N = 10**3
self.float_df = DataFrame(np.random.randn(N, N))
self.float_df_nan = self.float_df.copy()
self.float_df_nan.iloc[-1, -1] = np.nan
self.object_df = DataFrame("foo", index=range(N), columns=range(N))
@@ -616,11 +616,11 @@
params = [0, 1]
param_names = ["axis"]
def setup(self, axis):
- self.N = 10 ** 4
+ self.N = 10**4
self.df = DataFrame(np.random.randn(self.N, self.N))
def time_frame_xs(self, axis):
self.df.xs(self.N / 2, axis=axis)
@@ -716,13 +716,13 @@
class Describe:
def setup(self):
self.df = DataFrame(
{
- "a": np.random.randint(0, 100, 10 ** 6),
- "b": np.random.randint(0, 100, 10 ** 6),
- "c": np.random.randint(0, 100, 10 ** 6),
+ "a": np.random.randint(0, 100, 10**6),
+ "b": np.random.randint(0, 100, 10**6),
+ "c": np.random.randint(0, 100, 10**6),
}
)
def time_series_describe(self):
self.df["a"].describe()
--- a/pandas:asv_bench/benchmarks/gil.py
+++ b/pandas:asv_bench/benchmarks/gil.py
@@ -53,12 +53,12 @@
param_names = ["threads", "method"]
def setup(self, threads, method):
if not have_real_test_parallel:
raise NotImplementedError
- N = 10 ** 6
- ngroups = 10 ** 3
+ N = 10**6
+ ngroups = 10**3
df = DataFrame(
{"key": np.random.randint(0, ngroups, size=N), "data": np.random.randn(N)}
)
@test_parallel(num_threads=threads)
@@ -86,12 +86,12 @@
param_names = ["threads"]
def setup(self, threads):
if not have_real_test_parallel:
raise NotImplementedError
- size = 2 ** 22
- ngroups = 10 ** 3
+ size = 2**22
+ ngroups = 10**3
data = Series(np.random.randint(0, ngroups, size=size))
@test_parallel(num_threads=threads)
def get_groups():
data.groupby(data).groups
@@ -108,11 +108,11 @@
param_names = ["dtype"]
def setup(self, dtype):
if not have_real_test_parallel:
raise NotImplementedError
- N = 10 ** 6
+ N = 10**6
df = DataFrame({"col": np.arange(N, dtype=dtype)})
indexer = np.arange(100, len(df) - 100)
@test_parallel(num_threads=2)
def parallel_take1d():
@@ -131,12 +131,12 @@
repeat = 5
def setup(self):
if not have_real_test_parallel:
raise NotImplementedError
- N = 10 ** 7
- k = 5 * 10 ** 5
+ N = 10**7
+ k = 5 * 10**5
kwargs_list = [{"arr": np.random.randn(N)}, {"arr": np.random.randn(N)}]
@test_parallel(num_threads=2, kwargs_list=kwargs_list)
def parallel_kth_smallest(arr):
algos.kth_smallest(arr, k)
@@ -149,11 +149,11 @@
class ParallelDatetimeFields:
def setup(self):
if not have_real_test_parallel:
raise NotImplementedError
- N = 10 ** 6
+ N = 10**6
self.dti = date_range("1900-01-01", periods=N, freq="T")
self.period = self.dti.to_period("D")
def time_datetime_field_year(self):
@test_parallel(num_threads=2)
--- a/pandas:asv_bench/benchmarks/groupby.py
+++ b/pandas:asv_bench/benchmarks/groupby.py
@@ -71,11 +71,11 @@
param_names = ["factor"]
params = [4, 5]
def setup(self, factor):
- N = 10 ** factor
+ N = 10**factor
# two cases:
# - small groups: small data (N**4) + many labels (2000) -> average group
# size of 5 (-> larger overhead of slicing method)
# - larger groups: larger data (N**5) + fewer labels (20) -> average group
# size of 5000
@@ -114,11 +114,11 @@
param_names = ["key"]
params = ["int64_small", "int64_large", "object_small", "object_large"]
def setup_cache(self):
- size = 10 ** 6
+ size = 10**6
data = {
"int64_small": Series(np.random.randint(0, 100, size=size)),
"int64_large": Series(np.random.randint(0, 10000, size=size)),
"object_small": Series(
tm.makeStringIndex(100).take(np.random.randint(0, 100, size=size))
@@ -158,11 +158,11 @@
param_names = ["dtype"]
params = ["float32", "float64", "datetime", "object"]
def setup(self, dtype):
- N = 10 ** 5
+ N = 10**5
# with datetimes (GH7555)
if dtype == "datetime":
values = date_range("1/1/2011", periods=N, freq="s")
elif dtype == "object":
values = ["foo"] * N
@@ -266,11 +266,11 @@
df.groupby(["key1", "key2"]).nunique()
class AggFunctions:
def setup_cache(self):
- N = 10 ** 5
+ N = 10**5
fac1 = np.array(["A", "B", "C"], dtype="O")
fac2 = np.array(["one", "two"], dtype="O")
df = DataFrame(
{
"key1": fac1.take(np.random.randint(0, 3, size=N)),
@@ -299,11 +299,11 @@
df.groupby("key1").agg([sum, min, max])
class GroupStrings:
def setup(self):
- n = 2 * 10 ** 5
+ n = 2 * 10**5
alpha = list(map("".join, product(ascii_letters, repeat=4)))
data = np.random.choice(alpha, (n // 5, 4), replace=False)
data = np.repeat(data, 5, axis=0)
self.df = DataFrame(data, columns=list("abcd"))
self.df["joe"] = (np.random.randn(len(self.df)) * 10).round(3)
@@ -313,11 +313,11 @@
self.df.groupby(list("abcd")).max()
class MultiColumn:
def setup_cache(self):
- N = 10 ** 5
+ N = 10**5
key1 = np.tile(np.arange(100, dtype=object), 1000)
key2 = key1.copy()
np.random.shuffle(key1)
np.random.shuffle(key2)
df = DataFrame(
@@ -343,11 +343,11 @@
df.groupby(["key1", "key2"])["data1"].agg(np.sum)
class Size:
def setup(self):
- n = 10 ** 5
+ n = 10**5
offsets = np.random.randint(n, size=n).astype("timedelta64[ns]")
dates = np.datetime64("now") + offsets
self.df = DataFrame(
{
"key1": np.random.randint(0, 500, size=n),
@@ -577,11 +577,11 @@
["float64", "float32", "int64", "datetime64"],
["first", "average", "dense", "min", "max"],
]
def setup(self, dtype, tie_method):
- N = 10 ** 4
+ N = 10**4
if dtype == "datetime64":
data = np.array([Timestamp("2011/01/01")] * N, dtype=dtype)
else:
data = np.array([1] * N, dtype=dtype)
self.df = DataFrame({"values": data, "key": ["foo"] * N})
@@ -635,11 +635,11 @@
self.df.groupby("a")[self.df.columns[1:]].agg(method)
class Categories:
def setup(self):
- N = 10 ** 5
+ N = 10**5
arr = np.random.random(N)
data = {"a": Categorical(np.random.randint(10000, size=N)), "b": arr}
self.df = DataFrame(data)
data = {
"a": Categorical(np.random.randint(10000, size=N), ordered=True),
@@ -677,18 +677,18 @@
# GH 14338
params = ["period_range", "date_range", "date_range_tz"]
param_names = ["grouper"]
def setup(self, grouper):
- N = 10 ** 4
+ N = 10**4
rng_map = {
"period_range": period_range,
"date_range": date_range,
"date_range_tz": partial(date_range, tz="US/Central"),
}
self.grouper = rng_map[grouper]("1900-01-01", freq="D", periods=N)
- self.df = DataFrame(np.random.randn(10 ** 4, 2))
+ self.df = DataFrame(np.random.randn(10**4, 2))
def time_sum(self, grouper):
self.df.groupby(self.grouper).sum()
@@ -793,11 +793,11 @@
param_names = ["parallel"]
params = [[True, False]]
def setup(self, parallel):
- N = 10 ** 3
+ N = 10**3
data = DataFrame(
{0: [str(i) for i in range(100)] * N, 1: list(range(100)) * N},
columns=[0, 1],
)
self.parallel = parallel
@@ -836,11 +836,11 @@
param_names = ["parallel"]
params = [[True, False]]
def setup(self, parallel):
- N = 10 ** 3
+ N = 10**3
data = DataFrame(
{0: [str(i) for i in range(100)] * N, 1: list(range(100)) * N},
columns=[0, 1],
)
self.parallel = parallel
@@ -899,11 +899,11 @@
self.grouper.agg(function, engine="cython")
class Sample:
def setup(self):
- N = 10 ** 3
+ N = 10**3
self.df = DataFrame({"a": np.zeros(N)})
self.groups = np.arange(0, N)
self.weights = np.ones(N)
def time_sample(self):
--- a/pandas:asv_bench/benchmarks/hash_functions.py
+++ b/pandas:asv_bench/benchmarks/hash_functions.py
@@ -14,25 +14,25 @@
class Float64GroupIndex:
# GH28303
def setup(self):
self.df = pd.date_range(
- start="1/1/2018", end="1/2/2018", periods=10 ** 6
+ start="1/1/2018", end="1/2/2018", periods=10**6
).to_frame()
- self.group_index = np.round(self.df.index.astype(int) / 10 ** 9)
+ self.group_index = np.round(self.df.index.astype(int) / 10**9)
def time_groupby(self):
self.df.groupby(self.group_index).last()
class UniqueAndFactorizeArange:
params = range(4, 16)
param_names = ["exponent"]
def setup(self, exponent):
- a = np.arange(10 ** 4, dtype="float64")
- self.a2 = (a + 10 ** exponent).repeat(100)
+ a = np.arange(10**4, dtype="float64")
+ self.a2 = (a + 10**exponent).repeat(100)
def time_factorize(self, exponent):
pd.factorize(self.a2)
def time_unique(self, exponent):
@@ -41,11 +41,11 @@
class NumericSeriesIndexing:
params = [
(pd.Int64Index, pd.UInt64Index, pd.Float64Index),
- (10 ** 4, 10 ** 5, 5 * 10 ** 5, 10 ** 6, 5 * 10 ** 6),
+ (10**4, 10**5, 5 * 10**5, 10**6, 5 * 10**6),
]
param_names = ["index_dtype", "N"]
def setup(self, index, N):
vals = np.array(list(range(55)) + [54] + list(range(55, N - 1)))
@@ -59,11 +59,11 @@
class NumericSeriesIndexingShuffled:
params = [
(pd.Int64Index, pd.UInt64Index, pd.Float64Index),
- (10 ** 4, 10 ** 5, 5 * 10 ** 5, 10 ** 6, 5 * 10 ** 6),
+ (10**4, 10**5, 5 * 10**5, 10**6, 5 * 10**6),
]
param_names = ["index_dtype", "N"]
def setup(self, index, N):
vals = np.array(list(range(55)) + [54] + list(range(55, N - 1)))
--- a/pandas:asv_bench/benchmarks/index_cached_properties.py
+++ b/pandas:asv_bench/benchmarks/index_cached_properties.py
@@ -20,11 +20,11 @@
]
]
param_names = ["index_type"]
def setup(self, index_type):
- N = 10 ** 5
+ N = 10**5
if index_type == "MultiIndex":
self.idx = pd.MultiIndex.from_product(
[pd.date_range("1/1/2000", freq="T", periods=N // 2), ["a", "b"]]
)
elif index_type == "DatetimeIndex":
--- a/pandas:asv_bench/benchmarks/index_object.py
+++ b/pandas:asv_bench/benchmarks/index_object.py
@@ -23,11 +23,11 @@
["intersection", "union", "symmetric_difference"],
)
param_names = ["dtype", "method"]
def setup(self, dtype, method):
- N = 10 ** 5
+ N = 10**5
dates_left = date_range("1/1/2000", periods=N, freq="T")
fmt = "%Y-%m-%d %H:%M:%S"
date_str_left = Index(dates_left.strftime(fmt))
int_left = Index(np.arange(N))
str_left = tm.makeStringIndex(N)
@@ -44,23 +44,23 @@
getattr(self.left, method)(self.right)
class SetDisjoint:
def setup(self):
- N = 10 ** 5
+ N = 10**5
B = N + 20000
self.datetime_left = DatetimeIndex(range(N))
self.datetime_right = DatetimeIndex(range(N, B))
def time_datetime_difference_disjoint(self):
self.datetime_left.difference(self.datetime_right)
class Range:
def setup(self):
- self.idx_inc = RangeIndex(start=0, stop=10 ** 6, step=3)
- self.idx_dec = RangeIndex(start=10 ** 6, stop=-1, step=-3)
+ self.idx_inc = RangeIndex(start=0, stop=10**6, step=3)
+ self.idx_dec = RangeIndex(start=10**6, stop=-1, step=-3)
def time_max(self):
self.idx_inc.max()
def time_max_trivial(self):
@@ -131,11 +131,11 @@
params = ["String", "Float", "Int"]
param_names = ["dtype"]
def setup(self, dtype):
- N = 10 ** 6
+ N = 10**6
self.idx = getattr(tm, f"make{dtype}Index")(N)
self.array_mask = (np.arange(N) % 3) == 0
self.series_mask = Series(self.array_mask)
self.sorted = self.idx.sort_values()
half = N // 2
@@ -184,11 +184,11 @@
self.ind.get_loc(0)
class IntervalIndexMethod:
# GH 24813
- params = [10 ** 3, 10 ** 5]
+ params = [10**3, 10**5]
def setup(self, N):
left = np.append(np.arange(N), np.array(0))
right = np.append(np.arange(1, N + 1), np.array(1))
self.intv = IntervalIndex.from_arrays(left, right)
--- a/pandas:asv_bench/benchmarks/indexing.py
+++ b/pandas:asv_bench/benchmarks/indexing.py
@@ -35,11 +35,11 @@
("unique_monotonic_inc", "nonunique_monotonic_inc"),
]
param_names = ["index_dtype", "index_structure"]
def setup(self, index, index_structure):
- N = 10 ** 6
+ N = 10**6
indices = {
"unique_monotonic_inc": index(range(N)),
"nonunique_monotonic_inc": index(
list(range(55)) + [54] + list(range(55, N - 1))
),
@@ -95,11 +95,11 @@
("unique_monotonic_inc", "nonunique_monotonic_inc", "non_monotonic"),
]
param_names = ["index_dtype", "index_structure"]
def setup(self, index, index_structure):
- N = 10 ** 6
+ N = 10**6
if index == "string":
index = tm.makeStringIndex(N)
elif index == "datetime":
index = date_range("1900", periods=N, freq="s")
elif index == "period":
@@ -261,11 +261,11 @@
params = ["monotonic_incr", "monotonic_decr", "non_monotonic"]
param_names = ["index"]
def setup(self, index):
- N = 10 ** 5
+ N = 10**5
values = list("a" * N + "b" * N + "c" * N)
indices = {
"monotonic_incr": CategoricalIndex(values),
"monotonic_decr": CategoricalIndex(reversed(values)),
"non_monotonic": CategoricalIndex(list("abc" * N)),
@@ -330,11 +330,11 @@
class IndexSingleRow:
params = [True, False]
param_names = ["unique_cols"]
def setup(self, unique_cols):
- arr = np.arange(10 ** 7).reshape(-1, 10)
+ arr = np.arange(10**7).reshape(-1, 10)
df = DataFrame(arr)
dtypes = ["u1", "u2", "u4", "u8", "i1", "i2", "i4", "i8", "f8", "f4"]
for i, d in enumerate(dtypes):
df[i] = df[i].astype(d)
@@ -362,11 +362,11 @@
self.df["date"] = self.df.index
class InsertColumns:
def setup(self):
- self.N = 10 ** 3
+ self.N = 10**3
self.df = DataFrame(index=range(self.N))
self.df2 = DataFrame(np.random.randn(self.N, 2))
def time_insert(self):
for i in range(100):
--- a/pandas:asv_bench/benchmarks/indexing_engines.py
+++ b/pandas:asv_bench/benchmarks/indexing_engines.py
@@ -34,11 +34,11 @@
params = [
_get_numeric_engines(),
["monotonic_incr", "monotonic_decr", "non_monotonic"],
[True, False],
- [10 ** 5, 2 * 10 ** 6], # 2e6 is above SIZE_CUTOFF
+ [10**5, 2 * 10**6], # 2e6 is above SIZE_CUTOFF
]
param_names = ["engine_and_dtype", "index_type", "unique", "N"]
def setup(self, engine_and_dtype, index_type, unique, N):
engine, dtype = engine_and_dtype
@@ -84,11 +84,11 @@
params = [("monotonic_incr", "monotonic_decr", "non_monotonic")]
param_names = ["index_type"]
def setup(self, index_type):
- N = 10 ** 5
+ N = 10**5
values = list("a" * N + "b" * N + "c" * N)
arr = {
"monotonic_incr": np.array(values, dtype=object),
"monotonic_decr": np.array(list(reversed(values)), dtype=object),
"non_monotonic": np.array(list("abc") * N, dtype=object),
--- a/pandas:asv_bench/benchmarks/inference.py
+++ b/pandas:asv_bench/benchmarks/inference.py
@@ -83,12 +83,12 @@
class MaybeConvertNumeric:
# maybe_convert_numeric depends _exclusively_ on _libs, could
# go in benchmarks/libs.py
def setup_cache(self):
- N = 10 ** 6
- arr = np.repeat([2 ** 63], N) + np.arange(N).astype("uint64")
+ N = 10**6
+ arr = np.repeat([2**63], N) + np.arange(N).astype("uint64")
data = arr.astype(object)
data[1::2] = arr[1::2].astype(str)
data[-1] = -1
return data
@@ -99,11 +99,11 @@
class MaybeConvertObjects:
# maybe_convert_objects depends _almost_ exclusively on _libs, but
# does have some run-time imports from outside of _libs
def setup(self):
- N = 10 ** 5
+ N = 10**5
data = list(range(N))
data[0] = NaT
data = np.array(data)
self.data = data
--- a/pandas:asv_bench/benchmarks/io/csv.py
+++ b/pandas:asv_bench/benchmarks/io/csv.py
@@ -229,12 +229,12 @@
read_csv(self.fname, skiprows=skiprows, engine=engine)
class ReadUint64Integers(StringIORewind):
def setup(self):
- self.na_values = [2 ** 63 + 500]
- arr = np.arange(10000).astype("uint64") + 2 ** 63
+ self.na_values = [2**63 + 500]
+ arr = np.arange(10000).astype("uint64") + 2**63
self.data1 = StringIO("\n".join(arr.astype(str).tolist()))
arr = arr.astype(object)
arr[500] = -1
self.data2 = StringIO("\n".join(arr.astype(str).tolist()))
--- a/pandas:asv_bench/benchmarks/io/json.py
+++ b/pandas:asv_bench/benchmarks/io/json.py
@@ -107,11 +107,11 @@
["df", "df_date_idx", "df_td_int_ts", "df_int_floats", "df_int_float_str"],
]
param_names = ["orient", "frame"]
def setup(self, orient, frame):
- N = 10 ** 5
+ N = 10**5
ncols = 5
index = date_range("20000101", periods=N, freq="H")
timedeltas = timedelta_range(start=1, periods=N, freq="s")
datetimes = date_range(start=1, periods=N, freq="s")
ints = np.random.randint(100000000, size=N)
@@ -191,11 +191,11 @@
fname = "__test__.json"
params = [["split", "columns", "index", "values", "records"]]
param_names = ["orient"]
def setup(self, orient):
- N = 10 ** 5
+ N = 10**5
index = date_range("20000101", periods=N, freq="H")
timedeltas = timedelta_range(start=1, periods=N, freq="s")
datetimes = date_range(start=1, periods=N, freq="s")
self.df = DataFrame(
{
@@ -214,11 +214,11 @@
class ToJSONLines(BaseIO):
fname = "__test__.json"
def setup(self):
- N = 10 ** 5
+ N = 10**5
ncols = 5
index = date_range("20000101", periods=N, freq="H")
timedeltas = timedelta_range(start=1, periods=N, freq="s")
datetimes = date_range(start=1, periods=N, freq="s")
ints = np.random.randint(100000000, size=N)
--- a/pandas:asv_bench/benchmarks/join_merge.py
+++ b/pandas:asv_bench/benchmarks/join_merge.py
@@ -224,11 +224,11 @@
params = ["inner", "outer", "left", "right"]
param_names = ["how"]
def setup(self, how):
- low, high, n = -1000, 1000, 10 ** 6
+ low, high, n = -1000, 1000, 10**6
self.left = DataFrame(
np.random.randint(low, high, (n, 7)), columns=list("ABCDEFG")
)
self.left["left"] = self.left.sum(axis=1)
self.right = self.left.sample(frac=1).rename({"left": "right"}, axis=1)
@@ -380,12 +380,12 @@
)
class Align:
def setup(self):
- size = 5 * 10 ** 5
- rng = np.arange(0, 10 ** 13, 10 ** 7)
+ size = 5 * 10**5
+ rng = np.arange(0, 10**13, 10**7)
stamps = np.datetime64("now").view("i8") + rng
idx1 = np.sort(np.random.choice(stamps, size, replace=False))
idx2 = np.sort(np.random.choice(stamps, size, replace=False))
self.ts1 = Series(np.random.randn(size), idx1)
self.ts2 = Series(np.random.randn(size), idx2)
--- a/pandas:asv_bench/benchmarks/multiindex_object.py
+++ b/pandas:asv_bench/benchmarks/multiindex_object.py
@@ -201,11 +201,11 @@
("intersection", "union", "symmetric_difference"),
]
param_names = ["index_structure", "dtype", "method"]
def setup(self, index_structure, dtype, method):
- N = 10 ** 5
+ N = 10**5
level1 = range(1000)
level2 = date_range(start="1/1/2000", periods=N // 1000)
dates_left = MultiIndex.from_product([level1, level2])
--- a/pandas:asv_bench/benchmarks/replace.py
+++ b/pandas:asv_bench/benchmarks/replace.py
@@ -7,11 +7,11 @@
params = [True, False]
param_names = ["inplace"]
def setup(self, inplace):
- N = 10 ** 6
+ N = 10**6
rng = pd.date_range("1/1/2000", periods=N, freq="min")
data = np.random.randn(N)
data[::2] = np.nan
self.ts = pd.Series(data, index=rng)
@@ -26,14 +26,14 @@
params = [True, False]
param_names = ["inplace"]
def setup(self, inplace):
- N = 10 ** 5
- start_value = 10 ** 5
+ N = 10**5
+ start_value = 10**5
self.to_rep = dict(enumerate(np.arange(N) + start_value))
- self.s = pd.Series(np.random.randint(N, size=10 ** 3))
+ self.s = pd.Series(np.random.randint(N, size=10**3))
def time_replace_series(self, inplace):
self.s.replace(self.to_rep, inplace=inplace)
@@ -42,11 +42,11 @@
params = [(True, False)]
param_names = ["inplace"]
def setup(self, inplace):
- self.df = pd.DataFrame({"A": 0, "B": 0}, index=range(4 * 10 ** 7))
+ self.df = pd.DataFrame({"A": 0, "B": 0}, index=range(4 * 10**7))
def time_replace_list(self, inplace):
self.df.replace([np.inf, -np.inf], np.nan, inplace=inplace)
def time_replace_list_one_match(self, inplace):
@@ -58,11 +58,11 @@
params = (["DataFrame", "Series"], ["Timestamp", "Timedelta"])
param_names = ["constructor", "replace_data"]
def setup(self, constructor, replace_data):
- N = 10 ** 3
+ N = 10**3
data = {
"Series": pd.Series(np.random.randint(N, size=N)),
"DataFrame": pd.DataFrame(
{"A": np.random.randint(N, size=N), "B": np.random.randint(N, size=N)}
),
--- a/pandas:asv_bench/benchmarks/reshape.py
+++ b/pandas:asv_bench/benchmarks/reshape.py
@@ -257,11 +257,11 @@
class Cut:
params = [[4, 10, 1000]]
param_names = ["bins"]
def setup(self, bins):
- N = 10 ** 5
+ N = 10**5
self.int_series = pd.Series(np.arange(N).repeat(5))
self.float_series = pd.Series(np.random.randn(N).repeat(5))
self.timedelta_series = pd.Series(
np.random.randint(N, size=N), dtype="timedelta64[ns]"
)
--- a/pandas:asv_bench/benchmarks/rolling.py
+++ b/pandas:asv_bench/benchmarks/rolling.py
@@ -12,11 +12,11 @@
["median", "mean", "max", "min", "std", "count", "skew", "kurt", "sum"],
)
param_names = ["constructor", "window", "dtype", "method"]
def setup(self, constructor, window, dtype, method):
- N = 10 ** 5
+ N = 10**5
arr = (100 * np.random.random(N)).astype(dtype)
self.roll = getattr(pd, constructor)(arr).rolling(window)
def time_rolling(self, constructor, window, dtype, method):
getattr(self.roll, method)()
@@ -34,11 +34,11 @@
[True, False],
)
param_names = ["constructor", "window", "dtype", "function", "raw"]
def setup(self, constructor, window, dtype, function, raw):
- N = 10 ** 3
+ N = 10**3
arr = (100 * np.random.random(N)).astype(dtype)
self.roll = getattr(pd, constructor)(arr).rolling(window)
def time_rolling(self, constructor, window, dtype, function, raw):
self.roll.apply(function, raw=raw)
@@ -53,11 +53,11 @@
["sum", "max", "min", "median", "mean"],
)
param_names = ["constructor", "dtype", "function", "engine", "method"]
def setup(self, constructor, dtype, function, engine, method):
- N = 10 ** 3
+ N = 10**3
arr = (100 * np.random.random(N)).astype(dtype)
self.data = getattr(pd, constructor)(arr)
def time_rolling_apply(self, constructor, dtype, function, engine, method):
self.data.rolling(10).apply(function, raw=True, engine=engine)
@@ -77,11 +77,11 @@
["median", "mean", "max", "min", "std", "count", "skew", "kurt", "sum"],
)
param_names = ["constructor", "window", "dtype", "method"]
def setup(self, constructor, dtype, method):
- N = 10 ** 5
+ N = 10**5
N_groupby = 100
arr = (100 * np.random.random(N)).astype(dtype)
self.expanding = getattr(pd, constructor)(arr).expanding()
self.expanding_groupby = (
pd.DataFrame({"A": arr[:N_groupby], "B": range(N_groupby)})
@@ -100,11 +100,11 @@
params = (["DataFrame", "Series"], [10, 1000], ["int", "float"], ["mean", "std"])
param_names = ["constructor", "window", "dtype", "method"]
def setup(self, constructor, window, dtype, method):
- N = 10 ** 5
+ N = 10**5
arr = (100 * np.random.random(N)).astype(dtype)
times = pd.date_range("1900", periods=N, freq="23s")
self.ewm = getattr(pd, constructor)(arr).ewm(halflife=window)
self.ewm_times = getattr(pd, constructor)(arr).ewm(
halflife="1 Day", times=times
@@ -125,11 +125,11 @@
["median", "mean", "max", "min", "std", "count", "skew", "kurt", "sum"],
)
param_names = ["constructor", "window", "dtype", "method"]
def setup(self, constructor, window, dtype, method):
- N = 10 ** 5
+ N = 10**5
arr = (100 * np.random.random(N)).astype(dtype)
index = pd.date_range("2017-01-01", periods=N, freq="5s")
self.roll = getattr(pd, constructor)(arr, index=index).rolling(window)
@@ -137,11 +137,11 @@
params = ([10, 1000, None], ["corr", "cov"], [True, False])
param_names = ["window", "method", "pairwise"]
def setup(self, window, method, pairwise):
- N = 10 ** 4
+ N = 10**4
n_groups = 20
groups = [i for _ in range(N // n_groups) for i in range(n_groups)]
arr = np.random.random(N)
self.df = pd.DataFrame(arr)
self.df_group = pd.DataFrame({"A": groups, "B": arr}).groupby("A")
@@ -170,11 +170,11 @@
["linear", "nearest", "lower", "higher", "midpoint"],
)
param_names = ["constructor", "window", "dtype", "percentile"]
def setup(self, constructor, window, dtype, percentile, interpolation):
- N = 10 ** 5
+ N = 10**5
arr = np.random.random(N).astype(dtype)
self.roll = getattr(pd, constructor)(arr).rolling(window)
def time_quantile(self, constructor, window, dtype, percentile, interpolation):
self.roll.quantile(percentile, interpolation=interpolation)
@@ -197,11 +197,11 @@
"ascending",
"method",
]
def setup(self, constructor, window, dtype, percentile, ascending, method):
- N = 10 ** 5
+ N = 10**5
arr = np.random.random(N).astype(dtype)
self.roll = getattr(pd, constructor)(arr).rolling(window)
def time_rank(self, constructor, window, dtype, percentile, ascending, method):
self.roll.rank(pct=percentile, ascending=ascending, method=method)
@@ -210,11 +210,11 @@
class PeakMemFixedWindowMinMax:
params = ["min", "max"]
def setup(self, operation):
- N = 10 ** 6
+ N = 10**6
arr = np.random.random(N)
self.roll = pd.Series(arr).rolling(2)
def peakmem_fixed(self, operation):
for x in range(5):
@@ -229,11 +229,11 @@
["median", "mean", "max", "min", "kurt", "sum"],
)
param_names = ["constructor", "window_size", "dtype", "method"]
def setup(self, constructor, window_size, dtype, method):
- N = 10 ** 5
+ N = 10**5
arr = np.random.random(N).astype(dtype)
indexer = pd.api.indexers.FixedForwardWindowIndexer(window_size=window_size)
self.roll = getattr(pd, constructor)(arr).rolling(window=indexer)
def time_rolling(self, constructor, window_size, dtype, method):
--- a/pandas:asv_bench/benchmarks/series_methods.py
+++ b/pandas:asv_bench/benchmarks/series_methods.py
@@ -30,11 +30,11 @@
class ToFrame:
params = [["int64", "datetime64[ns]", "category", "Int64"], [None, "foo"]]
param_names = ["dtype", "name"]
def setup(self, dtype, name):
- arr = np.arange(10 ** 5)
+ arr = np.arange(10**5)
ser = Series(arr, dtype=dtype)
self.ser = ser
def time_to_frame(self, dtype, name):
self.ser.to_frame(name)
@@ -59,11 +59,11 @@
params = ["int", "datetime"]
param_names = ["dtype"]
def setup(self, dtype):
- N = 10 ** 6
+ N = 10**6
data = {
"int": np.random.randint(1, 10, N),
"datetime": date_range("2000-01-01", freq="S", periods=N),
}
self.s = Series(data[dtype])
@@ -92,11 +92,11 @@
"str",
]
param_names = ["dtype"]
def setup(self, dtype):
- N = 10 ** 5
+ N = 10**5
data = np.array([1] * N + [2] * N + [3] * N).astype(dtype)
self.s = Series(data)
def time_searchsorted(self, dtype):
key = "2" if dtype == "str" else 2
@@ -128,11 +128,11 @@
def time_map(self, mapper, *args, **kwargs):
self.s.map(self.map_data)
class Clip:
- params = [50, 1000, 10 ** 5]
+ params = [50, 1000, 10**5]
param_names = ["n"]
def setup(self, n):
self.s = Series(np.random.randn(n))
@@ -140,11 +140,11 @@
self.s.clip(0, 1)
class ValueCounts:
- params = [[10 ** 3, 10 ** 4, 10 ** 5], ["int", "uint", "float", "object"]]
+ params = [[10**3, 10**4, 10**5], ["int", "uint", "float", "object"]]
param_names = ["N", "dtype"]
def setup(self, N, dtype):
self.s = Series(np.random.randint(0, N, size=10 * N)).astype(dtype)
@@ -152,11 +152,11 @@
self.s.value_counts()
class Mode:
- params = [[10 ** 3, 10 ** 4, 10 ** 5], ["int", "uint", "float", "object"]]
+ params = [[10**3, 10**4, 10**5], ["int", "uint", "float", "object"]]
param_names = ["N", "dtype"]
def setup(self, N, dtype):
self.s = Series(np.random.randint(0, N, size=10 * N)).astype(dtype)
@@ -173,19 +173,19 @@
class SeriesGetattr:
# https://github.com/pandas-dev/pandas/issues/19764
def setup(self):
- self.s = Series(1, index=date_range("2012-01-01", freq="s", periods=10 ** 6))
+ self.s = Series(1, index=date_range("2012-01-01", freq="s", periods=10**6))
def time_series_datetimeindex_repr(self):
getattr(self.s, "a", None)
class All:
- params = [[10 ** 3, 10 ** 6], ["fast", "slow"], ["bool", "boolean"]]
+ params = [[10**3, 10**6], ["fast", "slow"], ["bool", "boolean"]]
param_names = ["N", "case", "dtype"]
def setup(self, N, case, dtype):
val = case != "fast"
self.s = Series([val] * N, dtype=dtype)
@@ -194,11 +194,11 @@
self.s.all()
class Any:
- params = [[10 ** 3, 10 ** 6], ["fast", "slow"], ["bool", "boolean"]]
+ params = [[10**3, 10**6], ["fast", "slow"], ["bool", "boolean"]]
param_names = ["N", "case", "dtype"]
def setup(self, N, case, dtype):
val = case == "fast"
self.s = Series([val] * N, dtype=dtype)
@@ -222,11 +222,11 @@
"argmax",
"skew",
"kurt",
"prod",
],
- [10 ** 3, 10 ** 6],
+ [10**3, 10**6],
["int8", "int32", "int64", "float64", "Int64", "boolean"],
]
param_names = ["func", "N", "dtype"]
def setup(self, func, N, dtype):
--- a/pandas:asv_bench/benchmarks/sparse.py
+++ b/pandas:asv_bench/benchmarks/sparse.py
@@ -38,11 +38,11 @@
params = ([0.1, 0.01], [0, np.nan], [np.int64, np.float64, object])
param_names = ["dense_proportion", "fill_value", "dtype"]
def setup(self, dense_proportion, fill_value, dtype):
- N = 10 ** 6
+ N = 10**6
self.array = make_array(N, dense_proportion, fill_value, dtype)
def time_sparse_array(self, dense_proportion, fill_value, dtype):
SparseArray(self.array, fill_value=fill_value, dtype=dtype)
@@ -109,11 +109,11 @@
params = ([0.1, 0.01], [0, np.nan])
param_names = ["dense_proportion", "fill_value"]
def setup(self, dense_proportion, fill_value):
- N = 10 ** 6
+ N = 10**6
arr1 = make_array(N, dense_proportion, fill_value, np.int64)
self.array1 = SparseArray(arr1, fill_value=fill_value)
arr2 = make_array(N, dense_proportion, fill_value, np.int64)
self.array2 = SparseArray(arr2, fill_value=fill_value)
@@ -134,11 +134,11 @@
params = [np.nan, 0]
param_names = ["fill_value"]
def setup(self, fill_value):
- N = 10 ** 6
+ N = 10**6
self.arr1 = self.make_block_array(
length=N, num_blocks=1000, block_size=10, fill_value=fill_value
)
self.arr2 = self.make_block_array(
length=N, num_blocks=1000, block_size=10, fill_value=fill_value
--- a/pandas:asv_bench/benchmarks/stat_ops.py
+++ b/pandas:asv_bench/benchmarks/stat_ops.py
@@ -81,11 +81,11 @@
params = [["DataFrame", "Series"], [True, False]]
param_names = ["constructor", "pct"]
def setup(self, constructor, pct):
- values = np.random.randn(10 ** 5)
+ values = np.random.randn(10**5)
self.data = getattr(pd, constructor)(values)
def time_rank(self, constructor, pct):
self.data.rank(pct=pct)
--- a/pandas:asv_bench/benchmarks/strings.py
+++ b/pandas:asv_bench/benchmarks/strings.py
@@ -15,22 +15,22 @@
params = ["str", "string[python]", "string[pyarrow]"]
param_names = ["dtype"]
def setup(self, dtype):
try:
- self.s = Series(tm.makeStringIndex(10 ** 5), dtype=dtype)
+ self.s = Series(tm.makeStringIndex(10**5), dtype=dtype)
except ImportError:
raise NotImplementedError
class Construction:
params = ["str", "string"]
param_names = ["dtype"]
def setup(self, dtype):
- self.series_arr = tm.rands_array(nchars=10, size=10 ** 5)
+ self.series_arr = tm.rands_array(nchars=10, size=10**5)
self.frame_arr = self.series_arr.reshape((50_000, 2)).copy()
# GH37371. Testing construction of string series/frames from ExtensionArrays
self.series_cat_arr = Categorical(self.series_arr)
self.frame_cat_arr = Categorical(self.frame_arr)
@@ -178,11 +178,11 @@
params = ["int", "array"]
param_names = ["repeats"]
def setup(self, repeats):
- N = 10 ** 5
+ N = 10**5
self.s = Series(tm.makeStringIndex(N))
repeat = {"int": 1, "array": np.random.randint(1, 3, N)}
self.values = repeat[repeats]
def time_repeat(self, repeats):
@@ -193,11 +193,11 @@
params = ([0, 3], [None, ","], [None, "-"], [0.0, 0.001, 0.15])
param_names = ["other_cols", "sep", "na_rep", "na_frac"]
def setup(self, other_cols, sep, na_rep, na_frac):
- N = 10 ** 5
+ N = 10**5
mask_gen = lambda: np.random.choice([True, False], N, p=[1 - na_frac, na_frac])
self.s = Series(tm.makeStringIndex(N)).where(mask_gen())
if other_cols == 0:
# str.cat self-concatenates only for others=None
self.others = None
--- a/pandas:asv_bench/benchmarks/timeseries.py
+++ b/pandas:asv_bench/benchmarks/timeseries.py
@@ -129,11 +129,11 @@
params = [date_range, period_range, timedelta_range]
param_names = ["time_index"]
def setup(self, time_index):
- N = 10 ** 6
+ N = 10**6
if time_index is timedelta_range:
self.idx = time_index(start=0, freq="T", periods=N)
else:
self.idx = time_index(start="20140101", freq="T", periods=N)
self.exit = 10000
@@ -245,11 +245,11 @@
params = [True, False]
param_names = ["monotonic"]
def setup(self, monotonic):
- N = 10 ** 5
+ N = 10**5
idx = date_range(start="1/1/2000", periods=N, freq="s")
self.s = Series(np.random.randn(N), index=idx)
if not monotonic:
self.s = self.s.sample(frac=1)
--- a/pandas:asv_bench/benchmarks/tslibs/normalize.py
+++ b/pandas:asv_bench/benchmarks/tslibs/normalize.py
@@ -29,11 +29,11 @@
# use an array that will have is_date_array_normalized give True,
# so we do not short-circuit early.
dti = pd.date_range("2016-01-01", periods=10, tz=tz).repeat(size // 10)
self.i8data = dti.asi8
- if size == 10 ** 6 and tz is tzlocal_obj:
+ if size == 10**6 and tz is tzlocal_obj:
# tzlocal is cumbersomely slow, so skip to keep runtime in check
raise NotImplementedError
def time_normalize_i8_timestamps(self, size, tz):
normalize_i8_timestamps(self.i8data, tz)
--- a/pandas:asv_bench/benchmarks/tslibs/period.py
+++ b/pandas:asv_bench/benchmarks/tslibs/period.py
@@ -128,11 +128,11 @@
_tzs,
]
param_names = ["size", "freq", "tz"]
def setup(self, size, freq, tz):
- if size == 10 ** 6 and tz is tzlocal_obj:
+ if size == 10**6 and tz is tzlocal_obj:
# tzlocal is cumbersomely slow, so skip to keep runtime in check
raise NotImplementedError
arr = np.arange(10, dtype="i8").repeat(size // 10)
self.i8values = arr
--- a/pandas:asv_bench/benchmarks/tslibs/resolution.py
+++ b/pandas:asv_bench/benchmarks/tslibs/resolution.py
@@ -38,11 +38,11 @@
_tzs,
)
param_names = ["unit", "size", "tz"]
def setup(self, unit, size, tz):
- if size == 10 ** 6 and tz is tzlocal_obj:
+ if size == 10**6 and tz is tzlocal_obj:
# tzlocal is cumbersomely slow, so skip to keep runtime in check
raise NotImplementedError
arr = np.random.randint(0, 10, size=size, dtype="i8")
arr = arr.view(f"M8[{unit}]").astype("M8[ns]").view("i8")
--- a/pandas:asv_bench/benchmarks/tslibs/tslib.py
+++ b/pandas:asv_bench/benchmarks/tslibs/tslib.py
@@ -39,11 +39,11 @@
timezone(timedelta(minutes=60)),
pytz.timezone("US/Pacific"),
gettz("Asia/Tokyo"),
tzlocal_obj,
]
-_sizes = [0, 1, 100, 10 ** 4, 10 ** 6]
+_sizes = [0, 1, 100, 10**4, 10**6]
class TimeIntsToPydatetime:
params = (
["time", "date", "datetime", "timestamp"],
@@ -55,11 +55,11 @@
def setup(self, box, size, tz):
if box == "date" and tz is not None:
# tz is ignored, so avoid running redundant benchmarks
raise NotImplementedError # skip benchmark
- if size == 10 ** 6 and tz is _tzs[-1]:
+ if size == 10**6 and tz is _tzs[-1]:
# This is cumbersomely-slow, so skip to trim runtime
raise NotImplementedError # skip benchmark
arr = np.random.randint(0, 10, size=size, dtype="i8")
self.i8data = arr
--- a/pandas:asv_bench/benchmarks/tslibs/tz_convert.py
+++ b/pandas:asv_bench/benchmarks/tslibs/tz_convert.py
@@ -23,11 +23,11 @@
[x for x in _tzs if x is not None],
]
param_names = ["size", "tz"]
def setup(self, size, tz):
- if size == 10 ** 6 and tz is tzlocal_obj:
+ if size == 10**6 and tz is tzlocal_obj:
# tzlocal is cumbersomely slow, so skip to keep runtime in check
raise NotImplementedError
arr = np.random.randint(0, 10, size=size, dtype="i8")
self.i8data = arr
--- a/pandas:pandas/_testing/asserters.py
+++ b/pandas:pandas/_testing/asserters.py
@@ -207,11 +207,11 @@
else:
# 5-digit tolerance
return 0.5e-5
else:
# Equivalent to setting checking_less_precise=<decimals>
- return 0.5 * 10 ** -check_less_precise
+ return 0.5 * 10**-check_less_precise
def _check_isinstance(left, right, cls):
"""
Helper method for our assert_* methods that ensures that
--- a/pandas:pandas/compat/__init__.py
+++ b/pandas:pandas/compat/__init__.py
@@ -27,11 +27,11 @@
)
PY39 = sys.version_info >= (3, 9)
PY310 = sys.version_info >= (3, 10)
PYPY = platform.python_implementation() == "PyPy"
-IS64 = sys.maxsize > 2 ** 32
+IS64 = sys.maxsize > 2**32
def set_function_name(f: F, name: str, cls) -> F:
"""
Bind the name/qualname attributes of the function.
--- a/pandas:pandas/core/arrays/datetimes.py
+++ b/pandas:pandas/core/arrays/datetimes.py
@@ -1903,12 +1903,12 @@
+ 1_721_118.5
+ (
self.hour
+ self.minute / 60
+ self.second / 3600
- + self.microsecond / 3600 / 10 ** 6
- + self.nanosecond / 3600 / 10 ** 9
+ + self.microsecond / 3600 / 10**6
+ + self.nanosecond / 3600 / 10**9
)
/ 24
)
# -----------------------------------------------------------------
--- a/pandas:pandas/core/config_init.py
+++ b/pandas:pandas/core/config_init.py
@@ -856,11 +856,11 @@
validator=is_one_of_factory(["html", "latex"]),
)
cf.register_option(
"render.max_elements",
- 2 ** 18,
+ 2**18,
styler_max_elements,
validator=is_nonnegative_int,
)
cf.register_option(
--- a/pandas:pandas/core/nanops.py
+++ b/pandas:pandas/core/nanops.py
@@ -1199,11 +1199,11 @@
mean = np.expand_dims(mean, axis)
adjusted = values - mean
if skipna and mask is not None:
np.putmask(adjusted, mask, 0)
- adjusted2 = adjusted ** 2
+ adjusted2 = adjusted**2
adjusted3 = adjusted2 * adjusted
m2 = adjusted2.sum(axis, dtype=np.float64)
m3 = adjusted3.sum(axis, dtype=np.float64)
# floating point error
@@ -1212,11 +1212,11 @@
# to fix the fperr to treat m2 <1e-14 as zero
m2 = _zero_out_fperr(m2)
m3 = _zero_out_fperr(m3)
with np.errstate(invalid="ignore", divide="ignore"):
- result = (count * (count - 1) ** 0.5 / (count - 2)) * (m3 / m2 ** 1.5)
+ result = (count * (count - 1) ** 0.5 / (count - 2)) * (m3 / m2**1.5)
dtype = values.dtype
if is_float_dtype(dtype):
result = result.astype(dtype, copy=False)
@@ -1287,19 +1287,19 @@
mean = np.expand_dims(mean, axis)
adjusted = values - mean
if skipna and mask is not None:
np.putmask(adjusted, mask, 0)
- adjusted2 = adjusted ** 2
- adjusted4 = adjusted2 ** 2
+ adjusted2 = adjusted**2
+ adjusted4 = adjusted2**2
m2 = adjusted2.sum(axis, dtype=np.float64)
m4 = adjusted4.sum(axis, dtype=np.float64)
with np.errstate(invalid="ignore", divide="ignore"):
adj = 3 * (count - 1) ** 2 / ((count - 2) * (count - 3))
numerator = count * (count + 1) * (count - 1) * m4
- denominator = (count - 2) * (count - 3) * m2 ** 2
+ denominator = (count - 2) * (count - 3) * m2**2
# floating point error
#
# #18044 in _libs/windows.pyx calc_kurt follow this behavior
# to fix the fperr to treat denom <1e-14 as zero
--- a/pandas:pandas/core/roperator.py
+++ b/pandas:pandas/core/roperator.py
@@ -43,11 +43,11 @@
def rdivmod(left, right):
return divmod(right, left)
def rpow(left, right):
- return right ** left
+ return right**left
def rand_(left, right):
return operator.and_(right, left)
--- a/pandas:pandas/io/formats/excel.py
+++ b/pandas:pandas/io/formats/excel.py
@@ -450,12 +450,12 @@
Defaults to ``CSSToExcelConverter()``.
It should have signature css_declarations string -> excel style.
This is only called for body cells.
"""
- max_rows = 2 ** 20
- max_cols = 2 ** 14
+ max_rows = 2**20
+ max_cols = 2**14
def __init__(
self,
df,
na_rep: str = "",
--- a/pandas:pandas/io/formats/format.py
+++ b/pandas:pandas/io/formats/format.py
@@ -1701,11 +1701,11 @@
if values.tz is not None:
return False
values_int = values.asi8
consider_values = values_int != iNaT
- one_day_nanos = 86400 * 10 ** 9
+ one_day_nanos = 86400 * 10**9
even_days = (
np.logical_and(consider_values, values_int % int(one_day_nanos) != 0).sum() == 0
)
if even_days:
return True
@@ -1808,11 +1808,11 @@
"""
values_int = values.view(np.int64)
consider_values = values_int != iNaT
- one_day_nanos = 86400 * 10 ** 9
+ one_day_nanos = 86400 * 10**9
# error: Unsupported operand types for % ("ExtensionArray" and "int")
not_midnight = values_int % one_day_nanos != 0 # type: ignore[operator]
# error: Argument 1 to "__call__" of "ufunc" has incompatible type
# "Union[Any, ExtensionArray, ndarray]"; expected
# "Union[Union[int, float, complex, str, bytes, generic],
@@ -2036,11 +2036,11 @@
if int_pow10 < 0:
prefix = f"E-{-int_pow10:02d}"
else:
prefix = f"E+{int_pow10:02d}"
- mant = sign * dnum / (10 ** pow10)
+ mant = sign * dnum / (10**pow10)
if self.accuracy is None: # pragma: no cover
format_str = "{mant: g}{prefix}"
else:
format_str = f"{{mant: .{self.accuracy:d}f}}{{prefix}}"
--- a/pandas:pandas/io/stata.py
+++ b/pandas:pandas/io/stata.py
@@ -613,11 +613,11 @@
if data[col].max() <= np.iinfo(c_data[1]).max:
dtype = c_data[1]
else:
dtype = c_data[2]
if c_data[2] == np.int64: # Warn if necessary
- if data[col].max() >= 2 ** 53:
+ if data[col].max() >= 2**53:
ws = precision_loss_doc.format("uint64", "float64")
data[col] = data[col].astype(dtype)
# Check values and upcast if necessary
@@ -630,11 +630,11 @@
elif dtype == np.int64:
if data[col].max() <= 2147483620 and data[col].min() >= -2147483647:
data[col] = data[col].astype(np.int32)
else:
data[col] = data[col].astype(np.float64)
- if data[col].max() >= 2 ** 53 or data[col].min() <= -(2 ** 53):
+ if data[col].max() >= 2**53 or data[col].min() <= -(2**53):
ws = precision_loss_doc.format("int64", "float64")
elif dtype in (np.float32, np.float64):
value = data[col].max()
if np.isinf(value):
raise ValueError(
--- a/pandas:pandas/plotting/_matplotlib/converter.py
+++ b/pandas:pandas/plotting/_matplotlib/converter.py
@@ -61,11 +61,11 @@
SEC_PER_MIN = 60.0
SEC_PER_HOUR = SEC_PER_MIN * MIN_PER_HOUR
SEC_PER_DAY = SEC_PER_HOUR * HOURS_PER_DAY
-MUSEC_PER_DAY = 10 ** 6 * SEC_PER_DAY
+MUSEC_PER_DAY = 10**6 * SEC_PER_DAY
_mpl_units = {} # Cache for units overwritten by us
def get_pairs():
@@ -139,11 +139,11 @@
# make it idempotent by excluding ours.
units.registry[unit] = formatter
def _to_ordinalf(tm: pydt.time) -> float:
- tot_sec = tm.hour * 3600 + tm.minute * 60 + tm.second + tm.microsecond / 10 ** 6
+ tot_sec = tm.hour * 3600 + tm.minute * 60 + tm.second + tm.microsecond / 10**6
return tot_sec
def time2num(d):
if isinstance(d, str):
@@ -205,11 +205,11 @@
A string in HH:MM:SS.mmmuuu format. Microseconds,
milliseconds and seconds are only displayed if non-zero.
"""
fmt = "%H:%M:%S.%f"
s = int(x)
- msus = round((x - s) * 10 ** 6)
+ msus = round((x - s) * 10**6)
ms = msus // 1000
us = msus % 1000
m, s = divmod(s, 60)
h, m = divmod(m, 60)
_, h = divmod(h, 24)
@@ -1082,11 +1082,11 @@
@staticmethod
def format_timedelta_ticks(x, pos, n_decimals: int) -> str:
"""
Convert seconds to 'D days HH:MM:SS.F'
"""
- s, ns = divmod(x, 10 ** 9)
+ s, ns = divmod(x, 10**9)
m, s = divmod(s, 60)
h, m = divmod(m, 60)
d, h = divmod(h, 24)
decimals = int(ns * 10 ** (n_decimals - 9))
s = f"{int(h):02d}:{int(m):02d}:{int(s):02d}"
@@ -1096,9 +1096,9 @@
s = f"{int(d):d} days {s}"
return s
def __call__(self, x, pos=0) -> str:
(vmin, vmax) = tuple(self.axis.get_view_interval())
- n_decimals = int(np.ceil(np.log10(100 * 10 ** 9 / abs(vmax - vmin))))
+ n_decimals = int(np.ceil(np.log10(100 * 10**9 / abs(vmax - vmin))))
if n_decimals > 9:
n_decimals = 9
return self.format_timedelta_ticks(x, pos, n_decimals)
--- a/pandas:pandas/plotting/_matplotlib/tools.py
+++ b/pandas:pandas/plotting/_matplotlib/tools.py
@@ -113,11 +113,11 @@
layouts = {1: (1, 1), 2: (1, 2), 3: (2, 2), 4: (2, 2)}
try:
return layouts[nplots]
except KeyError:
k = 1
- while k ** 2 < nplots:
+ while k**2 < nplots:
k += 1
if (k - 1) * k >= nplots:
return k, (k - 1)
else:
--- a/pandas:pandas/tests/apply/test_series_apply.py
+++ b/pandas:pandas/tests/apply/test_series_apply.py
@@ -376,15 +376,15 @@
def test_with_nested_series(datetime_series):
# GH 2316
# .agg with a reducer and a transform, what to do
- result = datetime_series.apply(lambda x: Series([x, x ** 2], index=["x", "x^2"]))
- expected = DataFrame({"x": datetime_series, "x^2": datetime_series ** 2})
+ result = datetime_series.apply(lambda x: Series([x, x**2], index=["x", "x^2"]))
+ expected = DataFrame({"x": datetime_series, "x^2": datetime_series**2})
tm.assert_frame_equal(result, expected)
- result = datetime_series.agg(lambda x: Series([x, x ** 2], index=["x", "x^2"]))
+ result = datetime_series.agg(lambda x: Series([x, x**2], index=["x", "x^2"]))
tm.assert_frame_equal(result, expected)
def test_replicate_describe(string_series):
# this also tests a result set that is all scalars
--- a/pandas:pandas/tests/arithmetic/test_numeric.py
+++ b/pandas:pandas/tests/arithmetic/test_numeric.py
@@ -115,16 +115,16 @@
def test_numeric_cmp_string_numexpr_path(self, box_with_array):
# GH#36377, GH#35700
box = box_with_array
xbox = box if box is not Index else np.ndarray
- obj = Series(np.random.randn(10 ** 5))
+ obj = Series(np.random.randn(10**5))
obj = tm.box_expected(obj, box, transpose=False)
result = obj == "a"
- expected = Series(np.zeros(10 ** 5, dtype=bool))
+ expected = Series(np.zeros(10**5, dtype=bool))
expected = tm.box_expected(expected, xbox, transpose=False)
tm.assert_equal(result, expected)
result = obj != "a"
tm.assert_equal(result, ~expected)
@@ -225,11 +225,11 @@
ids=lambda x: type(x).__name__,
)
def test_numeric_arr_mul_tdscalar_numexpr_path(self, scalar_td, box_with_array):
box = box_with_array
- arr = np.arange(2 * 10 ** 4).astype(np.int64)
+ arr = np.arange(2 * 10**4).astype(np.int64)
obj = tm.box_expected(arr, box, transpose=False)
expected = arr.view("timedelta64[D]").astype("timedelta64[ns]")
expected = tm.box_expected(expected, box, transpose=False)
@@ -694,11 +694,11 @@
def test_mul_index(self, numeric_idx):
idx = numeric_idx
result = idx * idx
- tm.assert_index_equal(result, idx ** 2)
+ tm.assert_index_equal(result, idx**2)
def test_mul_datelike_raises(self, numeric_idx):
idx = numeric_idx
msg = "cannot perform __rmul__ with this index type"
with pytest.raises(TypeError, match=msg):
@@ -793,11 +793,11 @@
ts.name = "ts"
df = pd.DataFrame({"A": ts})
tm.assert_series_equal(ts + ts, ts + df["A"], check_names=False)
- tm.assert_series_equal(ts ** ts, ts ** df["A"], check_names=False)
+ tm.assert_series_equal(ts**ts, ts ** df["A"], check_names=False)
tm.assert_series_equal(ts < ts, ts < df["A"], check_names=False)
tm.assert_series_equal(ts / ts, ts / df["A"], check_names=False)
# TODO: this came from tests.series.test_analytics, needs cleanup and
# de-duplication with test_modulo above
@@ -1309,11 +1309,11 @@
expected = Index(idx.values * idx.values)
tm.assert_index_equal(result, expected, exact=True)
# __pow__
idx = RangeIndex(0, 1000, 2)
- result = idx ** 2
+ result = idx**2
expected = Int64Index(idx._values) ** 2
tm.assert_index_equal(Index(result.values), expected, exact=True)
# __floordiv__
cases_exact = [
@@ -1433,11 +1433,11 @@
@pytest.mark.parametrize("power", [1, 2, 5])
@pytest.mark.parametrize("string_size", [0, 1, 2, 5])
def test_empty_str_comparison(power, string_size):
# GH 37348
- a = np.array(range(10 ** power))
+ a = np.array(range(10**power))
right = pd.DataFrame(a, dtype=np.int64)
left = " " * string_size
result = right == left
expected = pd.DataFrame(np.zeros(right.shape, dtype=bool))
--- a/pandas:pandas/tests/arithmetic/test_object.py
+++ b/pandas:pandas/tests/arithmetic/test_object.py
@@ -80,16 +80,16 @@
def test_pow_ops_object(self):
# GH#22922
# pow is weird with masking & 1, so testing here
a = Series([1, np.nan, 1, np.nan], dtype=object)
b = Series([1, np.nan, np.nan, 1], dtype=object)
- result = a ** b
- expected = Series(a.values ** b.values, dtype=object)
- tm.assert_series_equal(result, expected)
-
- result = b ** a
- expected = Series(b.values ** a.values, dtype=object)
+ result = a**b
+ expected = Series(a.values**b.values, dtype=object)
+ tm.assert_series_equal(result, expected)
+
+ result = b**a
+ expected = Series(b.values**a.values, dtype=object)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
@pytest.mark.parametrize("other", ["category", "Int64"])
--- a/pandas:pandas/tests/arithmetic/test_timedelta64.py
+++ b/pandas:pandas/tests/arithmetic/test_timedelta64.py
@@ -1531,11 +1531,11 @@
tm.assert_equal(result, expected)
def test_tdi_mul_int_array(self, box_with_array):
rng5 = np.arange(5, dtype="int64")
idx = TimedeltaIndex(rng5)
- expected = TimedeltaIndex(rng5 ** 2)
+ expected = TimedeltaIndex(rng5**2)
idx = tm.box_expected(idx, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = idx * rng5
@@ -2188,14 +2188,14 @@
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
pattern = "operate|unsupported|cannot|not supported"
with pytest.raises(TypeError, match=pattern):
- scalar_td ** td1
+ scalar_td**td1
with pytest.raises(TypeError, match=pattern):
- td1 ** scalar_td
+ td1**scalar_td
def test_add_timestamp_to_timedelta():
# GH: 35897
timestamp = Timestamp.now()
--- a/pandas:pandas/tests/arrays/datetimes/test_constructors.py
+++ b/pandas:pandas/tests/arrays/datetimes/test_constructors.py
@@ -27,11 +27,11 @@
DatetimeArray(arr[[0]].squeeze())
def test_freq_validation(self):
# GH#24623 check that invalid instances cannot be created with the
# public constructor
- arr = np.arange(5, dtype=np.int64) * 3600 * 10 ** 9
+ arr = np.arange(5, dtype=np.int64) * 3600 * 10**9
msg = (
"Inferred frequency H from passed values does not "
"conform to passed frequency W-SUN"
)
@@ -62,11 +62,11 @@
# before aware or vice-versa
with pytest.raises(ValueError, match=msg):
meth(obj)
def test_from_pandas_array(self):
- arr = pd.array(np.arange(5, dtype=np.int64)) * 3600 * 10 ** 9
+ arr = pd.array(np.arange(5, dtype=np.int64)) * 3600 * 10**9
result = DatetimeArray._from_sequence(arr)._with_freq("infer")
expected = pd.date_range("1970-01-01", periods=5, freq="H")._data
tm.assert_datetime_array_equal(result, expected)
--- a/pandas:pandas/tests/arrays/floating/test_arithmetic.py
+++ b/pandas:pandas/tests/arrays/floating/test_arithmetic.py
@@ -49,56 +49,56 @@
tm.assert_extension_array_equal(result, expected)
def test_pow_scalar(dtype):
a = pd.array([-1, 0, 1, None, 2], dtype=dtype)
- result = a ** 0
+ result = a**0
expected = pd.array([1, 1, 1, 1, 1], dtype=dtype)
tm.assert_extension_array_equal(result, expected)
- result = a ** 1
+ result = a**1
expected = pd.array([-1, 0, 1, None, 2], dtype=dtype)
tm.assert_extension_array_equal(result, expected)
- result = a ** pd.NA
+ result = a**pd.NA
expected = pd.array([None, None, 1, None, None], dtype=dtype)
tm.assert_extension_array_equal(result, expected)
- result = a ** np.nan
+ result = a**np.nan
# TODO np.nan should be converted to pd.NA / missing before operation?
expected = FloatingArray(
np.array([np.nan, np.nan, 1, np.nan, np.nan], dtype=dtype.numpy_dtype),
mask=a._mask,
)
tm.assert_extension_array_equal(result, expected)
# reversed
a = a[1:] # Can't raise integers to negative powers.
- result = 0 ** a
+ result = 0**a
expected = pd.array([1, 0, None, 0], dtype=dtype)
tm.assert_extension_array_equal(result, expected)
- result = 1 ** a
+ result = 1**a
expected = pd.array([1, 1, 1, 1], dtype=dtype)
tm.assert_extension_array_equal(result, expected)
- result = pd.NA ** a
+ result = pd.NA**a
expected = pd.array([1, None, None, None], dtype=dtype)
tm.assert_extension_array_equal(result, expected)
- result = np.nan ** a
+ result = np.nan**a
expected = FloatingArray(
np.array([1, np.nan, np.nan, np.nan], dtype=dtype.numpy_dtype), mask=a._mask
)
tm.assert_extension_array_equal(result, expected)
def test_pow_array(dtype):
a = pd.array([0, 0, 0, 1, 1, 1, None, None, None], dtype=dtype)
b = pd.array([0, 1, None, 0, 1, None, 0, 1, None], dtype=dtype)
- result = a ** b
+ result = a**b
expected = pd.array([1, 0, None, 1, 1, 1, 1, None, None], dtype=dtype)
tm.assert_extension_array_equal(result, expected)
def test_rpow_one_to_na():
--- a/pandas:pandas/tests/arrays/integer/test_arithmetic.py
+++ b/pandas:pandas/tests/arrays/integer/test_arithmetic.py
@@ -86,56 +86,56 @@
tm.assert_extension_array_equal(result, expected)
def test_pow_scalar():
a = pd.array([-1, 0, 1, None, 2], dtype="Int64")
- result = a ** 0
+ result = a**0
expected = pd.array([1, 1, 1, 1, 1], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
- result = a ** 1
+ result = a**1
expected = pd.array([-1, 0, 1, None, 2], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
- result = a ** pd.NA
+ result = a**pd.NA
expected = pd.array([None, None, 1, None, None], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
- result = a ** np.nan
+ result = a**np.nan
expected = FloatingArray(
np.array([np.nan, np.nan, 1, np.nan, np.nan], dtype="float64"),
np.array([False, False, False, True, False]),
)
tm.assert_extension_array_equal(result, expected)
# reversed
a = a[1:] # Can't raise integers to negative powers.
- result = 0 ** a
+ result = 0**a
expected = pd.array([1, 0, None, 0], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
- result = 1 ** a
+ result = 1**a
expected = pd.array([1, 1, 1, 1], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
- result = pd.NA ** a
+ result = pd.NA**a
expected = pd.array([1, None, None, None], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
- result = np.nan ** a
+ result = np.nan**a
expected = FloatingArray(
np.array([1, np.nan, np.nan, np.nan], dtype="float64"),
np.array([False, False, True, False]),
)
tm.assert_extension_array_equal(result, expected)
def test_pow_array():
a = pd.array([0, 0, 0, 1, 1, 1, None, None, None])
b = pd.array([0, 1, None, 0, 1, None, 0, 1, None])
- result = a ** b
+ result = a**b
expected = pd.array([1, 0, None, 1, 1, 1, 1, None, None])
tm.assert_extension_array_equal(result, expected)
def test_rpow_one_to_na():
--- a/pandas:pandas/tests/arrays/integer/test_dtypes.py
+++ b/pandas:pandas/tests/arrays/integer/test_dtypes.py
@@ -216,11 +216,11 @@
tm.assert_extension_array_equal(result, expected)
def test_astype_dt64():
# GH#32435
- arr = pd.array([1, 2, 3, pd.NA]) * 10 ** 9
+ arr = pd.array([1, 2, 3, pd.NA]) * 10**9
result = arr.astype("datetime64[ns]")
expected = np.array([1, 2, 3, "NaT"], dtype="M8[s]").astype("M8[ns]")
tm.assert_numpy_array_equal(result, expected)
--- a/pandas:pandas/tests/arrays/test_datetimelike.py
+++ b/pandas:pandas/tests/arrays/test_datetimelike.py
@@ -79,11 +79,11 @@
class SharedTests:
index_cls: type[DatetimeIndex | PeriodIndex | TimedeltaIndex]
@pytest.fixture
def arr1d(self):
- data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
+ data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9
arr = self.array_cls(data, freq="D")
return arr
def test_compare_len1_raises(self, arr1d):
# make sure we raise when comparing with different lengths, specific
@@ -146,11 +146,11 @@
result = left >= right
tm.assert_numpy_array_equal(result, ones)
def test_take(self):
- data = np.arange(100, dtype="i8") * 24 * 3600 * 10 ** 9
+ data = np.arange(100, dtype="i8") * 24 * 3600 * 10**9
np.random.shuffle(data)
freq = None if self.array_cls is not PeriodArray else "D"
arr = self.array_cls(data, freq=freq)
@@ -168,20 +168,20 @@
tm.assert_index_equal(self.index_cls(result), expected)
@pytest.mark.parametrize("fill_value", [2, 2.0, Timestamp.now().time])
def test_take_fill_raises(self, fill_value):
- data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
+ data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9
arr = self.array_cls(data, freq="D")
msg = f"value should be a '{arr._scalar_type.__name__}' or 'NaT'. Got"
with pytest.raises(TypeError, match=msg):
arr.take([0, 1], allow_fill=True, fill_value=fill_value)
def test_take_fill(self):
- data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
+ data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9
arr = self.array_cls(data, freq="D")
result = arr.take([-1, 1], allow_fill=True, fill_value=None)
assert result[0] is NaT
@@ -213,11 +213,11 @@
expected = self.index_cls(np.concatenate([arr2[:-1], arr2[1:], arr2]), None)
tm.assert_index_equal(self.index_cls(result), expected)
def test_unbox_scalar(self):
- data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
+ data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9
arr = self.array_cls(data, freq="D")
result = arr._unbox_scalar(arr[0])
expected = arr._data.dtype.type
assert isinstance(result, expected)
@@ -227,34 +227,34 @@
msg = f"'value' should be a {self.scalar_type.__name__}."
with pytest.raises(ValueError, match=msg):
arr._unbox_scalar("foo")
def test_check_compatible_with(self):
- data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
+ data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9
arr = self.array_cls(data, freq="D")
arr._check_compatible_with(arr[0])
arr._check_compatible_with(arr[:1])
arr._check_compatible_with(NaT)
def test_scalar_from_string(self):
- data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
+ data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9
arr = self.array_cls(data, freq="D")
result = arr._scalar_from_string(str(arr[0]))
assert result == arr[0]
def test_reduce_invalid(self):
- data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
+ data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9
arr = self.array_cls(data, freq="D")
msg = f"'{type(arr).__name__}' does not implement reduction 'not a method'"
with pytest.raises(TypeError, match=msg):
arr._reduce("not a method")
@pytest.mark.parametrize("method", ["pad", "backfill"])
def test_fillna_method_doesnt_change_orig(self, method):
- data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
+ data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9
arr = self.array_cls(data, freq="D")
arr[4] = NaT
fill_value = arr[3] if method == "pad" else arr[5]
@@ -263,11 +263,11 @@
# check that the original was not changed
assert arr[4] is NaT
def test_searchsorted(self):
- data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
+ data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9
arr = self.array_cls(data, freq="D")
# scalar
result = arr.searchsorted(arr[1])
assert result == 1
@@ -413,15 +413,15 @@
)
assert result == expected
def test_setitem(self):
- data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
+ data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9
arr = self.array_cls(data, freq="D")
arr[0] = arr[1]
- expected = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
+ expected = np.arange(10, dtype="i8") * 24 * 3600 * 10**9
expected[0] = expected[1]
tm.assert_numpy_array_equal(arr.asi8, expected)
arr[:2] = arr[-2:]
@@ -502,11 +502,11 @@
arr1d[:] = cat[::-1]
tm.assert_equal(arr1d, expected)
def test_setitem_raises(self):
- data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
+ data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9
arr = self.array_cls(data, freq="D")
val = arr[0]
with pytest.raises(IndexError, match="index 12 is out of bounds"):
arr[12] = val
@@ -538,11 +538,11 @@
with pytest.raises(TypeError, match=msg):
arr1d[:2] = box([0.0, 1.0])
def test_inplace_arithmetic(self):
# GH#24115 check that iadd and isub are actually in-place
- data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
+ data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9
arr = self.array_cls(data, freq="D")
expected = arr + pd.Timedelta(days=1)
arr += pd.Timedelta(days=1)
tm.assert_equal(arr, expected)
@@ -551,11 +551,11 @@
arr -= pd.Timedelta(days=1)
tm.assert_equal(arr, expected)
def test_shift_fill_int_deprecated(self):
# GH#31971
- data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
+ data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9
arr = self.array_cls(data, freq="D")
msg = "Passing <class 'int'> to shift"
with tm.assert_produces_warning(FutureWarning, match=msg):
result = arr.shift(1, fill_value=1)
--- a/pandas:pandas/tests/arrays/test_datetimes.py
+++ b/pandas:pandas/tests/arrays/test_datetimes.py
@@ -276,11 +276,11 @@
)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("index", [True, False])
def test_searchsorted_different_tz(self, index):
- data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
+ data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9
arr = DatetimeArray(data, freq="D").tz_localize("Asia/Tokyo")
if index:
arr = pd.Index(arr)
expected = arr.searchsorted(arr[2])
@@ -291,11 +291,11 @@
result = arr.searchsorted(arr[2:6].tz_convert("UTC"))
tm.assert_equal(result, expected)
@pytest.mark.parametrize("index", [True, False])
def test_searchsorted_tzawareness_compat(self, index):
- data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
+ data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9
arr = DatetimeArray(data, freq="D")
if index:
arr = pd.Index(arr)
mismatch = arr.tz_localize("Asia/Tokyo")
@@ -318,18 +318,18 @@
np.int64(1),
1.0,
np.timedelta64("NaT"),
pd.Timedelta(days=2),
"invalid",
- np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9,
- np.arange(10).view("timedelta64[ns]") * 24 * 3600 * 10 ** 9,
+ np.arange(10, dtype="i8") * 24 * 3600 * 10**9,
+ np.arange(10).view("timedelta64[ns]") * 24 * 3600 * 10**9,
pd.Timestamp.now().to_period("D"),
],
)
@pytest.mark.parametrize("index", [True, False])
def test_searchsorted_invalid_types(self, other, index):
- data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
+ data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9
arr = DatetimeArray(data, freq="D")
if index:
arr = pd.Index(arr)
msg = "|".join(
--- a/pandas:pandas/tests/arrays/test_timedeltas.py
+++ b/pandas:pandas/tests/arrays/test_timedeltas.py
@@ -55,18 +55,18 @@
np.int64(1),
1.0,
np.datetime64("NaT"),
pd.Timestamp.now(),
"invalid",
- np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9,
- (np.arange(10) * 24 * 3600 * 10 ** 9).view("datetime64[ns]"),
+ np.arange(10, dtype="i8") * 24 * 3600 * 10**9,
+ (np.arange(10) * 24 * 3600 * 10**9).view("datetime64[ns]"),
pd.Timestamp.now().to_period("D"),
],
)
@pytest.mark.parametrize("index", [True, False])
def test_searchsorted_invalid_types(self, other, index):
- data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
+ data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9
arr = TimedeltaArray(data, freq="D")
if index:
arr = pd.Index(arr)
msg = "|".join(
@@ -79,37 +79,37 @@
arr.searchsorted(other)
class TestUnaryOps:
def test_abs(self):
- vals = np.array([-3600 * 10 ** 9, "NaT", 7200 * 10 ** 9], dtype="m8[ns]")
+ vals = np.array([-3600 * 10**9, "NaT", 7200 * 10**9], dtype="m8[ns]")
arr = TimedeltaArray(vals)
- evals = np.array([3600 * 10 ** 9, "NaT", 7200 * 10 ** 9], dtype="m8[ns]")
+ evals = np.array([3600 * 10**9, "NaT", 7200 * 10**9], dtype="m8[ns]")
expected = TimedeltaArray(evals)
result = abs(arr)
tm.assert_timedelta_array_equal(result, expected)
result2 = np.abs(arr)
tm.assert_timedelta_array_equal(result2, expected)
def test_pos(self):
- vals = np.array([-3600 * 10 ** 9, "NaT", 7200 * 10 ** 9], dtype="m8[ns]")
+ vals = np.array([-3600 * 10**9, "NaT", 7200 * 10**9], dtype="m8[ns]")
arr = TimedeltaArray(vals)
result = +arr
tm.assert_timedelta_array_equal(result, arr)
result2 = np.positive(arr)
tm.assert_timedelta_array_equal(result2, arr)
def test_neg(self):
- vals = np.array([-3600 * 10 ** 9, "NaT", 7200 * 10 ** 9], dtype="m8[ns]")
+ vals = np.array([-3600 * 10**9, "NaT", 7200 * 10**9], dtype="m8[ns]")
arr = TimedeltaArray(vals)
- evals = np.array([3600 * 10 ** 9, "NaT", -7200 * 10 ** 9], dtype="m8[ns]")
+ evals = np.array([3600 * 10**9, "NaT", -7200 * 10**9], dtype="m8[ns]")
expected = TimedeltaArray(evals)
result = -arr
tm.assert_timedelta_array_equal(result, expected)
--- a/pandas:pandas/tests/arrays/timedeltas/test_constructors.py
+++ b/pandas:pandas/tests/arrays/timedeltas/test_constructors.py
@@ -17,11 +17,11 @@
# 0-dim
TimedeltaArray(arr[[0]].squeeze())
def test_freq_validation(self):
# ensure that the public constructor cannot create an invalid instance
- arr = np.array([0, 0, 1], dtype=np.int64) * 3600 * 10 ** 9
+ arr = np.array([0, 0, 1], dtype=np.int64) * 3600 * 10**9
msg = (
"Inferred frequency None from passed values does not "
"conform to passed frequency D"
)
--- a/pandas:pandas/tests/base/test_conversion.py
+++ b/pandas:pandas/tests/base/test_conversion.py
@@ -207,11 +207,11 @@
np.ndarray,
"datetime64[ns]",
marks=[pytest.mark.xfail(reason="datetime _values")],
),
pytest.param(
- pd.TimedeltaIndex([10 ** 10]),
+ pd.TimedeltaIndex([10**10]),
np.ndarray,
"m8[ns]",
marks=[pytest.mark.xfail(reason="timedelta _values")],
),
],
--- a/pandas:pandas/tests/dtypes/test_inference.py
+++ b/pandas:pandas/tests/dtypes/test_inference.py
@@ -531,29 +531,29 @@
arr = np.array([[10.0, 2], 1.0, "apple"], dtype=object)
result, _ = lib.maybe_convert_numeric(arr, set(), False, True)
tm.assert_numpy_array_equal(result, np.array([np.nan, 1.0, np.nan]))
def test_convert_numeric_uint64(self):
- arr = np.array([2 ** 63], dtype=object)
- exp = np.array([2 ** 63], dtype=np.uint64)
+ arr = np.array([2**63], dtype=object)
+ exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set())[0], exp)
- arr = np.array([str(2 ** 63)], dtype=object)
- exp = np.array([2 ** 63], dtype=np.uint64)
+ arr = np.array([str(2**63)], dtype=object)
+ exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set())[0], exp)
- arr = np.array([np.uint64(2 ** 63)], dtype=object)
- exp = np.array([2 ** 63], dtype=np.uint64)
+ arr = np.array([np.uint64(2**63)], dtype=object)
+ exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set())[0], exp)
@pytest.mark.parametrize(
"arr",
[
- np.array([2 ** 63, np.nan], dtype=object),
- np.array([str(2 ** 63), np.nan], dtype=object),
- np.array([np.nan, 2 ** 63], dtype=object),
- np.array([np.nan, str(2 ** 63)], dtype=object),
+ np.array([2**63, np.nan], dtype=object),
+ np.array([str(2**63), np.nan], dtype=object),
+ np.array([np.nan, 2**63], dtype=object),
+ np.array([np.nan, str(2**63)], dtype=object),
],
)
def test_convert_numeric_uint64_nan(self, coerce, arr):
expected = arr.astype(float) if coerce else arr.copy()
result, _ = lib.maybe_convert_numeric(arr, set(), coerce_numeric=coerce)
@@ -561,41 +561,41 @@
@pytest.mark.parametrize("convert_to_masked_nullable", [True, False])
def test_convert_numeric_uint64_nan_values(
self, coerce, convert_to_masked_nullable
):
- arr = np.array([2 ** 63, 2 ** 63 + 1], dtype=object)
- na_values = {2 ** 63}
+ arr = np.array([2**63, 2**63 + 1], dtype=object)
+ na_values = {2**63}
expected = (
- np.array([np.nan, 2 ** 63 + 1], dtype=float) if coerce else arr.copy()
+ np.array([np.nan, 2**63 + 1], dtype=float) if coerce else arr.copy()
)
result = lib.maybe_convert_numeric(
arr,
na_values,
coerce_numeric=coerce,
convert_to_masked_nullable=convert_to_masked_nullable,
)
if convert_to_masked_nullable and coerce:
expected = IntegerArray(
- np.array([0, 2 ** 63 + 1], dtype="u8"),
+ np.array([0, 2**63 + 1], dtype="u8"),
np.array([True, False], dtype="bool"),
)
result = IntegerArray(*result)
else:
result = result[0] # discard mask
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize(
"case",
[
- np.array([2 ** 63, -1], dtype=object),
- np.array([str(2 ** 63), -1], dtype=object),
- np.array([str(2 ** 63), str(-1)], dtype=object),
- np.array([-1, 2 ** 63], dtype=object),
- np.array([-1, str(2 ** 63)], dtype=object),
- np.array([str(-1), str(2 ** 63)], dtype=object),
+ np.array([2**63, -1], dtype=object),
+ np.array([str(2**63), -1], dtype=object),
+ np.array([str(2**63), str(-1)], dtype=object),
+ np.array([-1, 2**63], dtype=object),
+ np.array([-1, str(2**63)], dtype=object),
+ np.array([str(-1), str(2**63)], dtype=object),
],
)
@pytest.mark.parametrize("convert_to_masked_nullable", [True, False])
def test_convert_numeric_int64_uint64(
self, case, coerce, convert_to_masked_nullable
@@ -623,36 +623,36 @@
result = FloatingArray(*result)
else:
result = result[0]
assert np.isnan(result)
- @pytest.mark.parametrize("value", [-(2 ** 63) - 1, 2 ** 64])
+ @pytest.mark.parametrize("value", [-(2**63) - 1, 2**64])
def test_convert_int_overflow(self, value):
# see gh-18584
arr = np.array([value], dtype=object)
result = lib.maybe_convert_objects(arr)
tm.assert_numpy_array_equal(arr, result)
def test_maybe_convert_objects_uint64(self):
# see gh-4471
- arr = np.array([2 ** 63], dtype=object)
- exp = np.array([2 ** 63], dtype=np.uint64)
+ arr = np.array([2**63], dtype=object)
+ exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
# NumPy bug: can't compare uint64 to int64, as that
# results in both casting to float64, so we should
# make sure that this function is robust against it
- arr = np.array([np.uint64(2 ** 63)], dtype=object)
- exp = np.array([2 ** 63], dtype=np.uint64)
+ arr = np.array([np.uint64(2**63)], dtype=object)
+ exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
arr = np.array([2, -1], dtype=object)
exp = np.array([2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
- arr = np.array([2 ** 63, -1], dtype=object)
- exp = np.array([2 ** 63, -1], dtype=object)
+ arr = np.array([2**63, -1], dtype=object)
+ exp = np.array([2**63, -1], dtype=object)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
def test_maybe_convert_objects_datetime(self):
# GH27438
arr = np.array(
--- a/pandas:pandas/tests/frame/conftest.py
+++ b/pandas:pandas/tests/frame/conftest.py
@@ -210,11 +210,11 @@
Fixture for DataFrame with uint64 values
Columns are ['A', 'B']
"""
return DataFrame(
- {"A": np.arange(3), "B": [2 ** 63, 2 ** 63 + 5, 2 ** 63 + 10]}, dtype=np.uint64
+ {"A": np.arange(3), "B": [2**63, 2**63 + 5, 2**63 + 10]}, dtype=np.uint64
)
@pytest.fixture
def simple_frame():
--- a/pandas:pandas/tests/frame/methods/test_cov_corr.py
+++ b/pandas:pandas/tests/frame/methods/test_cov_corr.py
@@ -340,16 +340,16 @@
@td.skip_if_no_scipy
def test_corrwith_spearman(self):
# GH#21925
df = DataFrame(np.random.random(size=(100, 3)))
- result = df.corrwith(df ** 2, method="spearman")
+ result = df.corrwith(df**2, method="spearman")
expected = Series(np.ones(len(result)))
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_corrwith_kendall(self):
# GH#21925
df = DataFrame(np.random.random(size=(100, 3)))
- result = df.corrwith(df ** 2, method="kendall")
+ result = df.corrwith(df**2, method="kendall")
expected = Series(np.ones(len(result)))
tm.assert_series_equal(result, expected)
--- a/pandas:pandas/tests/frame/methods/test_pipe.py
+++ b/pandas:pandas/tests/frame/methods/test_pipe.py
@@ -13,11 +13,11 @@
expected = DataFrame({"A": [1, 4, 9]})
if frame_or_series is Series:
obj = obj["A"]
expected = expected["A"]
- f = lambda x, y: x ** y
+ f = lambda x, y: x**y
result = obj.pipe(f, 2)
tm.assert_equal(result, expected)
def test_pipe_tuple(self, frame_or_series):
obj = DataFrame({"A": [1, 2, 3]})
--- a/pandas:pandas/tests/frame/methods/test_rank.py
+++ b/pandas:pandas/tests/frame/methods/test_rank.py
@@ -327,11 +327,11 @@
@pytest.mark.single
@pytest.mark.high_memory
def test_pct_max_many_rows(self):
# GH 18271
df = DataFrame(
- {"A": np.arange(2 ** 24 + 1), "B": np.arange(2 ** 24 + 1, 0, -1)}
+ {"A": np.arange(2**24 + 1), "B": np.arange(2**24 + 1, 0, -1)}
)
result = df.rank(pct=True).max()
assert (result == 1).all()
@pytest.mark.parametrize(
--- a/pandas:pandas/tests/frame/methods/test_reset_index.py
+++ b/pandas:pandas/tests/frame/methods/test_reset_index.py
@@ -26,11 +26,11 @@
class TestResetIndex:
def test_set_reset(self):
- idx = Index([2 ** 63, 2 ** 63 + 5, 2 ** 63 + 10], name="foo")
+ idx = Index([2**63, 2**63 + 5, 2**63 + 10], name="foo")
# set/reset
df = DataFrame({"A": [0, 1, 2]}, index=idx)
result = df.reset_index()
assert result["foo"].dtype == np.dtype("uint64")
@@ -218,11 +218,11 @@
df.set_index(idx_lev).reset_index(level=[0, 1, 2])
def test_reset_index_right_dtype(self):
time = np.arange(0.0, 10, np.sqrt(2) / 2)
s1 = Series(
- (9.81 * time ** 2) / 2, index=Index(time, name="time"), name="speed"
+ (9.81 * time**2) / 2, index=Index(time, name="time"), name="speed"
)
df = DataFrame(s1)
reset = s1.reset_index()
assert reset["time"].dtype == np.float64
--- a/pandas:pandas/tests/frame/test_arithmetic.py
+++ b/pandas:pandas/tests/frame/test_arithmetic.py
@@ -815,17 +815,17 @@
_test_op(df, lambda x, y: y + x)
_test_op(df, lambda x, y: y - x)
_test_op(df, lambda x, y: y * x)
_test_op(df, lambda x, y: y / x)
- _test_op(df, lambda x, y: y ** x)
+ _test_op(df, lambda x, y: y**x)
_test_op(df, lambda x, y: x + y)
_test_op(df, lambda x, y: x - y)
_test_op(df, lambda x, y: x * y)
_test_op(df, lambda x, y: x / y)
- _test_op(df, lambda x, y: x ** y)
+ _test_op(df, lambda x, y: x**y)
@pytest.mark.parametrize(
"values", [[1, 2], (1, 2), np.array([1, 2]), range(1, 3), deque([1, 2])]
)
def test_arith_alignment_non_pandas_object(self, values):
@@ -894,13 +894,13 @@
@pytest.mark.parametrize(
"value, dtype",
[
(1, "i8"),
(1.0, "f8"),
- (2 ** 63, "f8"),
+ (2**63, "f8"),
(1j, "complex128"),
- (2 ** 63, "complex128"),
+ (2**63, "complex128"),
(True, "bool"),
(np.timedelta64(20, "ns"), "<m8[ns]"),
(np.datetime64(20, "ns"), "<M8[ns]"),
],
)
@@ -1714,11 +1714,11 @@
def test_pow_with_realignment():
# GH#32685 pow has special semantics for operating with null values
left = DataFrame({"A": [0, 1, 2]})
right = DataFrame(index=[0, 1, 2])
- result = left ** right
+ result = left**right
expected = DataFrame({"A": [np.nan, 1.0, np.nan]})
tm.assert_frame_equal(result, expected)
# TODO: move to tests.arithmetic and parametrize
@@ -1726,11 +1726,11 @@
left = DataFrame({"A": [np.nan, np.nan, np.nan]})
right = DataFrame({"A": [0, 0, 0]})
expected = DataFrame({"A": [1.0, 1.0, 1.0]})
- result = left ** right
+ result = left**right
tm.assert_frame_equal(result, expected)
result = left["A"] ** right["A"]
tm.assert_series_equal(result, expected["A"])
--- a/pandas:pandas/tests/frame/test_block_internals.py
+++ b/pandas:pandas/tests/frame/test_block_internals.py
@@ -117,18 +117,18 @@
tm.assert_almost_equal(expected, float_frame.values)
def test_constructor_with_convert(self):
# this is actually mostly a test of lib.maybe_convert_objects
# #2845
- df = DataFrame({"A": [2 ** 63 - 1]})
- result = df["A"]
- expected = Series(np.asarray([2 ** 63 - 1], np.int64), name="A")
- tm.assert_series_equal(result, expected)
-
- df = DataFrame({"A": [2 ** 63]})
- result = df["A"]
- expected = Series(np.asarray([2 ** 63], np.uint64), name="A")
+ df = DataFrame({"A": [2**63 - 1]})
+ result = df["A"]
+ expected = Series(np.asarray([2**63 - 1], np.int64), name="A")
+ tm.assert_series_equal(result, expected)
+
+ df = DataFrame({"A": [2**63]})
+ result = df["A"]
+ expected = Series(np.asarray([2**63], np.uint64), name="A")
tm.assert_series_equal(result, expected)
df = DataFrame({"A": [datetime(2005, 1, 1), True]})
result = df["A"]
expected = Series(
--- a/pandas:pandas/tests/frame/test_constructors.py
+++ b/pandas:pandas/tests/frame/test_constructors.py
@@ -370,11 +370,11 @@
df = DataFrame({0: np.ones(10, dtype=bool), 1: np.zeros(10, dtype=bool)})
assert df.values.dtype == np.bool_
def test_constructor_overflow_int64(self):
# see gh-14881
- values = np.array([2 ** 64 - i for i in range(1, 10)], dtype=np.uint64)
+ values = np.array([2**64 - i for i in range(1, 10)], dtype=np.uint64)
result = DataFrame({"a": values})
assert result["a"].dtype == np.uint64
# see gh-2355
@@ -392,16 +392,16 @@
assert df_crawls["uid"].dtype == np.uint64
@pytest.mark.parametrize(
"values",
[
- np.array([2 ** 64], dtype=object),
- np.array([2 ** 65]),
- [2 ** 64 + 1],
- np.array([-(2 ** 63) - 4], dtype=object),
- np.array([-(2 ** 64) - 1]),
- [-(2 ** 65) - 2],
+ np.array([2**64], dtype=object),
+ np.array([2**65]),
+ [2**64 + 1],
+ np.array([-(2**63) - 4], dtype=object),
+ np.array([-(2**64) - 1]),
+ [-(2**65) - 2],
],
)
def test_constructor_int_overflow(self, values):
# see gh-18584
value = values[0]
@@ -2041,11 +2041,11 @@
result = df.dtypes
expected = Series([np.dtype("int32")] * 5)
tm.assert_series_equal(result, expected)
# overflow issue? (we always expected int64 upcasting here)
- df = DataFrame({"a": [2 ** 31, 2 ** 31 + 1]})
+ df = DataFrame({"a": [2**31, 2**31 + 1]})
assert df.dtypes.iloc[0] == np.dtype("int64")
# GH #2751 (construction with no index specified), make sure we cast to
# platform values
df = DataFrame([1, 2])
--- a/pandas:pandas/tests/frame/test_stack_unstack.py
+++ b/pandas:pandas/tests/frame/test_stack_unstack.py
@@ -1821,11 +1821,11 @@
@pytest.mark.slow
def test_unstack_number_of_levels_larger_than_int32(self):
# GH#20601
df = DataFrame(
- np.random.randn(2 ** 16, 2), index=[np.arange(2 ** 16), np.arange(2 ** 16)]
+ np.random.randn(2**16, 2), index=[np.arange(2**16), np.arange(2**16)]
)
with pytest.raises(ValueError, match="int32 overflow"):
df.unstack()
def test_stack_order_with_unsorted_levels(self):
--- a/pandas:pandas/tests/groupby/aggregate/test_aggregate.py
+++ b/pandas:pandas/tests/groupby/aggregate/test_aggregate.py
@@ -227,14 +227,14 @@
@pytest.mark.parametrize(
"func, expected, dtype, result_dtype_dict",
[
("sum", [5, 7, 9], "int64", {}),
- ("std", [4.5 ** 0.5] * 3, int, {"i": float, "j": float, "k": float}),
+ ("std", [4.5**0.5] * 3, int, {"i": float, "j": float, "k": float}),
("var", [4.5] * 3, int, {"i": float, "j": float, "k": float}),
("sum", [5, 7, 9], "Int64", {"j": "int64"}),
- ("std", [4.5 ** 0.5] * 3, "Int64", {"i": float, "j": float, "k": float}),
+ ("std", [4.5**0.5] * 3, "Int64", {"i": float, "j": float, "k": float}),
("var", [4.5] * 3, "Int64", {"i": "float64", "j": "float64", "k": "float64"}),
],
)
def test_multiindex_groupby_mixed_cols_axis1(func, expected, dtype, result_dtype_dict):
# GH#43209
@@ -252,11 +252,11 @@
@pytest.mark.parametrize(
"func, expected_data, result_dtype_dict",
[
("sum", [[2, 4], [10, 12], [18, 20]], {10: "int64", 20: "int64"}),
# std should ideally return Int64 / Float64 #43330
- ("std", [[2 ** 0.5] * 2] * 3, "float64"),
+ ("std", [[2**0.5] * 2] * 3, "float64"),
("var", [[2] * 2] * 3, {10: "float64", 20: "float64"}),
],
)
def test_groupby_mixed_cols_axis1(func, expected_data, result_dtype_dict):
# GH#43209
@@ -1101,20 +1101,20 @@
)
# check pd.NameAgg case
result1 = df.groupby(by="kind").agg(
height_sqr_min=pd.NamedAgg(
- column="height", aggfunc=lambda x: np.min(x ** 2)
+ column="height", aggfunc=lambda x: np.min(x**2)
),
height_max=pd.NamedAgg(column="height", aggfunc="max"),
weight_max=pd.NamedAgg(column="weight", aggfunc="max"),
)
tm.assert_frame_equal(result1, expected)
# check agg(key=(col, aggfunc)) case
result2 = df.groupby(by="kind").agg(
- height_sqr_min=("height", lambda x: np.min(x ** 2)),
+ height_sqr_min=("height", lambda x: np.min(x**2)),
height_max=("height", "max"),
weight_max=("weight", "max"),
)
tm.assert_frame_equal(result2, expected)
@@ -1147,22 +1147,22 @@
columns=columns,
)
# check agg(key=(col, aggfunc)) case
result1 = df.groupby(by="kind").agg(
- height_sqr_min=("height", lambda x: np.min(x ** 2)),
+ height_sqr_min=("height", lambda x: np.min(x**2)),
height_max=("height", "max"),
weight_max=("weight", "max"),
height_max_2=("height", lambda x: np.max(x)),
weight_min=("weight", lambda x: np.min(x)),
)
tm.assert_frame_equal(result1, expected)
# check pd.NamedAgg case
result2 = df.groupby(by="kind").agg(
height_sqr_min=pd.NamedAgg(
- column="height", aggfunc=lambda x: np.min(x ** 2)
+ column="height", aggfunc=lambda x: np.min(x**2)
),
height_max=pd.NamedAgg(column="height", aggfunc="max"),
weight_max=pd.NamedAgg(column="weight", aggfunc="max"),
height_max_2=pd.NamedAgg(column="height", aggfunc=lambda x: np.max(x)),
weight_min=pd.NamedAgg(column="weight", aggfunc=lambda x: np.min(x)),
--- a/pandas:pandas/tests/groupby/test_function.py
+++ b/pandas:pandas/tests/groupby/test_function.py
@@ -854,11 +854,11 @@
@td.skip_if_32bit
@pytest.mark.parametrize("method", ["cummin", "cummax"])
@pytest.mark.parametrize(
- "dtype,val", [("UInt64", np.iinfo("uint64").max), ("Int64", 2 ** 53 + 1)]
+ "dtype,val", [("UInt64", np.iinfo("uint64").max), ("Int64", 2**53 + 1)]
)
def test_nullable_int_not_cast_as_float(method, dtype, val):
data = [val, pd.NA]
df = DataFrame({"grp": [1, 1], "b": data}, dtype=dtype)
grouped = df.groupby("grp")
--- a/pandas:pandas/tests/groupby/test_libgroupby.py
+++ b/pandas:pandas/tests/groupby/test_libgroupby.py
@@ -110,17 +110,17 @@
def test_group_var_large_inputs(self):
prng = np.random.RandomState(1234)
out = np.array([[np.nan]], dtype=self.dtype)
counts = np.array([0], dtype="int64")
- values = (prng.rand(10 ** 6) + 10 ** 12).astype(self.dtype)
- values.shape = (10 ** 6, 1)
- labels = np.zeros(10 ** 6, dtype="intp")
-
- self.algo(out, counts, values, labels)
-
- assert counts[0] == 10 ** 6
+ values = (prng.rand(10**6) + 10**12).astype(self.dtype)
+ values.shape = (10**6, 1)
+ labels = np.zeros(10**6, dtype="intp")
+
+ self.algo(out, counts, values, labels)
+
+ assert counts[0] == 10**6
tm.assert_almost_equal(out[0, 0], 1.0 / 12, rtol=0.5e-3)
class TestGroupVarFloat32(GroupVarTestMixin):
__test__ = True
--- a/pandas:pandas/tests/groupby/test_pipe.py
+++ b/pandas:pandas/tests/groupby/test_pipe.py
@@ -25,11 +25,11 @@
def f(dfgb):
return dfgb.B.max() - dfgb.C.min().min()
def square(srs):
- return srs ** 2
+ return srs**2
# Note that the transformations are
# GroupBy -> Series
# Series -> Series
# This then chains the GroupBy.pipe and the
--- a/pandas:pandas/tests/indexes/base_class/test_indexing.py
+++ b/pandas:pandas/tests/indexes/base_class/test_indexing.py
@@ -49,11 +49,11 @@
# _bin_search vs ndarray.searchsorted makes a difference
lev = list("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
dti = pd.date_range("2016-01-01", periods=100)
- mi = pd.MultiIndex.from_product([lev, range(10 ** 3), dti])
+ mi = pd.MultiIndex.from_product([lev, range(10**3), dti])
oidx = mi.to_flat_index()
loc = len(oidx) // 2
tup = oidx[loc]
--- a/pandas:pandas/tests/indexes/datetimelike_/test_equals.py
+++ b/pandas:pandas/tests/indexes/datetimelike_/test_equals.py
@@ -165,11 +165,11 @@
assert not idx.equals(list(idx2))
assert not idx.equals(pd.Series(idx2))
# Check that we dont raise OverflowError on comparisons outside the
# implementation range
- oob = Index([timedelta(days=10 ** 6)] * 3, dtype=object)
+ oob = Index([timedelta(days=10**6)] * 3, dtype=object)
assert not idx.equals(oob)
assert not idx2.equals(oob)
# FIXME: oob.apply(np.timedelta64) incorrectly overflows
oob2 = Index([np.timedelta64(x) for x in oob], dtype=object)
--- a/pandas:pandas/tests/indexes/datetimelike_/test_indexing.py
+++ b/pandas:pandas/tests/indexes/datetimelike_/test_indexing.py
@@ -18,11 +18,11 @@
@pytest.mark.parametrize("ldtype", dtlike_dtypes)
@pytest.mark.parametrize("rdtype", dtlike_dtypes)
def test_get_indexer_non_unique_wrong_dtype(ldtype, rdtype):
- vals = np.tile(3600 * 10 ** 9 * np.arange(3), 2)
+ vals = np.tile(3600 * 10**9 * np.arange(3), 2)
def construct(dtype):
if dtype is dtlike_dtypes[-1]:
# PeriodArray will try to cast ints to strings
return DatetimeIndex(vals).astype(dtype)
--- a/pandas:pandas/tests/indexes/interval/test_interval_tree.py
+++ b/pandas:pandas/tests/indexes/interval/test_interval_tree.py
@@ -56,11 +56,11 @@
):
tree.get_indexer(np.array([3.0]))
@pytest.mark.parametrize(
"dtype, target_value, target_dtype",
- [("int64", 2 ** 63 + 1, "uint64"), ("uint64", -1, "int64")],
+ [("int64", 2**63 + 1, "uint64"), ("uint64", -1, "int64")],
)
def test_get_indexer_overflow(self, dtype, target_value, target_dtype):
left, right = np.array([0, 1], dtype=dtype), np.array([1, 2], dtype=dtype)
tree = IntervalTree(left, right)
@@ -87,11 +87,11 @@
expected = np.array([2], dtype="intp")
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"dtype, target_value, target_dtype",
- [("int64", 2 ** 63 + 1, "uint64"), ("uint64", -1, "int64")],
+ [("int64", 2**63 + 1, "uint64"), ("uint64", -1, "int64")],
)
def test_get_indexer_non_unique_overflow(self, dtype, target_value, target_dtype):
left, right = np.array([0, 2], dtype=dtype), np.array([1, 3], dtype=dtype)
tree = IntervalTree(left, right)
target = np.array([target_value], dtype=target_dtype)
--- a/pandas:pandas/tests/indexes/multi/test_indexing.py
+++ b/pandas:pandas/tests/indexes/multi/test_indexing.py
@@ -787,12 +787,12 @@
assert "0 day 09:30:00" in idx
@pytest.mark.slow
def test_large_mi_contains(self):
# GH#10645
- result = MultiIndex.from_arrays([range(10 ** 6), range(10 ** 6)])
- assert not (10 ** 6, 0) in result
+ result = MultiIndex.from_arrays([range(10**6), range(10**6)])
+ assert not (10**6, 0) in result
def test_timestamp_multiindex_indexer():
# https://github.com/pandas-dev/pandas/issues/26944
idx = MultiIndex.from_product(
--- a/pandas:pandas/tests/indexes/multi/test_integrity.py
+++ b/pandas:pandas/tests/indexes/multi/test_integrity.py
@@ -50,11 +50,11 @@
tm.assert_numpy_array_equal(result.values[:4], result[:4].values)
def test_values_multiindex_datetimeindex():
# Test to ensure we hit the boxing / nobox part of MI.values
- ints = np.arange(10 ** 18, 10 ** 18 + 5)
+ ints = np.arange(10**18, 10**18 + 5)
naive = pd.DatetimeIndex(ints)
aware = pd.DatetimeIndex(ints, tz="US/Central")
idx = MultiIndex.from_arrays([naive, aware])
--- a/pandas:pandas/tests/indexes/numeric/test_indexing.py
+++ b/pandas:pandas/tests/indexes/numeric/test_indexing.py
@@ -16,11 +16,11 @@
@pytest.fixture
def index_large():
# large values used in UInt64Index tests where no compat needed with Int64/Float64
- large = [2 ** 63, 2 ** 63 + 10, 2 ** 63 + 15, 2 ** 63 + 20, 2 ** 63 + 25]
+ large = [2**63, 2**63 + 10, 2**63 + 15, 2**63 + 20, 2**63 + 25]
return UInt64Index(large)
class TestGetLoc:
@pytest.mark.parametrize("method", [None, "pad", "backfill", "nearest"])
@@ -155,11 +155,11 @@
idx.get_loc(np.nan, method=method)
@pytest.mark.parametrize("dtype", ["f8", "i8", "u8"])
def test_get_loc_numericindex_none_raises(self, dtype):
# case that goes through searchsorted and key is non-comparable to values
- arr = np.arange(10 ** 7, dtype=dtype)
+ arr = np.arange(10**7, dtype=dtype)
idx = Index(arr)
with pytest.raises(KeyError, match="None"):
idx.get_loc(None)
def test_get_loc_overflows(self):
@@ -370,21 +370,21 @@
indexer = index.get_indexer(target, method="backfill")
expected = np.array([0, 1, 1, 2, 2, 3, 3, 4, 4, 5], dtype=np.intp)
tm.assert_numpy_array_equal(indexer, expected)
def test_get_indexer_uint64(self, index_large):
- target = UInt64Index(np.arange(10).astype("uint64") * 5 + 2 ** 63)
+ target = UInt64Index(np.arange(10).astype("uint64") * 5 + 2**63)
indexer = index_large.get_indexer(target)
expected = np.array([0, -1, 1, 2, 3, 4, -1, -1, -1, -1], dtype=np.intp)
tm.assert_numpy_array_equal(indexer, expected)
- target = UInt64Index(np.arange(10).astype("uint64") * 5 + 2 ** 63)
+ target = UInt64Index(np.arange(10).astype("uint64") * 5 + 2**63)
indexer = index_large.get_indexer(target, method="pad")
expected = np.array([0, 0, 1, 2, 3, 4, 4, 4, 4, 4], dtype=np.intp)
tm.assert_numpy_array_equal(indexer, expected)
- target = UInt64Index(np.arange(10).astype("uint64") * 5 + 2 ** 63)
+ target = UInt64Index(np.arange(10).astype("uint64") * 5 + 2**63)
indexer = index_large.get_indexer(target, method="backfill")
expected = np.array([0, 1, 1, 2, 3, 4, -1, -1, -1, -1], dtype=np.intp)
tm.assert_numpy_array_equal(indexer, expected)
--- a/pandas:pandas/tests/indexes/numeric/test_join.py
+++ b/pandas:pandas/tests/indexes/numeric/test_join.py
@@ -196,17 +196,17 @@
class TestJoinUInt64Index:
@pytest.fixture
def index_large(self):
# large values used in TestUInt64Index where no compat needed with Int64/Float64
- large = [2 ** 63, 2 ** 63 + 10, 2 ** 63 + 15, 2 ** 63 + 20, 2 ** 63 + 25]
+ large = [2**63, 2**63 + 10, 2**63 + 15, 2**63 + 20, 2**63 + 25]
return UInt64Index(large)
def test_join_inner(self, index_large):
- other = UInt64Index(2 ** 63 + np.array([7, 12, 25, 1, 2, 10], dtype="uint64"))
+ other = UInt64Index(2**63 + np.array([7, 12, 25, 1, 2, 10], dtype="uint64"))
other_mono = UInt64Index(
- 2 ** 63 + np.array([1, 2, 7, 10, 12, 25], dtype="uint64")
+ 2**63 + np.array([1, 2, 7, 10, 12, 25], dtype="uint64")
)
# not monotonic
res, lidx, ridx = index_large.join(other, how="inner", return_indexers=True)
@@ -214,11 +214,11 @@
ind = res.argsort()
res = res.take(ind)
lidx = lidx.take(ind)
ridx = ridx.take(ind)
- eres = UInt64Index(2 ** 63 + np.array([10, 25], dtype="uint64"))
+ eres = UInt64Index(2**63 + np.array([10, 25], dtype="uint64"))
elidx = np.array([1, 4], dtype=np.intp)
eridx = np.array([5, 2], dtype=np.intp)
assert isinstance(res, UInt64Index)
tm.assert_index_equal(res, eres)
@@ -240,13 +240,13 @@
tm.assert_index_equal(res, eres)
tm.assert_numpy_array_equal(lidx, elidx)
tm.assert_numpy_array_equal(ridx, eridx)
def test_join_left(self, index_large):
- other = UInt64Index(2 ** 63 + np.array([7, 12, 25, 1, 2, 10], dtype="uint64"))
+ other = UInt64Index(2**63 + np.array([7, 12, 25, 1, 2, 10], dtype="uint64"))
other_mono = UInt64Index(
- 2 ** 63 + np.array([1, 2, 7, 10, 12, 25], dtype="uint64")
+ 2**63 + np.array([1, 2, 7, 10, 12, 25], dtype="uint64")
)
# not monotonic
res, lidx, ridx = index_large.join(other, how="left", return_indexers=True)
eres = index_large
@@ -265,27 +265,27 @@
tm.assert_index_equal(res, eres)
assert lidx is None
tm.assert_numpy_array_equal(ridx, eridx)
# non-unique
- idx = UInt64Index(2 ** 63 + np.array([1, 1, 2, 5], dtype="uint64"))
- idx2 = UInt64Index(2 ** 63 + np.array([1, 2, 5, 7, 9], dtype="uint64"))
+ idx = UInt64Index(2**63 + np.array([1, 1, 2, 5], dtype="uint64"))
+ idx2 = UInt64Index(2**63 + np.array([1, 2, 5, 7, 9], dtype="uint64"))
res, lidx, ridx = idx2.join(idx, how="left", return_indexers=True)
# 1 is in idx2, so it should be x2
- eres = UInt64Index(2 ** 63 + np.array([1, 1, 2, 5, 7, 9], dtype="uint64"))
+ eres = UInt64Index(2**63 + np.array([1, 1, 2, 5, 7, 9], dtype="uint64"))
eridx = np.array([0, 1, 2, 3, -1, -1], dtype=np.intp)
elidx = np.array([0, 0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_index_equal(res, eres)
tm.assert_numpy_array_equal(lidx, elidx)
tm.assert_numpy_array_equal(ridx, eridx)
def test_join_right(self, index_large):
- other = UInt64Index(2 ** 63 + np.array([7, 12, 25, 1, 2, 10], dtype="uint64"))
+ other = UInt64Index(2**63 + np.array([7, 12, 25, 1, 2, 10], dtype="uint64"))
other_mono = UInt64Index(
- 2 ** 63 + np.array([1, 2, 7, 10, 12, 25], dtype="uint64")
+ 2**63 + np.array([1, 2, 7, 10, 12, 25], dtype="uint64")
)
# not monotonic
res, lidx, ridx = index_large.join(other, how="right", return_indexers=True)
eres = other
@@ -307,39 +307,39 @@
tm.assert_numpy_array_equal(lidx, elidx)
tm.assert_index_equal(res, eres)
assert ridx is None
# non-unique
- idx = UInt64Index(2 ** 63 + np.array([1, 1, 2, 5], dtype="uint64"))
- idx2 = UInt64Index(2 ** 63 + np.array([1, 2, 5, 7, 9], dtype="uint64"))
+ idx = UInt64Index(2**63 + np.array([1, 1, 2, 5], dtype="uint64"))
+ idx2 = UInt64Index(2**63 + np.array([1, 2, 5, 7, 9], dtype="uint64"))
res, lidx, ridx = idx.join(idx2, how="right", return_indexers=True)
# 1 is in idx2, so it should be x2
- eres = UInt64Index(2 ** 63 + np.array([1, 1, 2, 5, 7, 9], dtype="uint64"))
+ eres = UInt64Index(2**63 + np.array([1, 1, 2, 5, 7, 9], dtype="uint64"))
elidx = np.array([0, 1, 2, 3, -1, -1], dtype=np.intp)
eridx = np.array([0, 0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_index_equal(res, eres)
tm.assert_numpy_array_equal(lidx, elidx)
tm.assert_numpy_array_equal(ridx, eridx)
def test_join_non_int_index(self, index_large):
other = Index(
- 2 ** 63 + np.array([1, 5, 7, 10, 20], dtype="uint64"), dtype=object
+ 2**63 + np.array([1, 5, 7, 10, 20], dtype="uint64"), dtype=object
)
outer = index_large.join(other, how="outer")
outer2 = other.join(index_large, how="outer")
expected = Index(
- 2 ** 63 + np.array([0, 1, 5, 7, 10, 15, 20, 25], dtype="uint64")
+ 2**63 + np.array([0, 1, 5, 7, 10, 15, 20, 25], dtype="uint64")
)
tm.assert_index_equal(outer, outer2)
tm.assert_index_equal(outer, expected)
inner = index_large.join(other, how="inner")
inner2 = other.join(index_large, how="inner")
- expected = Index(2 ** 63 + np.array([10, 20], dtype="uint64"))
+ expected = Index(2**63 + np.array([10, 20], dtype="uint64"))
tm.assert_index_equal(inner, inner2)
tm.assert_index_equal(inner, expected)
left = index_large.join(other, how="left")
tm.assert_index_equal(left, index_large.astype(object))
@@ -352,23 +352,23 @@
right2 = other.join(index_large, how="right")
tm.assert_index_equal(right2, index_large.astype(object))
def test_join_outer(self, index_large):
- other = UInt64Index(2 ** 63 + np.array([7, 12, 25, 1, 2, 10], dtype="uint64"))
+ other = UInt64Index(2**63 + np.array([7, 12, 25, 1, 2, 10], dtype="uint64"))
other_mono = UInt64Index(
- 2 ** 63 + np.array([1, 2, 7, 10, 12, 25], dtype="uint64")
+ 2**63 + np.array([1, 2, 7, 10, 12, 25], dtype="uint64")
)
# not monotonic
# guarantee of sortedness
res, lidx, ridx = index_large.join(other, how="outer", return_indexers=True)
noidx_res = index_large.join(other, how="outer")
tm.assert_index_equal(res, noidx_res)
eres = UInt64Index(
- 2 ** 63 + np.array([0, 1, 2, 7, 10, 12, 15, 20, 25], dtype="uint64")
+ 2**63 + np.array([0, 1, 2, 7, 10, 12, 15, 20, 25], dtype="uint64")
)
elidx = np.array([0, -1, -1, -1, 1, -1, 2, 3, 4], dtype=np.intp)
eridx = np.array([-1, 3, 4, 0, 5, 1, -1, -1, 2], dtype=np.intp)
assert isinstance(res, UInt64Index)
--- a/pandas:pandas/tests/indexes/numeric/test_numeric.py
+++ b/pandas:pandas/tests/indexes/numeric/test_numeric.py
@@ -561,12 +561,12 @@
# compat with shared Int64/Float64 tests
return self._index_cls(np.arange(5, dtype=dtype))
@pytest.fixture(
params=[
- [2 ** 63, 2 ** 63 + 10, 2 ** 63 + 15, 2 ** 63 + 20, 2 ** 63 + 25],
- [2 ** 63 + 25, 2 ** 63 + 20, 2 ** 63 + 15, 2 ** 63 + 10, 2 ** 63],
+ [2**63, 2**63 + 10, 2**63 + 15, 2**63 + 20, 2**63 + 25],
+ [2**63 + 25, 2**63 + 20, 2**63 + 15, 2**63 + 10, 2**63],
],
ids=["index_inc", "index_dec"],
)
def index(self, request):
return self._index_cls(request.param, dtype=np.uint64)
@@ -592,25 +592,25 @@
idx = index_cls([1, 2, 3])
res = Index([1, 2, 3], dtype=dtype)
tm.assert_index_equal(res, idx, exact=exact)
- idx = index_cls([1, 2 ** 63])
- res = Index([1, 2 ** 63], dtype=dtype)
+ idx = index_cls([1, 2**63])
+ res = Index([1, 2**63], dtype=dtype)
tm.assert_index_equal(res, idx, exact=exact)
- idx = index_cls([1, 2 ** 63])
- res = Index([1, 2 ** 63])
+ idx = index_cls([1, 2**63])
+ res = Index([1, 2**63])
tm.assert_index_equal(res, idx, exact=exact)
- idx = Index([-1, 2 ** 63], dtype=object)
- res = Index(np.array([-1, 2 ** 63], dtype=object))
+ idx = Index([-1, 2**63], dtype=object)
+ res = Index(np.array([-1, 2**63], dtype=object))
tm.assert_index_equal(res, idx, exact=exact)
# https://github.com/pandas-dev/pandas/issues/29526
- idx = index_cls([1, 2 ** 63 + 1], dtype=dtype)
- res = Index([1, 2 ** 63 + 1], dtype=dtype)
+ idx = index_cls([1, 2**63 + 1], dtype=dtype)
+ res = Index([1, 2**63 + 1], dtype=dtype)
tm.assert_index_equal(res, idx, exact=exact)
def test_constructor_does_not_cast_to_float(self):
# https://github.com/numpy/numpy/issues/19146
values = [0, np.iinfo(np.uint64).max]
--- a/pandas:pandas/tests/indexes/numeric/test_setops.py
+++ b/pandas:pandas/tests/indexes/numeric/test_setops.py
@@ -17,11 +17,11 @@
@pytest.fixture
def index_large():
# large values used in TestUInt64Index where no compat needed with Int64/Float64
- large = [2 ** 63, 2 ** 63 + 10, 2 ** 63 + 15, 2 ** 63 + 20, 2 ** 63 + 25]
+ large = [2**63, 2**63 + 10, 2**63 + 15, 2**63 + 20, 2**63 + 25]
return UInt64Index(large)
class TestSetOps:
@pytest.mark.parametrize("dtype", ["f8", "u8", "i8"])
@@ -87,11 +87,11 @@
result = string_index.difference(float_index)
tm.assert_index_equal(result, string_index)
def test_intersection_uint64_outside_int64_range(self, index_large):
- other = Index([2 ** 63, 2 ** 63 + 5, 2 ** 63 + 10, 2 ** 63 + 15, 2 ** 63 + 20])
+ other = Index([2**63, 2**63 + 5, 2**63 + 10, 2**63 + 15, 2**63 + 20])
result = index_large.intersection(other)
expected = Index(np.sort(np.intersect1d(index_large.values, other.values)))
tm.assert_index_equal(result, expected)
result = other.intersection(index_large)
--- a/pandas:pandas/tests/indexes/timedeltas/test_constructors.py
+++ b/pandas:pandas/tests/indexes/timedeltas/test_constructors.py
@@ -49,11 +49,11 @@
def test_infer_from_tdi(self):
# GH#23539
# fast-path for inferring a frequency if the passed data already
# has one
- tdi = timedelta_range("1 second", periods=10 ** 7, freq="1s")
+ tdi = timedelta_range("1 second", periods=10**7, freq="1s")
result = TimedeltaIndex(tdi, freq="infer")
assert result.freq == tdi.freq
# check that inferred_freq was not called by checking that the
--- a/pandas:pandas/tests/indexes/timedeltas/test_timedelta.py
+++ b/pandas:pandas/tests/indexes/timedeltas/test_timedelta.py
@@ -103,11 +103,11 @@
def test_freq_conversion_always_floating(self):
# even if we have no NaTs, we get back float64; this matches TDA and Series
tdi = timedelta_range("1 Day", periods=30)
res = tdi.astype("m8[s]")
- expected = Index((tdi.view("i8") / 10 ** 9).astype(np.float64))
+ expected = Index((tdi.view("i8") / 10**9).astype(np.float64))
tm.assert_index_equal(res, expected)
# check this matches Series and TimedeltaArray
res = tdi._data.astype("m8[s]")
tm.assert_numpy_array_equal(res, expected._values)
--- a/pandas:pandas/tests/indexing/test_coercion.py
+++ b/pandas:pandas/tests/indexing/test_coercion.py
@@ -120,11 +120,11 @@
exp = pd.Series([1, val, 3, 4])
self._assert_setitem_series_conversion(obj, val, exp, exp_dtype)
@pytest.mark.parametrize(
- "val,exp_dtype", [(np.int32(1), np.int8), (np.int16(2 ** 9), np.int16)]
+ "val,exp_dtype", [(np.int32(1), np.int8), (np.int16(2**9), np.int16)]
)
def test_setitem_series_int8(self, val, exp_dtype, request):
obj = pd.Series([1, 2, 3, 4], dtype=np.int8)
assert obj.dtype == np.int8
--- a/pandas:pandas/tests/indexing/test_iloc.py
+++ b/pandas:pandas/tests/indexing/test_iloc.py
@@ -732,11 +732,11 @@
result = df.iloc[np.array([True] * len(mask), dtype=bool)]
tm.assert_frame_equal(result, df)
# the possibilities
locs = np.arange(4)
- nums = 2 ** locs
+ nums = 2**locs
reps = [bin(num) for num in nums]
df = DataFrame({"locs": locs, "nums": nums}, reps)
expected = {
(None, ""): "0b1100",
--- a/pandas:pandas/tests/indexing/test_loc.py
+++ b/pandas:pandas/tests/indexing/test_loc.py
@@ -1270,15 +1270,15 @@
expected = df.loc[Timedelta("0s") :, :]
result = df.loc["0s":, :]
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
- "val,expected", [(2 ** 63 - 1, Series([1])), (2 ** 63, Series([2]))]
+ "val,expected", [(2**63 - 1, Series([1])), (2**63, Series([2]))]
)
def test_loc_getitem_uint64_scalar(self, val, expected):
# see GH#19399
- df = DataFrame([1, 2], index=[2 ** 63 - 1, 2 ** 63])
+ df = DataFrame([1, 2], index=[2**63 - 1, 2**63])
result = df.loc[val]
expected.name = val
tm.assert_series_equal(result, expected)
@@ -1788,13 +1788,13 @@
class TestLocSetitemWithExpansion:
@pytest.mark.slow
def test_loc_setitem_with_expansion_large_dataframe(self):
# GH#10692
- result = DataFrame({"x": range(10 ** 6)}, dtype="int64")
+ result = DataFrame({"x": range(10**6)}, dtype="int64")
result.loc[len(result)] = len(result) + 1
- expected = DataFrame({"x": range(10 ** 6 + 1)}, dtype="int64")
+ expected = DataFrame({"x": range(10**6 + 1)}, dtype="int64")
tm.assert_frame_equal(result, expected)
def test_loc_setitem_empty_series(self):
# GH#5226
@@ -2677,14 +2677,14 @@
result2 = ser.loc[lev1[0], lev2[0], lev3[0]]
assert result2 == 6
class TestLocSeries:
- @pytest.mark.parametrize("val,expected", [(2 ** 63 - 1, 3), (2 ** 63, 4)])
+ @pytest.mark.parametrize("val,expected", [(2**63 - 1, 3), (2**63, 4)])
def test_loc_uint64(self, val, expected):
# see GH#19399
- ser = Series({2 ** 63 - 1: 3, 2 ** 63: 4})
+ ser = Series({2**63 - 1: 3, 2**63: 4})
assert ser.loc[val] == expected
def test_loc_getitem(self, string_series, datetime_series):
inds = string_series.index[[3, 4, 7]]
tm.assert_series_equal(string_series.loc[inds], string_series.reindex(inds))
--- a/pandas:pandas/tests/io/excel/test_writers.py
+++ b/pandas:pandas/tests/io/excel/test_writers.py
@@ -332,12 +332,12 @@
@pytest.mark.usefixtures("set_engine")
class TestExcelWriter:
def test_excel_sheet_size(self, path):
# GH 26080
- breaking_row_count = 2 ** 20 + 1
- breaking_col_count = 2 ** 14 + 1
+ breaking_row_count = 2**20 + 1
+ breaking_col_count = 2**14 + 1
# purposely using two arrays to prevent memory issues while testing
row_arr = np.zeros(shape=(breaking_row_count, 1))
col_arr = np.zeros(shape=(1, breaking_col_count))
row_df = DataFrame(row_arr)
col_df = DataFrame(col_arr)
--- a/pandas:pandas/tests/io/formats/test_eng_formatting.py
+++ b/pandas:pandas/tests/io/formats/test_eng_formatting.py
@@ -54,119 +54,119 @@
def test_exponents_with_eng_prefix(self):
formatter = fmt.EngFormatter(accuracy=3, use_eng_prefix=True)
f = np.sqrt(2)
in_out = [
- (f * 10 ** -24, " 1.414y"),
- (f * 10 ** -23, " 14.142y"),
- (f * 10 ** -22, " 141.421y"),
- (f * 10 ** -21, " 1.414z"),
- (f * 10 ** -20, " 14.142z"),
- (f * 10 ** -19, " 141.421z"),
- (f * 10 ** -18, " 1.414a"),
- (f * 10 ** -17, " 14.142a"),
- (f * 10 ** -16, " 141.421a"),
- (f * 10 ** -15, " 1.414f"),
- (f * 10 ** -14, " 14.142f"),
- (f * 10 ** -13, " 141.421f"),
- (f * 10 ** -12, " 1.414p"),
- (f * 10 ** -11, " 14.142p"),
- (f * 10 ** -10, " 141.421p"),
- (f * 10 ** -9, " 1.414n"),
- (f * 10 ** -8, " 14.142n"),
- (f * 10 ** -7, " 141.421n"),
- (f * 10 ** -6, " 1.414u"),
- (f * 10 ** -5, " 14.142u"),
- (f * 10 ** -4, " 141.421u"),
- (f * 10 ** -3, " 1.414m"),
- (f * 10 ** -2, " 14.142m"),
- (f * 10 ** -1, " 141.421m"),
- (f * 10 ** 0, " 1.414"),
- (f * 10 ** 1, " 14.142"),
- (f * 10 ** 2, " 141.421"),
- (f * 10 ** 3, " 1.414k"),
- (f * 10 ** 4, " 14.142k"),
- (f * 10 ** 5, " 141.421k"),
- (f * 10 ** 6, " 1.414M"),
- (f * 10 ** 7, " 14.142M"),
- (f * 10 ** 8, " 141.421M"),
- (f * 10 ** 9, " 1.414G"),
- (f * 10 ** 10, " 14.142G"),
- (f * 10 ** 11, " 141.421G"),
- (f * 10 ** 12, " 1.414T"),
- (f * 10 ** 13, " 14.142T"),
- (f * 10 ** 14, " 141.421T"),
- (f * 10 ** 15, " 1.414P"),
- (f * 10 ** 16, " 14.142P"),
- (f * 10 ** 17, " 141.421P"),
- (f * 10 ** 18, " 1.414E"),
- (f * 10 ** 19, " 14.142E"),
- (f * 10 ** 20, " 141.421E"),
- (f * 10 ** 21, " 1.414Z"),
- (f * 10 ** 22, " 14.142Z"),
- (f * 10 ** 23, " 141.421Z"),
- (f * 10 ** 24, " 1.414Y"),
- (f * 10 ** 25, " 14.142Y"),
- (f * 10 ** 26, " 141.421Y"),
+ (f * 10**-24, " 1.414y"),
+ (f * 10**-23, " 14.142y"),
+ (f * 10**-22, " 141.421y"),
+ (f * 10**-21, " 1.414z"),
+ (f * 10**-20, " 14.142z"),
+ (f * 10**-19, " 141.421z"),
+ (f * 10**-18, " 1.414a"),
+ (f * 10**-17, " 14.142a"),
+ (f * 10**-16, " 141.421a"),
+ (f * 10**-15, " 1.414f"),
+ (f * 10**-14, " 14.142f"),
+ (f * 10**-13, " 141.421f"),
+ (f * 10**-12, " 1.414p"),
+ (f * 10**-11, " 14.142p"),
+ (f * 10**-10, " 141.421p"),
+ (f * 10**-9, " 1.414n"),
+ (f * 10**-8, " 14.142n"),
+ (f * 10**-7, " 141.421n"),
+ (f * 10**-6, " 1.414u"),
+ (f * 10**-5, " 14.142u"),
+ (f * 10**-4, " 141.421u"),
+ (f * 10**-3, " 1.414m"),
+ (f * 10**-2, " 14.142m"),
+ (f * 10**-1, " 141.421m"),
+ (f * 10**0, " 1.414"),
+ (f * 10**1, " 14.142"),
+ (f * 10**2, " 141.421"),
+ (f * 10**3, " 1.414k"),
+ (f * 10**4, " 14.142k"),
+ (f * 10**5, " 141.421k"),
+ (f * 10**6, " 1.414M"),
+ (f * 10**7, " 14.142M"),
+ (f * 10**8, " 141.421M"),
+ (f * 10**9, " 1.414G"),
+ (f * 10**10, " 14.142G"),
+ (f * 10**11, " 141.421G"),
+ (f * 10**12, " 1.414T"),
+ (f * 10**13, " 14.142T"),
+ (f * 10**14, " 141.421T"),
+ (f * 10**15, " 1.414P"),
+ (f * 10**16, " 14.142P"),
+ (f * 10**17, " 141.421P"),
+ (f * 10**18, " 1.414E"),
+ (f * 10**19, " 14.142E"),
+ (f * 10**20, " 141.421E"),
+ (f * 10**21, " 1.414Z"),
+ (f * 10**22, " 14.142Z"),
+ (f * 10**23, " 141.421Z"),
+ (f * 10**24, " 1.414Y"),
+ (f * 10**25, " 14.142Y"),
+ (f * 10**26, " 141.421Y"),
]
self.compare_all(formatter, in_out)
def test_exponents_without_eng_prefix(self):
formatter = fmt.EngFormatter(accuracy=4, use_eng_prefix=False)
f = np.pi
in_out = [
- (f * 10 ** -24, " 3.1416E-24"),
- (f * 10 ** -23, " 31.4159E-24"),
- (f * 10 ** -22, " 314.1593E-24"),
- (f * 10 ** -21, " 3.1416E-21"),
- (f * 10 ** -20, " 31.4159E-21"),
- (f * 10 ** -19, " 314.1593E-21"),
- (f * 10 ** -18, " 3.1416E-18"),
- (f * 10 ** -17, " 31.4159E-18"),
- (f * 10 ** -16, " 314.1593E-18"),
- (f * 10 ** -15, " 3.1416E-15"),
- (f * 10 ** -14, " 31.4159E-15"),
- (f * 10 ** -13, " 314.1593E-15"),
- (f * 10 ** -12, " 3.1416E-12"),
- (f * 10 ** -11, " 31.4159E-12"),
- (f * 10 ** -10, " 314.1593E-12"),
- (f * 10 ** -9, " 3.1416E-09"),
- (f * 10 ** -8, " 31.4159E-09"),
- (f * 10 ** -7, " 314.1593E-09"),
- (f * 10 ** -6, " 3.1416E-06"),
- (f * 10 ** -5, " 31.4159E-06"),
- (f * 10 ** -4, " 314.1593E-06"),
- (f * 10 ** -3, " 3.1416E-03"),
- (f * 10 ** -2, " 31.4159E-03"),
- (f * 10 ** -1, " 314.1593E-03"),
- (f * 10 ** 0, " 3.1416E+00"),
- (f * 10 ** 1, " 31.4159E+00"),
- (f * 10 ** 2, " 314.1593E+00"),
- (f * 10 ** 3, " 3.1416E+03"),
- (f * 10 ** 4, " 31.4159E+03"),
- (f * 10 ** 5, " 314.1593E+03"),
- (f * 10 ** 6, " 3.1416E+06"),
- (f * 10 ** 7, " 31.4159E+06"),
- (f * 10 ** 8, " 314.1593E+06"),
- (f * 10 ** 9, " 3.1416E+09"),
- (f * 10 ** 10, " 31.4159E+09"),
- (f * 10 ** 11, " 314.1593E+09"),
- (f * 10 ** 12, " 3.1416E+12"),
- (f * 10 ** 13, " 31.4159E+12"),
- (f * 10 ** 14, " 314.1593E+12"),
- (f * 10 ** 15, " 3.1416E+15"),
- (f * 10 ** 16, " 31.4159E+15"),
- (f * 10 ** 17, " 314.1593E+15"),
- (f * 10 ** 18, " 3.1416E+18"),
- (f * 10 ** 19, " 31.4159E+18"),
- (f * 10 ** 20, " 314.1593E+18"),
- (f * 10 ** 21, " 3.1416E+21"),
- (f * 10 ** 22, " 31.4159E+21"),
- (f * 10 ** 23, " 314.1593E+21"),
- (f * 10 ** 24, " 3.1416E+24"),
- (f * 10 ** 25, " 31.4159E+24"),
- (f * 10 ** 26, " 314.1593E+24"),
+ (f * 10**-24, " 3.1416E-24"),
+ (f * 10**-23, " 31.4159E-24"),
+ (f * 10**-22, " 314.1593E-24"),
+ (f * 10**-21, " 3.1416E-21"),
+ (f * 10**-20, " 31.4159E-21"),
+ (f * 10**-19, " 314.1593E-21"),
+ (f * 10**-18, " 3.1416E-18"),
+ (f * 10**-17, " 31.4159E-18"),
+ (f * 10**-16, " 314.1593E-18"),
+ (f * 10**-15, " 3.1416E-15"),
+ (f * 10**-14, " 31.4159E-15"),
+ (f * 10**-13, " 314.1593E-15"),
+ (f * 10**-12, " 3.1416E-12"),
+ (f * 10**-11, " 31.4159E-12"),
+ (f * 10**-10, " 314.1593E-12"),
+ (f * 10**-9, " 3.1416E-09"),
+ (f * 10**-8, " 31.4159E-09"),
+ (f * 10**-7, " 314.1593E-09"),
+ (f * 10**-6, " 3.1416E-06"),
+ (f * 10**-5, " 31.4159E-06"),
+ (f * 10**-4, " 314.1593E-06"),
+ (f * 10**-3, " 3.1416E-03"),
+ (f * 10**-2, " 31.4159E-03"),
+ (f * 10**-1, " 314.1593E-03"),
+ (f * 10**0, " 3.1416E+00"),
+ (f * 10**1, " 31.4159E+00"),
+ (f * 10**2, " 314.1593E+00"),
+ (f * 10**3, " 3.1416E+03"),
+ (f * 10**4, " 31.4159E+03"),
+ (f * 10**5, " 314.1593E+03"),
+ (f * 10**6, " 3.1416E+06"),
+ (f * 10**7, " 31.4159E+06"),
+ (f * 10**8, " 314.1593E+06"),
+ (f * 10**9, " 3.1416E+09"),
+ (f * 10**10, " 31.4159E+09"),
+ (f * 10**11, " 314.1593E+09"),
+ (f * 10**12, " 3.1416E+12"),
+ (f * 10**13, " 31.4159E+12"),
+ (f * 10**14, " 314.1593E+12"),
+ (f * 10**15, " 3.1416E+15"),
+ (f * 10**16, " 31.4159E+15"),
+ (f * 10**17, " 314.1593E+15"),
+ (f * 10**18, " 3.1416E+18"),
+ (f * 10**19, " 31.4159E+18"),
+ (f * 10**20, " 314.1593E+18"),
+ (f * 10**21, " 3.1416E+21"),
+ (f * 10**22, " 31.4159E+21"),
+ (f * 10**23, " 314.1593E+21"),
+ (f * 10**24, " 3.1416E+24"),
+ (f * 10**25, " 31.4159E+24"),
+ (f * 10**26, " 314.1593E+24"),
]
self.compare_all(formatter, in_out)
def test_rounding(self):
formatter = fmt.EngFormatter(accuracy=3, use_eng_prefix=True)
--- a/pandas:pandas/tests/io/json/test_ujson.py
+++ b/pandas:pandas/tests/io/json/test_ujson.py
@@ -427,17 +427,17 @@
def test_datetime_units(self):
val = datetime.datetime(2013, 8, 17, 21, 17, 12, 215504)
stamp = Timestamp(val)
roundtrip = ujson.decode(ujson.encode(val, date_unit="s"))
- assert roundtrip == stamp.value // 10 ** 9
+ assert roundtrip == stamp.value // 10**9
roundtrip = ujson.decode(ujson.encode(val, date_unit="ms"))
- assert roundtrip == stamp.value // 10 ** 6
+ assert roundtrip == stamp.value // 10**6
roundtrip = ujson.decode(ujson.encode(val, date_unit="us"))
- assert roundtrip == stamp.value // 10 ** 3
+ assert roundtrip == stamp.value // 10**3
roundtrip = ujson.decode(ujson.encode(val, date_unit="ns"))
assert roundtrip == stamp.value
msg = "Invalid value 'foo' for option 'date_unit'"
@@ -628,11 +628,11 @@
def test_loads_non_str_bytes_raises(self):
msg = "Expected 'str' or 'bytes'"
with pytest.raises(TypeError, match=msg):
ujson.loads(None)
- @pytest.mark.parametrize("val", [3590016419, 2 ** 31, 2 ** 32, (2 ** 32) - 1])
+ @pytest.mark.parametrize("val", [3590016419, 2**31, 2**32, (2**32) - 1])
def test_decode_number_with_32bit_sign_bit(self, val):
# Test that numbers that fit within 32 bits but would have the
# sign bit set (2**31 <= x < 2**32) are decoded properly.
doc = f'{{"id": {val}}}'
assert ujson.decode(doc)["id"] == val
--- a/pandas:pandas/tests/io/parser/common/test_ints.py
+++ b/pandas:pandas/tests/io/parser/common/test_ints.py
@@ -191,11 +191,11 @@
expected = DataFrame([str(val)])
tm.assert_frame_equal(result, expected)
@skip_pyarrow
-@pytest.mark.parametrize("exp_data", [[str(-1), str(2 ** 63)], [str(2 ** 63), str(-1)]])
+@pytest.mark.parametrize("exp_data", [[str(-1), str(2**63)], [str(2**63), str(-1)]])
def test_numeric_range_too_wide(all_parsers, exp_data):
# No numerical dtype can hold both negative and uint64
# values, so they should be cast as string.
parser = all_parsers
data = "\n".join(exp_data)
--- a/pandas:pandas/tests/io/parser/test_na_values.py
+++ b/pandas:pandas/tests/io/parser/test_na_values.py
@@ -466,16 +466,16 @@
@skip_pyarrow
@pytest.mark.parametrize(
"data,kwargs,expected",
[
(
- str(2 ** 63) + "\n" + str(2 ** 63 + 1),
- {"na_values": [2 ** 63]},
- DataFrame([str(2 ** 63), str(2 ** 63 + 1)]),
- ),
- (str(2 ** 63) + ",1" + "\n,2", {}, DataFrame([[str(2 ** 63), 1], ["", 2]])),
- (str(2 ** 63) + "\n1", {"na_values": [2 ** 63]}, DataFrame([np.nan, 1])),
+ str(2**63) + "\n" + str(2**63 + 1),
+ {"na_values": [2**63]},
+ DataFrame([str(2**63), str(2**63 + 1)]),
+ ),
+ (str(2**63) + ",1" + "\n,2", {}, DataFrame([[str(2**63), 1], ["", 2]])),
+ (str(2**63) + "\n1", {"na_values": [2**63]}, DataFrame([np.nan, 1])),
],
)
def test_na_values_uint64(all_parsers, data, kwargs, expected):
# see gh-14983
parser = all_parsers
--- a/pandas:pandas/tests/io/pytables/test_append.py
+++ b/pandas:pandas/tests/io/pytables/test_append.py
@@ -76,14 +76,14 @@
),
"u16": Series(
np.random.randint(0, high=65535, size=5), dtype=np.uint16
),
"u32": Series(
- np.random.randint(0, high=2 ** 30, size=5), dtype=np.uint32
+ np.random.randint(0, high=2**30, size=5), dtype=np.uint32
),
"u64": Series(
- [2 ** 58, 2 ** 59, 2 ** 60, 2 ** 61, 2 ** 62],
+ [2**58, 2**59, 2**60, 2**61, 2**62],
dtype=np.uint64,
),
},
index=np.arange(5),
)
--- a/pandas:pandas/tests/io/pytables/test_compat.py
+++ b/pandas:pandas/tests/io/pytables/test_compat.py
@@ -21,11 +21,11 @@
t0 = 1_561_105_000.0
testsamples = [
{"c0": t0, "c1": "aaaaa", "c2": 1},
{"c0": t0 + 1, "c1": "bbbbb", "c2": 2},
- {"c0": t0 + 2, "c1": "ccccc", "c2": 10 ** 5},
+ {"c0": t0 + 2, "c1": "ccccc", "c2": 10**5},
{"c0": t0 + 3, "c1": "ddddd", "c2": 4_294_967_295},
]
objname = "pandas_test_timeseries"
--- a/pandas:pandas/tests/io/test_sql.py
+++ b/pandas:pandas/tests/io/test_sql.py
@@ -357,11 +357,11 @@
@pytest.fixture
def test_frame3():
columns = ["index", "A", "B"]
data = [
- ("2000-01-03 00:00:00", 2 ** 31 - 1, -1.987670),
+ ("2000-01-03 00:00:00", 2**31 - 1, -1.987670),
("2000-01-04 00:00:00", -29, -0.0412318367011),
("2000-01-05 00:00:00", 20000, 0.731167677815),
("2000-01-06 00:00:00", -290867, 1.56762092543),
]
return DataFrame(data, columns=columns)
@@ -1554,11 +1554,11 @@
# Bool column with NA values becomes object
assert issubclass(df.BoolColWithNull.dtype.type, object)
def test_bigint(self):
# int64 should be converted to BigInteger, GH7433
- df = DataFrame(data={"i64": [2 ** 62]})
+ df = DataFrame(data={"i64": [2**62]})
df.to_sql("test_bigint", self.conn, index=False)
result = sql.read_sql_table("test_bigint", self.conn)
tm.assert_frame_equal(df, result)
@@ -1796,11 +1796,11 @@
res = sql.read_sql_table("test_time3", self.conn)
tm.assert_frame_equal(df, res)
def test_mixed_dtype_insert(self):
# see GH6509
- s1 = Series(2 ** 25 + 1, dtype=np.int32)
+ s1 = Series(2**25 + 1, dtype=np.int32)
s2 = Series(0.0, dtype=np.float32)
df = DataFrame({"s1": s1, "s2": s2})
# write and read again
df.to_sql("test_read_write", self.conn, index=False)
--- a/pandas:pandas/tests/io/test_stata.py
+++ b/pandas:pandas/tests/io/test_stata.py
@@ -475,13 +475,13 @@
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index("index"), formatted)
def test_read_write_dta13(self):
- s1 = Series(2 ** 9, dtype=np.int16)
- s2 = Series(2 ** 17, dtype=np.int32)
- s3 = Series(2 ** 33, dtype=np.int64)
+ s1 = Series(2**9, dtype=np.int16)
+ s2 = Series(2**17, dtype=np.int32)
+ s3 = Series(2**33, dtype=np.int64)
original = DataFrame({"int16": s1, "int32": s2, "int64": s3})
original.index.name = "index"
formatted = original
formatted["int64"] = formatted["int64"].astype(np.float64)
@@ -608,12 +608,12 @@
tm.assert_frame_equal(written_and_read_again.set_index("index"), original)
def test_large_value_conversion(self):
s0 = Series([1, 99], dtype=np.int8)
s1 = Series([1, 127], dtype=np.int8)
- s2 = Series([1, 2 ** 15 - 1], dtype=np.int16)
- s3 = Series([1, 2 ** 63 - 1], dtype=np.int64)
+ s2 = Series([1, 2**15 - 1], dtype=np.int16)
+ s3 = Series([1, 2**63 - 1], dtype=np.int64)
original = DataFrame({"s0": s0, "s1": s1, "s2": s2, "s3": s3})
original.index.name = "index"
with tm.ensure_clean() as path:
with tm.assert_produces_warning(PossiblePrecisionLoss):
original.to_stata(path)
@@ -697,14 +697,14 @@
@pytest.mark.parametrize("byteorder", [">", "<"])
def test_bool_uint(self, byteorder, version):
s0 = Series([0, 1, True], dtype=np.bool_)
s1 = Series([0, 1, 100], dtype=np.uint8)
s2 = Series([0, 1, 255], dtype=np.uint8)
- s3 = Series([0, 1, 2 ** 15 - 100], dtype=np.uint16)
- s4 = Series([0, 1, 2 ** 16 - 1], dtype=np.uint16)
- s5 = Series([0, 1, 2 ** 31 - 100], dtype=np.uint32)
- s6 = Series([0, 1, 2 ** 32 - 1], dtype=np.uint32)
+ s3 = Series([0, 1, 2**15 - 100], dtype=np.uint16)
+ s4 = Series([0, 1, 2**16 - 1], dtype=np.uint16)
+ s5 = Series([0, 1, 2**31 - 100], dtype=np.uint32)
+ s6 = Series([0, 1, 2**32 - 1], dtype=np.uint32)
original = DataFrame(
{"s0": s0, "s1": s1, "s2": s2, "s3": s3, "s4": s4, "s5": s5, "s6": s6}
)
original.index.name = "index"
@@ -1991,11 +1991,11 @@
tm.assert_frame_equal(chunk, df.iloc[j * 100 : (j + 1) * 100])
def test_precision_loss():
df = DataFrame(
- [[sum(2 ** i for i in range(60)), sum(2 ** i for i in range(52))]],
+ [[sum(2**i for i in range(60)), sum(2**i for i in range(52))]],
columns=["big", "little"],
)
with tm.ensure_clean() as path:
with tm.assert_produces_warning(
PossiblePrecisionLoss, match="Column converted from int64 to float64"
--- a/pandas:pandas/tests/plotting/test_converter.py
+++ b/pandas:pandas/tests/plotting/test_converter.py
@@ -227,11 +227,11 @@
rs = self.dtc.convert(Index([ts - Day(1), ts]).to_pydatetime(), None, None)
assert rs[1] == xp
def test_conversion_float(self):
- rtol = 0.5 * 10 ** -9
+ rtol = 0.5 * 10**-9
rs = self.dtc.convert(Timestamp("2012-1-1 01:02:03", tz="UTC"), None, None)
xp = converter.dates.date2num(Timestamp("2012-1-1 01:02:03", tz="UTC"))
tm.assert_almost_equal(rs, xp, rtol=rtol)
@@ -275,11 +275,11 @@
# issue 18478
result = self.tc(time)
assert result == format_expected
def test_dateindex_conversion(self):
- rtol = 10 ** -9
+ rtol = 10**-9
for freq in ("B", "L", "S"):
dateindex = tm.makeDateIndex(k=10, freq=freq)
rs = self.dtc.convert(dateindex, None, None)
xp = converter.dates.date2num(dateindex._mpl_repr())
--- a/pandas:pandas/tests/reductions/test_reductions.py
+++ b/pandas:pandas/tests/reductions/test_reductions.py
@@ -211,12 +211,12 @@
@pytest.mark.parametrize(
"start,stop,step",
[
(0, 400, 3),
(500, 0, -6),
- (-(10 ** 6), 10 ** 6, 4),
- (10 ** 6, -(10 ** 6), -4),
+ (-(10**6), 10**6, 4),
+ (10**6, -(10**6), -4),
(0, 10, 20),
],
)
def test_max_min_range(self, start, stop, step):
# GH#17607
@@ -1452,20 +1452,20 @@
expected3 = Series(expected3, dtype="category")
tm.assert_series_equal(result, expected3)
@pytest.mark.parametrize(
"dropna, expected1, expected2",
- [(True, [2 ** 63], [1, 2 ** 63]), (False, [2 ** 63], [1, 2 ** 63])],
+ [(True, [2**63], [1, 2**63]), (False, [2**63], [1, 2**63])],
)
def test_mode_intoverflow(self, dropna, expected1, expected2):
# Test for uint64 overflow.
- s = Series([1, 2 ** 63, 2 ** 63], dtype=np.uint64)
+ s = Series([1, 2**63, 2**63], dtype=np.uint64)
result = s.mode(dropna)
expected1 = Series(expected1, dtype=np.uint64)
tm.assert_series_equal(result, expected1)
- s = Series([1, 2 ** 63], dtype=np.uint64)
+ s = Series([1, 2**63], dtype=np.uint64)
result = s.mode(dropna)
expected2 = Series(expected2, dtype=np.uint64)
tm.assert_series_equal(result, expected2)
def test_mode_sortwarning(self):
--- a/pandas:pandas/tests/reductions/test_stat_reductions.py
+++ b/pandas:pandas/tests/reductions/test_stat_reductions.py
@@ -127,11 +127,11 @@
s = Series([1, 2, 3, None, 5])
f(s)
# GH#2888
items = [0]
- items.extend(range(2 ** 40, 2 ** 40 + 1000))
+ items.extend(range(2**40, 2**40 + 1000))
s = Series(items, dtype="int64")
tm.assert_almost_equal(float(f(s)), float(alternate(s.values)))
# check date range
if check_objects:
--- a/pandas:pandas/tests/reshape/test_pivot.py
+++ b/pandas:pandas/tests/reshape/test_pivot.py
@@ -1990,11 +1990,11 @@
@pytest.mark.slow
def test_pivot_number_of_levels_larger_than_int32(self):
# GH 20601
df = DataFrame(
- {"ind1": np.arange(2 ** 16), "ind2": np.arange(2 ** 16), "count": 0}
+ {"ind1": np.arange(2**16), "ind2": np.arange(2**16), "count": 0}
)
msg = "Unstacked DataFrame is too big, causing int32 overflow"
with pytest.raises(ValueError, match=msg):
df.pivot_table(
--- a/pandas:pandas/tests/scalar/test_na_scalar.py
+++ b/pandas:pandas/tests/scalar/test_na_scalar.py
@@ -98,11 +98,11 @@
)
@pytest.mark.parametrize("asarray", [True, False])
def test_pow_special(value, asarray):
if asarray:
value = np.array([value])
- result = NA ** value
+ result = NA**value
if asarray:
result = result[0]
else:
# this assertion isn't possible for ndarray.
@@ -115,11 +115,11 @@
)
@pytest.mark.parametrize("asarray", [True, False])
def test_rpow_special(value, asarray):
if asarray:
value = np.array([value])
- result = value ** NA
+ result = value**NA
if asarray:
result = result[0]
elif not isinstance(value, (np.float_, np.bool_, np.int_)):
# this assertion isn't possible with asarray=True
@@ -131,11 +131,11 @@
@pytest.mark.parametrize("value", [-1, -1.0, np.int_(-1), np.float_(-1)])
@pytest.mark.parametrize("asarray", [True, False])
def test_rpow_minus_one(value, asarray):
if asarray:
value = np.array([value])
- result = value ** NA
+ result = value**NA
if asarray:
result = result[0]
assert pd.isna(result)
--- a/pandas:pandas/tests/scalar/timedelta/test_arithmetic.py
+++ b/pandas:pandas/tests/scalar/timedelta/test_arithmetic.py
@@ -399,13 +399,13 @@
def test_td_div_td64_non_nano(self):
# truediv
td = Timedelta("1 days 2 hours 3 ns")
result = td / np.timedelta64(1, "D")
- assert result == td.value / (86400 * 10 ** 9)
+ assert result == td.value / (86400 * 10**9)
result = td / np.timedelta64(1, "s")
- assert result == td.value / 10 ** 9
+ assert result == td.value / 10**9
result = td / np.timedelta64(1, "ns")
assert result == td.value
# floordiv
td = Timedelta("1 days 2 hours 3 ns")
@@ -654,11 +654,11 @@
expected = np.array([10, np.nan])
tm.assert_numpy_array_equal(res, expected)
def test_td_rfloordiv_intarray(self):
# deprecated GH#19761, enforced GH#29797
- ints = np.array([1349654400, 1349740800, 1349827200, 1349913600]) * 10 ** 9
+ ints = np.array([1349654400, 1349740800, 1349827200, 1349913600]) * 10**9
msg = "Invalid dtype"
with pytest.raises(TypeError, match=msg):
ints // Timedelta(1, unit="s")
--- a/pandas:pandas/tests/scalar/timestamp/test_arithmetic.py
+++ b/pandas:pandas/tests/scalar/timestamp/test_arithmetic.py
@@ -58,11 +58,11 @@
# xref https://github.com/pandas-dev/pandas/issues/14080
# used to crash, so check for proper overflow exception
stamp = Timestamp("2000/1/1")
- offset_overflow = to_offset("D") * 100 ** 5
+ offset_overflow = to_offset("D") * 100**5
with pytest.raises(OverflowError, match=lmsg):
stamp + offset_overflow
with pytest.raises(OverflowError, match=msg):
--- a/pandas:pandas/tests/series/methods/test_fillna.py
+++ b/pandas:pandas/tests/series/methods/test_fillna.py
@@ -235,11 +235,11 @@
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
- result = obj.fillna(np.timedelta64(10 ** 9))
+ result = obj.fillna(np.timedelta64(10**9))
expected = Series(
[
timedelta(seconds=1),
timedelta(0),
timedelta(1),
--- a/pandas:pandas/tests/series/methods/test_isin.py
+++ b/pandas:pandas/tests/series/methods/test_isin.py
@@ -20,11 +20,11 @@
# GH#16012
# This specific issue has to have a series over 1e6 in len, but the
# comparison array (in_list) must be large enough so that numpy doesn't
# do a manual masking trick that will avoid this issue altogether
- s = Series(list("abcdefghijk" * 10 ** 5))
+ s = Series(list("abcdefghijk" * 10**5))
# If numpy doesn't do the manual comparison/mask, these
# unorderable mixed types are what cause the exception in numpy
in_list = [-1, "a", "b", "G", "Y", "Z", "E", "K", "E", "S", "I", "R", "R"] * 6
assert s.isin(in_list).sum() == 200000
--- a/pandas:pandas/tests/series/methods/test_rank.py
+++ b/pandas:pandas/tests/series/methods/test_rank.py
@@ -481,8 +481,8 @@
@pytest.mark.single
@pytest.mark.high_memory
def test_pct_max_many_rows():
# GH 18271
- s = Series(np.arange(2 ** 24 + 1))
+ s = Series(np.arange(2**24 + 1))
result = s.rank(pct=True).max()
assert result == 1
--- a/pandas:pandas/tests/series/test_constructors.py
+++ b/pandas:pandas/tests/series/test_constructors.py
@@ -1569,32 +1569,32 @@
result = Series(range(5), dtype=dtype)
tm.assert_series_equal(result, expected)
def test_constructor_range_overflows(self):
# GH#30173 range objects that overflow int64
- rng = range(2 ** 63, 2 ** 63 + 4)
+ rng = range(2**63, 2**63 + 4)
ser = Series(rng)
expected = Series(list(rng))
tm.assert_series_equal(ser, expected)
assert list(ser) == list(rng)
assert ser.dtype == np.uint64
- rng2 = range(2 ** 63 + 4, 2 ** 63, -1)
+ rng2 = range(2**63 + 4, 2**63, -1)
ser2 = Series(rng2)
expected2 = Series(list(rng2))
tm.assert_series_equal(ser2, expected2)
assert list(ser2) == list(rng2)
assert ser2.dtype == np.uint64
- rng3 = range(-(2 ** 63), -(2 ** 63) - 4, -1)
+ rng3 = range(-(2**63), -(2**63) - 4, -1)
ser3 = Series(rng3)
expected3 = Series(list(rng3))
tm.assert_series_equal(ser3, expected3)
assert list(ser3) == list(rng3)
assert ser3.dtype == object
- rng4 = range(2 ** 73, 2 ** 73 + 4)
+ rng4 = range(2**73, 2**73 + 4)
ser4 = Series(rng4)
expected4 = Series(list(rng4))
tm.assert_series_equal(ser4, expected4)
assert list(ser4) == list(rng4)
assert ser4.dtype == object
--- a/pandas:pandas/tests/test_algos.py
+++ b/pandas:pandas/tests/test_algos.py
@@ -267,24 +267,24 @@
codes, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(codes, expected_codes)
tm.assert_numpy_array_equal(uniques, expected_uniques)
def test_uint64_factorize(self, writable):
- data = np.array([2 ** 64 - 1, 1, 2 ** 64 - 1], dtype=np.uint64)
+ data = np.array([2**64 - 1, 1, 2**64 - 1], dtype=np.uint64)
data.setflags(write=writable)
expected_codes = np.array([0, 1, 0], dtype=np.intp)
- expected_uniques = np.array([2 ** 64 - 1, 1], dtype=np.uint64)
+ expected_uniques = np.array([2**64 - 1, 1], dtype=np.uint64)
codes, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(codes, expected_codes)
tm.assert_numpy_array_equal(uniques, expected_uniques)
def test_int64_factorize(self, writable):
- data = np.array([2 ** 63 - 1, -(2 ** 63), 2 ** 63 - 1], dtype=np.int64)
+ data = np.array([2**63 - 1, -(2**63), 2**63 - 1], dtype=np.int64)
data.setflags(write=writable)
expected_codes = np.array([0, 1, 0], dtype=np.intp)
- expected_uniques = np.array([2 ** 63 - 1, -(2 ** 63)], dtype=np.int64)
+ expected_uniques = np.array([2**63 - 1, -(2**63)], dtype=np.int64)
codes, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(codes, expected_codes)
tm.assert_numpy_array_equal(uniques, expected_uniques)
@@ -355,21 +355,21 @@
tm.assert_index_equal(result[1], expected[1], exact=True)
def test_deprecate_order(self):
# gh 19727 - check warning is raised for deprecated keyword, order.
# Test not valid once order keyword is removed.
- data = np.array([2 ** 63, 1, 2 ** 63], dtype=np.uint64)
+ data = np.array([2**63, 1, 2**63], dtype=np.uint64)
with pytest.raises(TypeError, match="got an unexpected keyword"):
algos.factorize(data, order=True)
with tm.assert_produces_warning(False):
algos.factorize(data)
@pytest.mark.parametrize(
"data",
[
np.array([0, 1, 0], dtype="u8"),
- np.array([-(2 ** 63), 1, -(2 ** 63)], dtype="i8"),
+ np.array([-(2**63), 1, -(2**63)], dtype="i8"),
np.array(["__nan__", "foo", "__nan__"], dtype="object"),
],
)
def test_parametrized_factorize_na_value_default(self, data):
# arrays that include the NA default for that type, but isn't used.
@@ -382,12 +382,12 @@
@pytest.mark.parametrize(
"data, na_value",
[
(np.array([0, 1, 0, 2], dtype="u8"), 0),
(np.array([1, 0, 1, 2], dtype="u8"), 1),
- (np.array([-(2 ** 63), 1, -(2 ** 63), 0], dtype="i8"), -(2 ** 63)),
- (np.array([1, -(2 ** 63), 1, 0], dtype="i8"), 1),
+ (np.array([-(2**63), 1, -(2**63), 0], dtype="i8"), -(2**63)),
+ (np.array([1, -(2**63), 1, 0], dtype="i8"), 1),
(np.array(["a", "", "a", "b"], dtype=object), "a"),
(np.array([(), ("a", 1), (), ("a", 2)], dtype=object), ()),
(np.array([("a", 1), (), ("a", 1), ("a", 2)], dtype=object), ("a", 1)),
],
)
@@ -598,12 +598,12 @@
result = algos.unique(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_uint64_overflow(self):
- s = Series([1, 2, 2 ** 63, 2 ** 63], dtype=np.uint64)
- exp = np.array([1, 2, 2 ** 63], dtype=np.uint64)
+ s = Series([1, 2, 2**63, 2**63], dtype=np.uint64)
+ exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(algos.unique(s), exp)
def test_nan_in_object_array(self):
duplicated_items = ["a", np.nan, "c", "c"]
result = pd.unique(duplicated_items)
@@ -1278,18 +1278,18 @@
result = s_typed.value_counts(normalize=True, dropna=True)
expected = Series([0.6, 0.4], index=Series([2.0, 1.0], dtype=t))
tm.assert_series_equal(result, expected)
def test_value_counts_uint64(self):
- arr = np.array([2 ** 63], dtype=np.uint64)
- expected = Series([1], index=[2 ** 63])
+ arr = np.array([2**63], dtype=np.uint64)
+ expected = Series([1], index=[2**63])
result = algos.value_counts(arr)
tm.assert_series_equal(result, expected)
- arr = np.array([-1, 2 ** 63], dtype=object)
- expected = Series([1, 1], index=[-1, 2 ** 63])
+ arr = np.array([-1, 2**63], dtype=object)
+ expected = Series([1, 1], index=[-1, 2**63])
result = algos.value_counts(arr)
tm.assert_series_equal(result, expected)
@@ -1352,11 +1352,11 @@
6 + 6j,
]
),
np.array(["a", "b", "a", "e", "c", "b", "d", "a", "e", "f"], dtype=object),
np.array(
- [1, 2 ** 63, 1, 3 ** 5, 10, 2 ** 63, 39, 1, 3 ** 5, 7], dtype=np.uint64
+ [1, 2**63, 1, 3**5, 10, 2**63, 39, 1, 3**5, 7], dtype=np.uint64
),
],
)
def test_numeric_object_likes(self, case):
exp_first = np.array(
@@ -1573,20 +1573,20 @@
m.set_item(NAN1, 0)
m.set_item(NAN2, 0)
assert len(m) == 1 # NAN1 and NAN2 are equivalent
def test_lookup_overflow(self, writable):
- xs = np.array([1, 2, 2 ** 63], dtype=np.uint64)
+ xs = np.array([1, 2, 2**63], dtype=np.uint64)
# GH 21688 ensure we can deal with readonly memory views
xs.setflags(write=writable)
m = ht.UInt64HashTable()
m.map_locations(xs)
tm.assert_numpy_array_equal(m.lookup(xs), np.arange(len(xs), dtype=np.intp))
def test_get_unique(self):
- s = Series([1, 2, 2 ** 63, 2 ** 63], dtype=np.uint64)
- exp = np.array([1, 2, 2 ** 63], dtype=np.uint64)
+ s = Series([1, 2, 2**63, 2**63], dtype=np.uint64)
+ exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(s.unique(), exp)
@pytest.mark.parametrize("nvals", [0, 10]) # resizing to 0 is special case
@pytest.mark.parametrize(
"htable, uniques, dtype, safely_resizes",
@@ -1773,11 +1773,11 @@
def test_uint64_overflow(self):
exp = np.array([1, 2], dtype=np.float64)
for dtype in [np.float64, np.uint64]:
- s = Series([1, 2 ** 63], dtype=dtype)
+ s = Series([1, 2**63], dtype=dtype)
tm.assert_numpy_array_equal(algos.rank(s), exp)
def test_too_many_ndims(self):
arr = np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]]])
msg = "Array with ndim > 2 are not supported"
@@ -1787,15 +1787,15 @@
@pytest.mark.single
@pytest.mark.high_memory
def test_pct_max_many_rows(self):
# GH 18271
- values = np.arange(2 ** 24 + 1)
+ values = np.arange(2**24 + 1)
result = algos.rank(values, pct=True).max()
assert result == 1
- values = np.arange(2 ** 25 + 2).reshape(2 ** 24 + 1, 2)
+ values = np.arange(2**25 + 2).reshape(2**24 + 1, 2)
result = algos.rank(values, pct=True).max()
assert result == 1
def test_pad_backfill_object_segfault():
@@ -2359,16 +2359,16 @@
exp = Series(["foo"])
s = Series([1, "foo", "foo"])
tm.assert_series_equal(algos.mode(s), exp)
def test_uint64_overflow(self):
- exp = Series([2 ** 63], dtype=np.uint64)
- s = Series([1, 2 ** 63, 2 ** 63], dtype=np.uint64)
+ exp = Series([2**63], dtype=np.uint64)
+ s = Series([1, 2**63, 2**63], dtype=np.uint64)
tm.assert_series_equal(algos.mode(s), exp)
- exp = Series([1, 2 ** 63], dtype=np.uint64)
- s = Series([1, 2 ** 63], dtype=np.uint64)
+ exp = Series([1, 2**63], dtype=np.uint64)
+ s = Series([1, 2**63], dtype=np.uint64)
tm.assert_series_equal(algos.mode(s), exp)
def test_categorical(self):
c = Categorical([1, 2])
exp = c
--- a/pandas:pandas/tests/test_common.py
+++ b/pandas:pandas/tests/test_common.py
@@ -60,11 +60,11 @@
# check with no arg random state
assert com.random_state() is np.random
# check array-like
# GH32503
- state_arr_like = npr.randint(0, 2 ** 31, size=624, dtype="uint32")
+ state_arr_like = npr.randint(0, 2**31, size=624, dtype="uint32")
assert (
com.random_state(state_arr_like).uniform()
== npr.RandomState(state_arr_like).uniform()
)
--- a/pandas:pandas/tests/test_nanops.py
+++ b/pandas:pandas/tests/test_nanops.py
@@ -295,11 +295,11 @@
def test_nanmean_overflow(self):
# GH 10155
# In the previous implementation mean can overflow for int dtypes, it
# is now consistent with numpy
- for a in [2 ** 55, -(2 ** 55), 20150515061816532]:
+ for a in [2**55, -(2**55), 20150515061816532]:
s = Series(a, index=range(500), dtype=np.int64)
result = s.mean()
np_result = s.values.mean()
assert result == a
assert result == np_result
@@ -779,11 +779,11 @@
# xref GH10242
def setup_method(self, method):
# Samples from a normal distribution.
self.variance = variance = 3.0
- self.samples = self.prng.normal(scale=variance ** 0.5, size=100000)
+ self.samples = self.prng.normal(scale=variance**0.5, size=100000)
def test_nanvar_all_finite(self):
samples = self.samples
actual_variance = nanops.nanvar(samples)
tm.assert_almost_equal(actual_variance, self.variance, rtol=1e-2)
@@ -801,11 +801,11 @@
def test_nanstd_nans(self):
samples = np.nan * np.ones(2 * self.samples.shape[0])
samples[::2] = self.samples
actual_std = nanops.nanstd(samples, skipna=True)
- tm.assert_almost_equal(actual_std, self.variance ** 0.5, rtol=1e-2)
+ tm.assert_almost_equal(actual_std, self.variance**0.5, rtol=1e-2)
actual_std = nanops.nanvar(samples, skipna=False)
tm.assert_almost_equal(actual_std, np.nan, rtol=1e-2)
def test_nanvar_axis(self):
--- a/pandas:pandas/tests/tools/test_to_datetime.py
+++ b/pandas:pandas/tests/tools/test_to_datetime.py
@@ -935,11 +935,11 @@
@pytest.mark.parametrize("utc", [True, None])
@pytest.mark.parametrize("format", ["%Y%m%d %H:%M:%S", None])
@pytest.mark.parametrize("constructor", [list, tuple, np.array, Index, deque])
def test_to_datetime_cache(self, utc, format, constructor):
date = "20130101 00:00:00"
- test_dates = [date] * 10 ** 5
+ test_dates = [date] * 10**5
data = constructor(test_dates)
result = to_datetime(data, utc=utc, format=format, cache=True)
expected = to_datetime(data, utc=utc, format=format, cache=False)
@@ -965,11 +965,11 @@
@pytest.mark.parametrize("utc", [True, None])
@pytest.mark.parametrize("format", ["%Y%m%d %H:%M:%S", None])
def test_to_datetime_cache_series(self, utc, format):
date = "20130101 00:00:00"
- test_dates = [date] * 10 ** 5
+ test_dates = [date] * 10**5
data = Series(test_dates)
result = to_datetime(data, utc=utc, format=format, cache=True)
expected = to_datetime(data, utc=utc, format=format, cache=False)
tm.assert_series_equal(result, expected)
@@ -2574,11 +2574,11 @@
tools.should_cache(arg, unique_share, check_count)
def test_nullable_integer_to_datetime():
# Test for #30050
- ser = Series([1, 2, None, 2 ** 61, None])
+ ser = Series([1, 2, None, 2**61, None])
ser = ser.astype("Int64")
ser_copy = ser.copy()
res = to_datetime(ser, unit="ns")
--- a/pandas:pandas/tests/tools/test_to_timedelta.py
+++ b/pandas:pandas/tests/tools/test_to_timedelta.py
@@ -221,11 +221,11 @@
def test_to_timedelta_float(self):
# https://github.com/pandas-dev/pandas/issues/25077
arr = np.arange(0, 1, 1e-6)[-10:]
result = to_timedelta(arr, unit="s")
- expected_asi8 = np.arange(999990000, 10 ** 9, 1000, dtype="int64")
+ expected_asi8 = np.arange(999990000, 10**9, 1000, dtype="int64")
tm.assert_numpy_array_equal(result.asi8, expected_asi8)
def test_to_timedelta_coerce_strings_unit(self):
arr = np.array([1, 2, "error"], dtype=object)
result = to_timedelta(arr, unit="ns", errors="coerce")
--- a/pandas:pandas/tests/tslibs/test_fields.py
+++ b/pandas:pandas/tests/tslibs/test_fields.py
@@ -6,11 +6,11 @@
def test_fields_readonly():
# https://github.com/vaexio/vaex/issues/357
# fields functions shouldn't raise when we pass read-only data
- dtindex = np.arange(5, dtype=np.int64) * 10 ** 9 * 3600 * 24 * 32
+ dtindex = np.arange(5, dtype=np.int64) * 10**9 * 3600 * 24 * 32
dtindex.flags.writeable = False
result = fields.get_date_name_field(dtindex, "month_name")
expected = np.array(["January", "February", "March", "April", "May"], dtype=object)
tm.assert_numpy_array_equal(result, expected)
--- a/pandas:pandas/tests/util/test_assert_extension_array_equal.py
+++ b/pandas:pandas/tests/util/test_assert_extension_array_equal.py
@@ -33,11 +33,11 @@
tm.assert_extension_array_equal(arr1, arr2, **kwargs)
@pytest.mark.parametrize("decimals", range(10))
def test_assert_extension_array_equal_less_precise(decimals):
- rtol = 0.5 * 10 ** -decimals
+ rtol = 0.5 * 10**-decimals
arr1 = SparseArray([0.5, 0.123456])
arr2 = SparseArray([0.5, 0.123457])
if decimals >= 5:
msg = """\
--- a/pandas:pandas/tests/util/test_assert_series_equal.py
+++ b/pandas:pandas/tests/util/test_assert_series_equal.py
@@ -106,11 +106,11 @@
@pytest.mark.parametrize("data1,data2", [(0.12345, 0.12346), (0.1235, 0.1236)])
@pytest.mark.parametrize("dtype", ["float32", "float64"])
@pytest.mark.parametrize("decimals", [0, 1, 2, 3, 5, 10])
def test_less_precise(data1, data2, dtype, decimals):
- rtol = 10 ** -decimals
+ rtol = 10**-decimals
s1 = Series([data1], dtype=dtype)
s2 = Series([data2], dtype=dtype)
if (decimals == 5 or decimals == 10) or (
decimals >= 3 and abs(data1 - data2) >= 0.0005
[pillow - https://github.com/python-pillow/Pillow.git]
╰─> revision fa53b71afebe63fe5e0e9d80c84562246b42d2d3
--- a/pillow:Tests/32bit_segfault_check.py
+++ b/pillow:Tests/32bit_segfault_check.py
@@ -2,7 +2,7 @@
import sys
from PIL import Image
-if sys.maxsize < 2 ** 32:
+if sys.maxsize < 2**32:
im = Image.new("L", (999999, 999999), 0)
--- a/pillow:Tests/check_large_memory.py
+++ b/pillow:Tests/check_large_memory.py
@@ -21,11 +21,11 @@
YDIM = 32769
XDIM = 48000
-pytestmark = pytest.mark.skipif(sys.maxsize <= 2 ** 32, reason="requires 64-bit system")
+pytestmark = pytest.mark.skipif(sys.maxsize <= 2**32, reason="requires 64-bit system")
def _write_png(tmp_path, xdim, ydim):
f = str(tmp_path / "temp.png")
im = Image.new("L", (xdim, ydim), 0)
--- a/pillow:Tests/check_large_memory_numpy.py
+++ b/pillow:Tests/check_large_memory_numpy.py
@@ -17,11 +17,11 @@
YDIM = 32769
XDIM = 48000
-pytestmark = pytest.mark.skipif(sys.maxsize <= 2 ** 32, reason="requires 64-bit system")
+pytestmark = pytest.mark.skipif(sys.maxsize <= 2**32, reason="requires 64-bit system")
def _write_png(tmp_path, xdim, ydim):
dtype = np.uint8
a = np.zeros((xdim, ydim), dtype=dtype)
--- a/pillow:Tests/test_core_resources.py
+++ b/pillow:Tests/test_core_resources.py
@@ -108,13 +108,13 @@
# Try to construct new image
Image.new("RGB", (10, 10))
with pytest.raises(ValueError):
Image.core.set_blocks_max(-1)
- if sys.maxsize < 2 ** 32:
+ if sys.maxsize < 2**32:
with pytest.raises(ValueError):
- Image.core.set_blocks_max(2 ** 29)
+ Image.core.set_blocks_max(2**29)
@pytest.mark.skipif(is_pypy(), reason="Images not collected")
def test_set_blocks_max_stats(self):
Image.core.reset_stats()
Image.core.set_blocks_max(128)
--- a/pillow:Tests/test_file_libtiff.py
+++ b/pillow:Tests/test_file_libtiff.py
@@ -216,11 +216,11 @@
# 12: "double",
# Type: dummy value
values = {
2: "test",
3: 1,
- 4: 2 ** 20,
+ 4: 2**20,
5: TiffImagePlugin.IFDRational(100, 1),
12: 1.05,
}
new_ifd = TiffImagePlugin.ImageFileDirectory_v2()
@@ -989,11 +989,11 @@
def test_save_single_strip(self, tmp_path):
im = hopper("RGB").resize((256, 256))
out = str(tmp_path / "temp.tif")
- TiffImagePlugin.STRIP_SIZE = 2 ** 18
+ TiffImagePlugin.STRIP_SIZE = 2**18
try:
im.save(out, compression="tiff_adobe_deflate")
with Image.open(out) as im:
--- a/pillow:Tests/test_file_tiff_metadata.py
+++ b/pillow:Tests/test_file_tiff_metadata.py
@@ -256,11 +256,11 @@
def test_ifd_unsigned_rational(tmp_path):
im = hopper()
info = TiffImagePlugin.ImageFileDirectory_v2()
- max_long = 2 ** 32 - 1
+ max_long = 2**32 - 1
# 4 bytes unsigned long
numerator = max_long
info[41493] = TiffImagePlugin.IFDRational(numerator, 1)
@@ -288,24 +288,24 @@
def test_ifd_signed_rational(tmp_path):
im = hopper()
info = TiffImagePlugin.ImageFileDirectory_v2()
# pair of 4 byte signed longs
- numerator = 2 ** 31 - 1
- denominator = -(2 ** 31)
+ numerator = 2**31 - 1
+ denominator = -(2**31)
info[37380] = TiffImagePlugin.IFDRational(numerator, denominator)
out = str(tmp_path / "temp.tiff")
im.save(out, tiffinfo=info, compression="raw")
with Image.open(out) as reloaded:
assert numerator == reloaded.tag_v2[37380].numerator
assert denominator == reloaded.tag_v2[37380].denominator
- numerator = -(2 ** 31)
- denominator = 2 ** 31 - 1
+ numerator = -(2**31)
+ denominator = 2**31 - 1
info[37380] = TiffImagePlugin.IFDRational(numerator, denominator)
out = str(tmp_path / "temp.tiff")
im.save(out, tiffinfo=info, compression="raw")
@@ -313,20 +313,20 @@
with Image.open(out) as reloaded:
assert numerator == reloaded.tag_v2[37380].numerator
assert denominator == reloaded.tag_v2[37380].denominator
# out of bounds of 4 byte signed long
- numerator = -(2 ** 31) - 1
+ numerator = -(2**31) - 1
denominator = 1
info[37380] = TiffImagePlugin.IFDRational(numerator, denominator)
out = str(tmp_path / "temp.tiff")
im.save(out, tiffinfo=info, compression="raw")
with Image.open(out) as reloaded:
- assert 2 ** 31 - 1 == reloaded.tag_v2[37380].numerator
+ assert 2**31 - 1 == reloaded.tag_v2[37380].numerator
assert -1 == reloaded.tag_v2[37380].denominator
def test_ifd_signed_long(tmp_path):
im = hopper()
--- a/pillow:Tests/test_file_webp.py
+++ b/pillow:Tests/test_file_webp.py
@@ -125,11 +125,11 @@
similar to the original file.
"""
self._roundtrip(tmp_path, "P", 50.0)
- @pytest.mark.skipif(sys.maxsize <= 2 ** 32, reason="Requires 64-bit system")
+ @pytest.mark.skipif(sys.maxsize <= 2**32, reason="Requires 64-bit system")
def test_write_encoding_error_message(self, tmp_path):
temp_file = str(tmp_path / "temp.webp")
im = Image.new("RGB", (15000, 15000))
with pytest.raises(ValueError) as e:
im.save(temp_file, method=0)
--- a/pillow:Tests/test_image_access.py
+++ b/pillow:Tests/test_image_access.py
@@ -203,14 +203,14 @@
def test_signedness(self):
# see https://github.com/python-pillow/Pillow/issues/452
# pixelaccess is using signed int* instead of uint*
for mode in ("I;16", "I;16B"):
- self.check(mode, 2 ** 15 - 1)
- self.check(mode, 2 ** 15)
- self.check(mode, 2 ** 15 + 1)
- self.check(mode, 2 ** 16 - 1)
+ self.check(mode, 2**15 - 1)
+ self.check(mode, 2**15)
+ self.check(mode, 2**15 + 1)
+ self.check(mode, 2**16 - 1)
def test_p_putpixel_rgb_rgba(self):
for color in [(255, 0, 0), (255, 0, 0, 255)]:
im = Image.new("P", (1, 1), 0)
im.putpixel((0, 0), color)
@@ -384,11 +384,11 @@
@pytest.mark.parametrize("mode", IMAGE_MODES1 + IMAGE_MODES2)
def test_putpixel_overflow_error(self, mode):
im = hopper(mode)
with pytest.raises(OverflowError):
- im.putpixel((0, 0), 2 ** 80)
+ im.putpixel((0, 0), 2**80)
def test_putpixel_unrecognized_mode(self):
im = hopper("BGR;15")
with pytest.raises(ValueError, match="unrecognized image mode"):
im.putpixel((0, 0), 0)
--- a/pillow:Tests/test_image_putdata.py
+++ b/pillow:Tests/test_image_putdata.py
@@ -34,11 +34,11 @@
assert put(0xFFFFFFFF) == (255, 255, 255, 255)
assert put(0xFFFFFFFF) == (255, 255, 255, 255)
assert put(-1) == (255, 255, 255, 255)
assert put(-1) == (255, 255, 255, 255)
- if sys.maxsize > 2 ** 32:
+ if sys.maxsize > 2**32:
assert put(sys.maxsize) == (255, 255, 255, 255)
else:
assert put(sys.maxsize) == (255, 255, 255, 127)
--- a/pillow:Tests/test_imagecms.py
+++ b/pillow:Tests/test_imagecms.py
@@ -301,11 +301,11 @@
p = o.profile
def assert_truncated_tuple_equal(tup1, tup2, digits=10):
# Helper function to reduce precision of tuples of floats
# recursively and then check equality.
- power = 10 ** digits
+ power = 10**digits
def truncate_tuple(tuple_or_float):
return tuple(
truncate_tuple(val)
if isinstance(val, tuple)
--- a/pillow:Tests/test_imageqt.py
+++ b/pillow:Tests/test_imageqt.py
@@ -28,14 +28,14 @@
assert qRgb(0, 0, 0) == qRgba(0, 0, 0, 255)
def checkrgb(r, g, b):
val = ImageQt.rgb(r, g, b)
- val = val % 2 ** 24 # drop the alpha
+ val = val % 2**24 # drop the alpha
assert val >> 16 == r
- assert ((val >> 8) % 2 ** 8) == g
- assert val % 2 ** 8 == b
+ assert ((val >> 8) % 2**8) == g
+ assert val % 2**8 == b
checkrgb(0, 0, 0)
checkrgb(255, 0, 0)
checkrgb(0, 255, 0)
checkrgb(0, 0, 255)
--- a/pillow:Tests/test_imagestat.py
+++ b/pillow:Tests/test_imagestat.py
@@ -49,12 +49,12 @@
im = Image.new("L", (128, 128), 128)
st = ImageStat.Stat(im)
assert st.extrema[0] == (128, 128)
- assert st.sum[0] == 128 ** 3
- assert st.sum2[0] == 128 ** 4
+ assert st.sum[0] == 128**3
+ assert st.sum2[0] == 128**4
assert st.mean[0] == 128
assert st.median[0] == 128
assert st.rms[0] == 128
assert st.var[0] == 0
assert st.stddev[0] == 0
--- a/pillow:Tests/test_map.py
+++ b/pillow:Tests/test_map.py
@@ -34,11 +34,11 @@
im.tobytes()
Image.MAX_IMAGE_PIXELS = max_pixels
-@pytest.mark.skipif(sys.maxsize <= 2 ** 32, reason="Requires 64-bit system")
+@pytest.mark.skipif(sys.maxsize <= 2**32, reason="Requires 64-bit system")
def test_ysize():
numpy = pytest.importorskip("numpy", reason="NumPy not installed")
# Should not raise 'Integer overflow in ysize'
arr = numpy.zeros((46341, 46341), dtype=numpy.uint8)
--- a/pillow:src/PIL/BmpImagePlugin.py
+++ b/pillow:src/PIL/BmpImagePlugin.py
@@ -100,11 +100,11 @@
file_info["direction"] = 1 if file_info["y_flip"] else -1
file_info["width"] = i32(header_data, 0)
file_info["height"] = (
i32(header_data, 4)
if not file_info["y_flip"]
- else 2 ** 32 - i32(header_data, 4)
+ else 2**32 - i32(header_data, 4)
)
file_info["planes"] = i16(header_data, 8)
file_info["bits"] = i16(header_data, 10)
file_info["compression"] = i32(header_data, 12)
# byte size of pixel data
@@ -318,11 +318,11 @@
# bitmap header
if bitmap_header:
offset = 14 + header + colors * 4
file_size = offset + image
- if file_size > 2 ** 32 - 1:
+ if file_size > 2**32 - 1:
raise ValueError("File size is too large for the BMP format")
fp.write(
b"BM" # file type (magic)
+ o32(file_size) # file size
+ o32(0) # reserved
--- a/pillow:src/PIL/ImageStat.py
+++ b/pillow:src/PIL/ImageStat.py
@@ -89,11 +89,11 @@
v = []
for i in range(0, len(self.h), 256):
sum2 = 0.0
for j in range(256):
- sum2 += (j ** 2) * float(self.h[i + j])
+ sum2 += (j**2) * float(self.h[i + j])
v.append(sum2)
return v
def _getmean(self):
"""Get average pixel level for each layer"""
--- a/pillow:src/PIL/Jpeg2KImagePlugin.py
+++ b/pillow:src/PIL/Jpeg2KImagePlugin.py
@@ -130,11 +130,11 @@
def _res_to_dpi(num, denom, exp):
"""Convert JPEG2000's (numerator, denominator, exponent-base-10) resolution,
calculated as (num / denom) * 10^exp and stored in dots per meter,
to floating-point dots per inch."""
if denom != 0:
- return (254 * num * (10 ** exp)) / (10000 * denom)
+ return (254 * num * (10**exp)) / (10000 * denom)
def _parse_jp2_header(fp):
"""Parse the JP2 header box to extract size, component count,
color space information, and optionally DPI information,
--- a/pillow:src/PIL/PpmImagePlugin.py
+++ b/pillow:src/PIL/PpmImagePlugin.py
@@ -103,11 +103,11 @@
elif ix == 2:
# maxgrey
if s > 255:
if not mode == "L":
raise ValueError(f"Too many colors for band: {s}")
- if s < 2 ** 16:
+ if s < 2**16:
self.mode = "I"
rawmode = "I;16B"
else:
self.mode = "I"
rawmode = "I;32B"
@@ -124,11 +124,11 @@
if im.mode == "1":
rawmode, head = "1;I", b"P4"
elif im.mode == "L":
rawmode, head = "L", b"P5"
elif im.mode == "I":
- if im.getextrema()[1] < 2 ** 16:
+ if im.getextrema()[1] < 2**16:
rawmode, head = "I;16B", b"P5"
else:
rawmode, head = "I;32B", b"P5"
elif im.mode == "RGB":
rawmode, head = "RGB", b"P6"
--- a/pillow:src/PIL/TiffImagePlugin.py
+++ b/pillow:src/PIL/TiffImagePlugin.py
@@ -569,13 +569,13 @@
TiffTags.RATIONAL
if all(v >= 0 for v in values)
else TiffTags.SIGNED_RATIONAL
)
elif all(isinstance(v, int) for v in values):
- if all(0 <= v < 2 ** 16 for v in values):
+ if all(0 <= v < 2**16 for v in values):
self.tagtype[tag] = TiffTags.SHORT
- elif all(-(2 ** 15) < v < 2 ** 15 for v in values):
+ elif all(-(2**15) < v < 2**15 for v in values):
self.tagtype[tag] = TiffTags.SIGNED_SHORT
else:
self.tagtype[tag] = (
TiffTags.LONG
if all(v >= 0 for v in values)
@@ -726,11 +726,11 @@
return tuple(combine(num, denom) for num, denom in zip(vals[::2], vals[1::2]))
@_register_writer(5)
def write_rational(self, *values):
return b"".join(
- self._pack("2L", *_limit_rational(frac, 2 ** 32 - 1)) for frac in values
+ self._pack("2L", *_limit_rational(frac, 2**32 - 1)) for frac in values
)
@_register_loader(7, 1)
def load_undefined(self, data, legacy_api=True):
return data
@@ -749,11 +749,11 @@
return tuple(combine(num, denom) for num, denom in zip(vals[::2], vals[1::2]))
@_register_writer(10)
def write_signed_rational(self, *values):
return b"".join(
- self._pack("2l", *_limit_signed_rational(frac, 2 ** 31 - 1, -(2 ** 31)))
+ self._pack("2l", *_limit_signed_rational(frac, 2**31 - 1, -(2**31)))
for frac in values
)
def _ensure_read(self, fp, size):
ret = fp.read(size)
@@ -1629,11 +1629,11 @@
if rows_per_strip == 0:
rows_per_strip = 1
strip_byte_counts = 1 if stride == 0 else stride * rows_per_strip
strips_per_image = (im.size[1] + rows_per_strip - 1) // rows_per_strip
ifd[ROWSPERSTRIP] = rows_per_strip
- if strip_byte_counts >= 2 ** 16:
+ if strip_byte_counts >= 2**16:
ifd.tagtype[STRIPBYTECOUNTS] = TiffTags.LONG
ifd[STRIPBYTECOUNTS] = (strip_byte_counts,) * (strips_per_image - 1) + (
stride * im.size[1] - strip_byte_counts * (strips_per_image - 1),
)
ifd[STRIPOFFSETS] = tuple(
[pyanalyze - https://github.com/quora/pyanalyze.git]
╰─> revision b985cb9d1361f04fd4c759dcf9881d5965419e77
--- a/pyanalyze:pyanalyze/test_safe.py
+++ b/pyanalyze:pyanalyze/test_safe.py
@@ -14,11 +14,11 @@
def __iter__(self) -> Iterable[int]:
yield 1
class HasGetItemAndLen(object):
def __getitem__(self, i: int) -> int:
- return i ** 2
+ return i**2
def __len__(self) -> int:
return 1 << 15
class HasGetItem(object):
[pytest - https://github.com/pytest-dev/pytest.git]
╰─> revision 0696d3eda12fcbdb4e038d9031daa4c52ad39eee
--- a/pytest:src/_pytest/doctest.py
+++ b/pytest:src/_pytest/doctest.py
@@ -648,11 +648,11 @@
if exponent is None:
exponent = w.group("exponent2")
precision = 0 if fraction is None else len(fraction)
if exponent is not None:
precision -= int(exponent)
- if float(w.group()) == approx(float(g.group()), abs=10 ** -precision):
+ if float(w.group()) == approx(float(g.group()), abs=10**-precision):
# They're close enough. Replace the text we actually
# got with the text we want, so that it will match when we
# check the string literally.
got = (
got[: g.start() + offset] + w.group() + got[g.end() + offset :]
--- a/pytest:testing/test_assertrewrite.py
+++ b/pytest:testing/test_assertrewrite.py
@@ -1295,11 +1295,11 @@
result = pytester.runpytest()
result.stdout.fnmatch_lines(["*E*assert (1 + 1) == 3"])
@pytest.mark.skipif(
- sys.maxsize <= (2 ** 31 - 1), reason="Causes OverflowError on 32bit systems"
+ sys.maxsize <= (2**31 - 1), reason="Causes OverflowError on 32bit systems"
)
@pytest.mark.parametrize("offset", [-1, +1])
def test_source_mtime_long_long(pytester: Pytester, offset) -> None:
"""Support modification dates after 2038 in rewritten files (#4903).
@@ -1314,11 +1314,11 @@
"""
)
# use unsigned long timestamp which overflows signed long,
# which was the cause of the bug
# +1 offset also tests masking of 0xFFFFFFFF
- timestamp = 2 ** 32 + offset
+ timestamp = 2**32 + offset
os.utime(str(p), (timestamp, timestamp))
result = pytester.runpytest()
assert result.ret == 0
[scikit-lego - https://github.com/koaning/scikit-lego]
╰─> revision 10ac9ba996b2cacfce29ba9e7e5682176cdbca2b
--- a/scikit-lego:sklego/linear_model.py
+++ b/scikit-lego:sklego/linear_model.py
@@ -62,11 +62,11 @@
def _calc_wts(self, x_i):
distances = np.array(
[np.linalg.norm(self.X_[i, :] - x_i) for i in range(self.X_.shape[0])]
)
- weights = np.exp(-(distances ** 2) / self.sigma)
+ weights = np.exp(-(distances**2) / self.sigma)
if self.span:
weights = weights * (distances <= np.quantile(distances, q=self.span))
return weights
def predict(self, X):
@@ -169,11 +169,11 @@
def deadzone(errors):
if self.effect == "linear":
return np.where(errors > self.threshold, errors, np.zeros(errors.shape))
if self.effect == "quadratic":
return np.where(
- errors > self.threshold, errors ** 2, np.zeros(errors.shape)
+ errors > self.threshold, errors**2, np.zeros(errors.shape)
)
def training_loss(weights):
diff = np.abs(np.dot(X, weights) - y)
if self.relative:
@@ -566,11 +566,11 @@
"""
def _regularized_loss(self, params):
return +self.alpha * self.l1_ratio * np.sum(
np.abs(params)
- ) + 0.5 * self.alpha * (1 - self.l1_ratio) * np.sum(params ** 2)
+ ) + 0.5 * self.alpha * (1 - self.l1_ratio) * np.sum(params**2)
def _regularized_grad_loss(self, params):
return (
+self.alpha * self.l1_ratio * np.sign(params)
+ self.alpha * (1 - self.l1_ratio) * params
--- a/scikit-lego:tests/test_estimators/test_imbalanced_linear_regression.py
+++ b/scikit-lego:tests/test_estimators/test_imbalanced_linear_regression.py
@@ -65,11 +65,11 @@
X, y = _create_dataset(coefs, intercept)
imbs = [
ImbalancedLinearRegression(alpha=alpha, l1_ratio=0.5).fit(X, y)
for alpha in range(4)
]
- coef_size = np.array([np.sum(imb.coef_ ** 2) for imb in imbs])
+ coef_size = np.array([np.sum(imb.coef_**2) for imb in imbs])
for i in range(3):
assert coef_size[i] >= coef_size[i + 1]
--- a/scikit-lego:tests/test_estimators/test_lad_regression.py
+++ b/scikit-lego:tests/test_estimators/test_lad_regression.py
@@ -63,11 +63,11 @@
def test_coefs_and_intercept__no_noise_regularization(coefs, intercept):
"""Test model with regularization. The size of the coef vector should shrink the larger alpha gets."""
X, y = _create_dataset(coefs, intercept)
lads = [LADRegression(alpha=alpha, l1_ratio=0.0).fit(X, y) for alpha in range(3)]
- coef_size = np.array([np.sum(lad.coef_ ** 2) for lad in lads])
+ coef_size = np.array([np.sum(lad.coef_**2) for lad in lads])
for i in range(2):
assert coef_size[i] >= coef_size[i + 1]
--- a/scikit-lego:tests/test_estimators/test_quantile_regression.py
+++ b/scikit-lego:tests/test_estimators/test_quantile_regression.py
@@ -76,11 +76,11 @@
X, y = _create_dataset(coefs, intercept)
quants = [
QuantileRegression(alpha=alpha, l1_ratio=0.0).fit(X, y) for alpha in range(3)
]
- coef_size = np.array([np.sum(quant.coef_ ** 2) for quant in quants])
+ coef_size = np.array([np.sum(quant.coef_**2) for quant in quants])
for i in range(2):
assert coef_size[i] >= coef_size[i + 1]
--- a/scikit-lego:tests/test_meta/test_decay_estimator.py
+++ b/scikit-lego:tests/test_meta/test_decay_estimator.py
@@ -30,11 +30,11 @@
"mod", flatten([LinearRegression(), Ridge(), DecisionTreeRegressor()])
)
def test_decay_weight_regr(mod):
X, y = np.random.normal(0, 1, (100, 100)), np.random.normal(0, 1, (100,))
mod = DecayEstimator(mod, decay=0.95).fit(X, y)
- assert mod.weights_[0] == pytest.approx(0.95 ** 100, abs=0.001)
+ assert mod.weights_[0] == pytest.approx(0.95**100, abs=0.001)
@pytest.mark.parametrize(
"mod", flatten([DecisionTreeClassifier(), LogisticRegression(solver="lbfgs")])
)
@@ -42,11 +42,11 @@
X, y = (
np.random.normal(0, 1, (100, 100)),
(np.random.normal(0, 1, (100,)) < 0).astype(np.int),
)
mod = DecayEstimator(mod, decay=0.95).fit(X, y)
- assert mod.weights_[0] == pytest.approx(0.95 ** 100, abs=0.001)
+ assert mod.weights_[0] == pytest.approx(0.95**100, abs=0.001)
@pytest.mark.parametrize("mod", flatten([KNeighborsClassifier()]))
def test_throw_warning(mod):
X, y = np.random.normal(0, 1, (100, 100)), np.random.normal(0, 1, (100,)) < 0
--- a/scikit-lego:tests/test_preprocessing/test_orthogonal_transformer.py
+++ b/scikit-lego:tests/test_preprocessing/test_orthogonal_transformer.py
@@ -37,11 +37,11 @@
)
def test_estimator_checks(test_fn):
test_fn(OrthogonalTransformer.__name__, OrthogonalTransformer())
-def check_is_orthogonal(X, tolerance=10 ** -5):
+def check_is_orthogonal(X, tolerance=10**-5):
"""
Check if X is an column orthogonal matrix. If X is column orthogonal, then X.T * X equals the identity matrix
:param X: Matrix to check
:param tolerance: Tolerance for difference caused by rounding
:raises: AssertionError if X is not orthogonal
@@ -50,11 +50,11 @@
if np.max(np.abs(diff_with_eye)) > tolerance:
raise AssertionError("X is not orthogonal")
-def check_is_orthonormal(X, tolerance=10 ** -5):
+def check_is_orthonormal(X, tolerance=10**-5):
"""
Check if X is an column orthonormal matrix, i.e. orthogonal and with columns with norm 1.
:param X: Matrix to check
:param tolerance: Tolerance for difference caused by rounding
:raises: AssertionError if X is not orthonormal
[sqlalchemy - https://github.com/sqlalchemy/sqlalchemy.git]
╰─> revision 88b920bc630c5d46cad6bab0c4cf9905856f16a9
--- a/sqlalchemy:lib/sqlalchemy/dialects/mysql/enumerated.py
+++ b/sqlalchemy:lib/sqlalchemy/dialects/mysql/enumerated.py
@@ -167,14 +167,14 @@
"Can't use the blank value '' in a SET without "
"setting retrieve_as_bitwise=True"
)
if self.retrieve_as_bitwise:
self._bitmap = dict(
- (value, 2 ** idx) for idx, value in enumerate(self.values)
+ (value, 2**idx) for idx, value in enumerate(self.values)
)
self._bitmap.update(
- (2 ** idx, value) for idx, value in enumerate(self.values)
+ (2**idx, value) for idx, value in enumerate(self.values)
)
length = max([len(v) for v in values] + [0])
kw.setdefault("length", length)
super(SET, self).__init__(**kw)
--- a/sqlalchemy:lib/sqlalchemy/dialects/oracle/cx_oracle.py
+++ b/sqlalchemy:lib/sqlalchemy/dialects/oracle/cx_oracle.py
@@ -1336,11 +1336,11 @@
this id will be passed to do_begin_twophase(), do_rollback_twophase(),
do_commit_twophase(). its format is unspecified.
"""
- id_ = random.randint(0, 2 ** 128)
+ id_ = random.randint(0, 2**128)
return (0x1234, "%032x" % id_, "%032x" % 9)
def do_executemany(self, cursor, statement, parameters, context=None):
if isinstance(parameters, tuple):
parameters = list(parameters)
--- a/sqlalchemy:lib/sqlalchemy/engine/default.py
+++ b/sqlalchemy:lib/sqlalchemy/engine/default.py
@@ -696,11 +696,11 @@
This id will be passed to do_begin_twophase(), do_rollback_twophase(),
do_commit_twophase(). Its format is unspecified.
"""
- return "_sa_%032x" % random.randint(0, 2 ** 128)
+ return "_sa_%032x" % random.randint(0, 2**128)
def do_savepoint(self, connection, name):
connection.execute(expression.SavepointClause(name))
def do_rollback_to_savepoint(self, connection, name):
--- a/sqlalchemy:test/dialect/mysql/test_types.py
+++ b/sqlalchemy:test/dialect/mysql/test_types.py
@@ -533,11 +533,11 @@
Column("b7", mysql.MSBit(63)),
Column("b8", mysql.MSBit(64)),
)
return bit_table
- i, j, k, l = 255, 2 ** 32 - 1, 2 ** 63 - 1, 2 ** 64 - 1
+ i, j, k, l = 255, 2**32 - 1, 2**63 - 1, 2**64 - 1
@testing.combinations(
(([0] * 8), None),
([None, None, 0, None, None, None, None, None], None),
(([1] * 8), None),
--- a/sqlalchemy:test/dialect/oracle/test_reflection.py
+++ b/sqlalchemy:test/dialect/oracle/test_reflection.py
@@ -833,11 +833,11 @@
common = {
"always": False,
"start": 1,
"increment": 1,
"on_null": False,
- "maxvalue": 10 ** 28 - 1,
+ "maxvalue": 10**28 - 1,
"minvalue": 1,
"cycle": False,
"cache": 20,
"order": False,
}
--- a/sqlalchemy:test/dialect/postgresql/test_reflection.py
+++ b/sqlalchemy:test/dialect/postgresql/test_reflection.py
@@ -1928,17 +1928,17 @@
),
)
elif col["name"] == "id2":
is_true("identity" in col)
exp = default.copy()
- exp.update(maxvalue=2 ** 31 - 1)
+ exp.update(maxvalue=2**31 - 1)
eq_(col["identity"], exp)
elif col["name"] == "id3":
is_true("identity" in col)
exp = default.copy()
- exp.update(maxvalue=2 ** 63 - 1)
+ exp.update(maxvalue=2**63 - 1)
eq_(col["identity"], exp)
elif col["name"] == "id4":
is_true("identity" in col)
exp = default.copy()
- exp.update(maxvalue=2 ** 15 - 1)
+ exp.update(maxvalue=2**15 - 1)
eq_(col["identity"], exp)
[tox - https://github.com/tox-dev/tox.git]
╰─> revision 4cf816c97de18dca9d99dc86fec341c1d2c4a20a
--- a/tox:src/tox/helper/get_version.py
+++ b/tox:src/tox/helper/get_version.py
@@ -8,11 +8,11 @@
info = {
"executable": sys.executable,
"implementation": platform.python_implementation(),
"version_info": list(sys.version_info),
"version": sys.version,
- "is_64": sys.maxsize > 2 ** 32,
+ "is_64": sys.maxsize > 2**32,
"sysplatform": sys.platform,
"os_sep": os.sep,
"extra_version_info": getattr(sys, "pypy_version_info", None),
}
info_as_dump = json.dumps(info)
--- a/tox:src/tox/interpreters/py_spec.py
+++ b/tox:src/tox/interpreters/py_spec.py
@@ -69,8 +69,8 @@
CURRENT = PythonSpec(
"pypy" if tox.constants.INFO.IS_PYPY else "python",
sys.version_info[0],
sys.version_info[1],
- 64 if sys.maxsize > 2 ** 32 else 32,
+ 64 if sys.maxsize > 2**32 else 32,
sys.executable,
)
[virtualenv - https://github.com/pypa/virtualenv.git]
╰─> revision df8e5f29939258913817b47bb5c01767fc333cd5
--- a/virtualenv:src/virtualenv/discovery/py_info.py
+++ b/virtualenv:src/virtualenv/discovery/py_info.py
@@ -43,11 +43,11 @@
if self.implementation == "PyPy":
self.pypy_version_info = tuple(u(i) for i in sys.pypy_version_info)
# this is a tuple in earlier, struct later, unify to our own named tuple
self.version_info = VersionInfo(*list(u(i) for i in sys.version_info))
- self.architecture = 64 if sys.maxsize > 2 ** 32 else 32
+ self.architecture = 64 if sys.maxsize > 2**32 else 32
self.version = u(sys.version)
self.os = u(os.name)
# information about the prefix - determines python home
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment