diff --git a/Lib/_pylong.py b/Lib/_pylong.py
index a8bf5cd3e63..be1acd17ce3 100644
--- a/Lib/_pylong.py
+++ b/Lib/_pylong.py
@@ -348,7 +348,7 @@ def _dec_str_to_int_inner(s, *, GUARD=8):
     # off-by-1 error too low. So we add 2 instead of 1 if chopping lost
     # a fraction > 0.9.

-    # The "WASI" test platfrom can complain about `len(s)` if it's too
+    # The "WASI" test platform can complain about `len(s)` if it's too
     # large to fit in its idea of "an index-sized integer".
     lenS = s.__len__()
     log_ub = lenS * _LOG_10_BASE_256
@@ -613,7 +613,7 @@ def int_divmod(a, b):
 # ctx.prec = max(n.adjusted() - p256.adjusted(), 0) + GUARD
 # hi = +n * +recip # unary `+` chops to ctx.prec digits
 #
-# we have 3 visible chopped operationa, but there's also a 4th:
+# we have 3 visible chopped operations, but there's also a 4th:
 # precomputing a truncated `recip` as part of setup.
 #
 # So the computed product is exactly equal to the true product times
@@ -703,7 +703,7 @@ def int_divmod(a, b):
 # Enable for brute-force testing of compute_powers(). This takes about a
 # minute, because it tries millions of cases.
 if 0:
-    def consumer(w, limir, need_hi):
+    def consumer(w, limit, need_hi):
         seen = set()
         need = set()
         def inner(w):
@@ -718,7 +718,7 @@ if 0:
             inner(lo)
             inner(hi)
         inner(w)
-        exp = compute_powers(w, 1, limir, need_hi=need_hi)
+        exp = compute_powers(w, 1, limit, need_hi=need_hi)
         assert exp.keys() == need

         from itertools import chain
diff --git a/Lib/_pyrepl/completing_reader.py b/Lib/_pyrepl/completing_reader.py
index 05770aaf506..e856bb9807c 100644
--- a/Lib/_pyrepl/completing_reader.py
+++ b/Lib/_pyrepl/completing_reader.py
@@ -91,7 +91,7 @@ def build_menu(
     #  D E F          B E
     #  G              C F
     #
-    # "fill" the table with empty words, so we always have the same amout
+    # "fill" the table with empty words, so we always have the same amount
     # of rows for each column
     missing = cols*rows - len(wordlist)
     wordlist = wordlist + ['']*missing
diff --git a/Lib/_pyrepl/unix_console.py b/Lib/_pyrepl/unix_console.py
index 18b2bba91c8..7b8f5a0298b 100644
--- a/Lib/_pyrepl/unix_console.py
+++ b/Lib/_pyrepl/unix_console.py
@@ -109,7 +109,7 @@ delayprog = re.compile(b"\\$<([0-9]+)((?:/|\\*){0,2})>")
 try:
     poll: type[select.poll] = select.poll
 except AttributeError:
-    # this is exactly the minumum necessary to support what we
+    # this is exactly the minimum necessary to support what we
     # do with poll objects
     class MinimalPoll:
         def __init__(self):
@@ -613,7 +613,7 @@ class UnixConsole(Console):

         # reuse the oldline as much as possible, but stop as soon as we
         # encounter an ESCAPE, because it might be the start of an escape
-        # sequene
+        # sequence
         while (
             x_coord < minlen
             and oldline[x_pos] == newline[x_pos]
diff --git a/Lib/_pyrepl/windows_console.py b/Lib/_pyrepl/windows_console.py
index ba9af36b8be..6c3f7031a74 100644
--- a/Lib/_pyrepl/windows_console.py
+++ b/Lib/_pyrepl/windows_console.py
@@ -231,7 +231,7 @@ class WindowsConsole(Console):

         # reuse the oldline as much as possible, but stop as soon as we
         # encounter an ESCAPE, because it might be the start of an escape
-        # sequene
+        # sequence
         while (
             x_coord < minlen
             and oldline[x_pos] == newline[x_pos]
diff --git a/Lib/dataclasses.py b/Lib/dataclasses.py
index 4cba606dd8d..141aa41c74d 100644
--- a/Lib/dataclasses.py
+++ b/Lib/dataclasses.py
@@ -656,7 +656,7 @@ def _init_fn(fields, std_fields, kw_only_fields, frozen, has_post_init,
     if kw_only_fields:
         # Add the keyword-only args. Because the * can only be added if
         # there's at least one keyword-only arg, there needs to be a test here
-        # (instead of just concatenting the lists together).
+        # (instead of just concatenating the lists together).
         _init_params += ['*']
         _init_params += [_init_param(f) for f in kw_only_fields]
     func_builder.add_fn('__init__',
diff --git a/Lib/test/libregrtest/main.py b/Lib/test/libregrtest/main.py
index 5148d307051..f2292c97cd8 100644
--- a/Lib/test/libregrtest/main.py
+++ b/Lib/test/libregrtest/main.py
@@ -536,7 +536,7 @@ class Regrtest:
             self._run_tests_mp(runtests, self.num_workers)
         else:
             # gh-117783: don't immortalize deferred objects when tracking
-            # refleaks. Only releveant for the free-threaded build.
+            # refleaks. Only relevant for the free-threaded build.
             with suppress_immortalization(runtests.hunt_refleak):
                 self.run_tests_sequentially(runtests)

diff --git a/Lib/test/libregrtest/single.py b/Lib/test/libregrtest/single.py
index adc8f1f4555..67cc9db54f7 100644
--- a/Lib/test/libregrtest/single.py
+++ b/Lib/test/libregrtest/single.py
@@ -305,7 +305,7 @@ def run_single_test(test_name: TestName, runtests: RunTests) -> TestResult:
     pgo = runtests.pgo
     try:
         # gh-117783: don't immortalize deferred objects when tracking
-        # refleaks. Only releveant for the free-threaded build.
+        # refleaks. Only relevant for the free-threaded build.
         with support.suppress_immortalization(runtests.hunt_refleak):
             _runtest(result, runtests)
     except:
diff --git a/Lib/test/support/asynchat.py b/Lib/test/support/asynchat.py
index 38c47a1fda6..a8c6b28a9e1 100644
--- a/Lib/test/support/asynchat.py
+++ b/Lib/test/support/asynchat.py
@@ -1,5 +1,5 @@
 # TODO: This module was deprecated and removed from CPython 3.12
-# Now it is a test-only helper. Any attempts to rewrite exising tests that
+# Now it is a test-only helper. Any attempts to rewrite existing tests that
 # are using this module and remove it completely are appreciated!
 # See: https://github.com/python/cpython/issues/72719

diff --git a/Lib/test/support/asyncore.py b/Lib/test/support/asyncore.py
index b397aca5568..870e4283764 100644
--- a/Lib/test/support/asyncore.py
+++ b/Lib/test/support/asyncore.py
@@ -1,5 +1,5 @@
 # TODO: This module was deprecated and removed from CPython 3.12
-# Now it is a test-only helper. Any attempts to rewrite exising tests that
+# Now it is a test-only helper. Any attempts to rewrite existing tests that
 # are using this module and remove it completely are appreciated!
 # See: https://github.com/python/cpython/issues/72719

diff --git a/Lib/test/support/bytecode_helper.py b/Lib/test/support/bytecode_helper.py
index 85bcd1f0f1c..f6426c3e285 100644
--- a/Lib/test/support/bytecode_helper.py
+++ b/Lib/test/support/bytecode_helper.py
@@ -71,7 +71,7 @@ class CompilationStepTestCase(unittest.TestCase):

     def assertInstructionsMatch(self, actual_seq, expected):
         # get an InstructionSequence and an expected list, where each
-        # entry is a label or an instruction tuple. Construct an expcted
+        # entry is a label or an instruction tuple. Construct an expected
         # instruction sequence and compare with the one given.

         self.assertIsInstance(expected, list)
diff --git a/Lib/test/test_ast/test_ast.py b/Lib/test/test_ast/test_ast.py
index 0a3edef4678..e83cdbcb78d 100644
--- a/Lib/test/test_ast/test_ast.py
+++ b/Lib/test/test_ast/test_ast.py
@@ -2701,13 +2701,13 @@ class EndPositionTests(unittest.TestCase):


 class NodeTransformerTests(ASTTestMixin, unittest.TestCase):
-    def assertASTTransformation(self, tranformer_class,
+    def assertASTTransformation(self, transformer_class,
                                 initial_code, expected_code):
         initial_ast = ast.parse(dedent(initial_code))
         expected_ast = ast.parse(dedent(expected_code))

-        tranformer = tranformer_class()
-        result_ast = ast.fix_missing_locations(tranformer.visit(initial_ast))
+        transformer = transformer_class()
+        result_ast = ast.fix_missing_locations(transformer.visit(initial_ast))

         self.assertASTEqual(result_ast, expected_ast)

diff --git a/Lib/test/test_asyncio/test_locks.py b/Lib/test/test_asyncio/test_locks.py
index 34509717f28..c3bff760f73 100644
--- a/Lib/test/test_asyncio/test_locks.py
+++ b/Lib/test/test_asyncio/test_locks.py
@@ -1194,14 +1194,14 @@ class SemaphoreTests(unittest.IsolatedAsyncioTestCase):
         self.assertEqual([2, 3], result)

     async def test_acquire_fifo_order_4(self):
-        # Test that a successfule `acquire()` will wake up multiple Tasks
+        # Test that a successful `acquire()` will wake up multiple Tasks
         # that were waiting in the Semaphore queue due to FIFO rules.
         sem = asyncio.Semaphore(0)
         result = []
         count = 0

         async def c1(result):
-            # First task immediatlly waits for semaphore. It will be awoken by c2.
+            # First task immediately waits for semaphore. It will be awoken by c2.
             self.assertEqual(sem._value, 0)
             await sem.acquire()
             # We should have woken up all waiting tasks now.
@@ -1475,7 +1475,7 @@ class BarrierTests(unittest.IsolatedAsyncioTestCase):
             # first time waiting
             await barrier.wait()

-            # after wainting once for all tasks
+            # after waiting once for all tasks
             if rewait_n > 0:
                 rewait_n -= 1
                 # wait again only for rewait tasks
diff --git a/Lib/test/test_asyncio/test_subprocess.py b/Lib/test/test_asyncio/test_subprocess.py
index 54501300a29..ec748b9bb3e 100644
--- a/Lib/test/test_asyncio/test_subprocess.py
+++ b/Lib/test/test_asyncio/test_subprocess.py
@@ -783,7 +783,7 @@ class SubprocessMixin:

     def test_subprocess_protocol_events(self):
         # gh-108973: Test that all subprocess protocol methods are called.
-        # The protocol methods are not called in a determistic order.
+        # The protocol methods are not called in a deterministic order.
         # The order depends on the event loop and the operating system.
         events = []
         fds = [1, 2]
diff --git a/Lib/test/test_capi/test_opt.py b/Lib/test/test_capi/test_opt.py
index 328b6424772..81544f5b8af 100644
--- a/Lib/test/test_capi/test_opt.py
+++ b/Lib/test/test_capi/test_opt.py
@@ -176,7 +176,7 @@ class TestExecutorInvalidation(unittest.TestCase):
             self.assertTrue(exe.is_valid())
         # Assert that the correct executors are invalidated
         # and check that nothing crashes when we invalidate
-        # an executor mutliple times.
+        # an executor multiple times.
         for i in (4,3,2,1,0):
             _testinternalcapi.invalidate_executors(objects[i])
             for exe in executors[i:]:
diff --git a/Lib/test/test_concurrent_futures/test_deadlock.py b/Lib/test/test_concurrent_futures/test_deadlock.py
index 3c30c4558c0..f60465f695b 100644
--- a/Lib/test/test_concurrent_futures/test_deadlock.py
+++ b/Lib/test/test_concurrent_futures/test_deadlock.py
@@ -236,7 +236,7 @@ class ExecutorDeadlockTest:
         executor_manager.join()

     def test_crash_big_data(self):
-        # Test that there is a clean exception instad of a deadlock when a
+        # Test that there is a clean exception instead of a deadlock when a
         # child process crashes while some data is being written into the
         # queue.
         # https://github.com/python/cpython/issues/94777
diff --git a/Lib/test/test_email/test__header_value_parser.py b/Lib/test/test_email/test__header_value_parser.py
index 5413319a414..95224e19f67 100644
--- a/Lib/test/test_email/test__header_value_parser.py
+++ b/Lib/test/test_email/test__header_value_parser.py
@@ -2773,7 +2773,7 @@ class TestParser(TestParserMixin, TestEmailBase):
             parser.get_msg_id("")
diff --git a/Lib/test/test_import/__init__.py b/Lib/test/test_import/__init__.py
index fd778ec216c..3d89d69955b 100644
--- a/Lib/test/test_import/__init__.py
+++ b/Lib/test/test_import/__init__.py
@@ -405,7 +405,7 @@ class ImportTests(unittest.TestCase):

     def test_double_const(self):
         # Importing double_const checks that float constants
-        # serialiazed by marshal as PYC files don't lose precision
+        # serialized by marshal as PYC files don't lose precision
         # (SF bug 422177).
         from test.test_import.data import double_const
         unload('test.test_import.data.double_const')
@@ -2926,7 +2926,7 @@ class SinglephaseInitTests(unittest.TestCase):
         # * alive in 1 interpreter (main)
         # * module def still in _PyRuntime.imports.extensions
         # * mod init func ran again
-        # * m_copy is NULL (claered when the interpreter was destroyed)
+        # * m_copy is NULL (cleared when the interpreter was destroyed)
         #   (was from main interpreter)
         # * module's global state was updated, not reset

@@ -3061,7 +3061,7 @@ class SinglephaseInitTests(unittest.TestCase):
         # * alive in 0 interpreters
         # * module def in _PyRuntime.imports.extensions
         # * mod init func ran for the first time (since reset, at least)
-        # * m_copy is NULL (claered when the interpreter was destroyed)
+        # * m_copy is NULL (cleared when the interpreter was destroyed)
         # * module's global state was initialized, not reset

         # Use a subinterpreter that sticks around.
diff --git a/Lib/xml/sax/handler.py b/Lib/xml/sax/handler.py
index e8d417e5194..3183c3fe96d 100644
--- a/Lib/xml/sax/handler.py
+++ b/Lib/xml/sax/handler.py
@@ -371,7 +371,7 @@ class LexicalHandler:

         name is the name of the document element type, public_id the
         public identifier of the DTD (or None if none were supplied)
-        and system_id the system identfier of the external subset (or
+        and system_id the system identifier of the external subset (or
         None if none were supplied)."""

     def endDTD(self):