tokenize.py
# Source Generated with Decompyle++
# File: tokenize.pyc (Python 3.13)

__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__credits__ = 'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro, Raymond Hettinger, Trent Nelson, Michael Foord'
from builtins import open as _builtin_open
from codecs import lookup, BOM_UTF8
import collections
import functools
from io import TextIOWrapper
import itertools as _itertools
import re
import sys
from token import *
from token import EXACT_TOKEN_TYPES
cookie_re = re.compile(r'^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)', re.ASCII)
blank_re = re.compile(br'^[ \t\f]*(?:[#\r\n]|$)', re.ASCII)
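# Illustrative example (editor's addition, not in the decompiled source):
# cookie_re extracts the encoding name from a PEP 263 coding cookie.
#     >>> cookie_re.match('# -*- coding: utf-8 -*-').group(1)
#     'utf-8'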
import token
__all__ = token.__all__ + [
    'tokenize',
    'generate_tokens',
    'detect_encoding',
    'untokenize',
    'TokenInfo']
del token

class TokenInfo(collections.namedtuple('TokenInfo', 'type string start end line')):
    '''TokenInfo'''

    def __repr__(self):
        annotated_type = '%d (%s)' % (self.type, tok_name[self.type])
        return ('TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)' %
                self._replace(type=annotated_type))

    @property
    def exact_type(self):
        # Map a generic OP token to its exact token type (e.g. '+' -> PLUS).
        if self.type == OP and self.string in EXACT_TOKEN_TYPES:
            return EXACT_TOKEN_TYPES[self.string]
        else:
            return self.type
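# Illustrative example (editor's addition): exact_type narrows a generic OP
# token to its specific type; OP and PLUS come from the token module.
#     >>> tok = TokenInfo(OP, '+', (1, 1), (1, 2), '1+1\n')
#     >>> tok.exact_type == PLUS
#     True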

def group(*choices):
    return '(' + '|'.join(choices) + ')'


def any(*choices):
    return group(*choices) + '*'


def maybe(*choices):
    return group(*choices) + '?'

Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'\w+'
Hexnumber = '0[xX](?:_?[0-9a-fA-F])+'
Binnumber = '0[bB](?:_?[01])+'
Octnumber = '0[oO](?:_?[0-7])+'
Decnumber = '(?:0(?:_?0)*|[1-9](?:_?[0-9])*)'
Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
Exponent = '[eE][-+]?[0-9](?:_?[0-9])*'
Pointfloat = group(r'[0-9](?:_?[0-9])*\.(?:[0-9](?:_?[0-9])*)?', r'\.[0-9](?:_?[0-9])*') + maybe(Exponent)
Expfloat = '[0-9](?:_?[0-9])*' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group('[0-9](?:_?[0-9])*[jJ]', Floatnumber + '[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)
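# Illustrative examples (editor's addition): the Number pattern accepts
# underscore-separated digit groups in all numeric literal forms.
#     >>> bool(re.fullmatch(Number, '0x_FF'))
#     True
#     >>> bool(re.fullmatch(Number, '1_000.5e-3j'))
#     True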

def _all_string_prefixes():
    # Valid prefixes are listed lowercase and without permutations ('fr'
    # but not 'rf'); all case and order variants are generated below.
    _valid_string_prefixes = ['b', 'r', 'u', 'f', 'br', 'fr']
    result = {''}
    for prefix in _valid_string_prefixes:
        for t in _itertools.permutations(prefix):
            # For each permutation, add every upper/lower-case variant.
            for u in _itertools.product(*[(c, c.upper()) for c in t]):
                result.add(''.join(u))
    return result

@functools.lru_cache
def _compile(expr):
    return re.compile(expr, re.UNICODE)
StringPrefix = group(*_all_string_prefixes())
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group(StringPrefix + "'''", StringPrefix + '"""')
String = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'", StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
Special = group(*map(re.escape, sorted(EXACT_TOKEN_TYPES, reverse=True)))
Funny = group(r'\r?\n', Special)
PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken
ContStr = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" + group("'", r'\\\r?\n'), StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' + group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n|\Z', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
endpats = {}
for _prefix in _all_string_prefixes():
    endpats[_prefix + "'"] = Single
    endpats[_prefix + '"'] = Double
    endpats[_prefix + "'''"] = Single3
    endpats[_prefix + '"""'] = Double3
del _prefix

# A set of all single- and triple-quoted string prefixes, including the
# opening quotes.
single_quoted = set()
triple_quoted = set()
for t in _all_string_prefixes():
    for u in (t + '"', t + "'"):
        single_quoted.add(u)
    for u in (t + '"""', t + "'''"):
        triple_quoted.add(u)
del t, u

tabsize = 8
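# Illustrative lookups (editor's addition): endpats maps an opening quote,
# with any prefix, to the pattern matching the rest of the string literal;
# the quoted sets contain every case/order variant of each prefix.
#     >>> endpats["rb'"] is Single
#     True
#     >>> "Rb'''" in triple_quoted
#     True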
                
class TokenError(Exception):
    pass


class StopTokenizing(Exception):
    pass


class Untokenizer:

    def __init__(self):
        self.tokens = []
        self.prev_row = 1
        self.prev_col = 0
        self.encoding = None

                    
    def add_whitespace(self, start):
        row, col = start
        if row < self.prev_row or row == self.prev_row and col < self.prev_col:
            raise ValueError('start ({},{}) precedes previous end ({},{})'
                             .format(row, col, self.prev_row, self.prev_col))
        row_offset = row - self.prev_row
        if row_offset:
            self.tokens.append('\\\n' * row_offset)
            self.prev_col = 0
        col_offset = col - self.prev_col
        if col_offset:
            self.tokens.append(' ' * col_offset)

                    
    def untokenize(self, iterable):
        it = iter(iterable)
        indents = []
        startline = False
        for t in it:
            if len(t) == 2:
                # Old-style 2-tuple token: handle the rest via compat().
                self.compat(t, it)
                break
            tok_type, token, start, end, line = t
            if tok_type == ENCODING:
                self.encoding = token
                continue
            if tok_type == ENDMARKER:
                break
            if tok_type == INDENT:
                indents.append(token)
                continue
            elif tok_type == DEDENT:
                indents.pop()
                self.prev_row, self.prev_col = end
                continue
            elif tok_type in (NEWLINE, NL):
                startline = True
            elif startline and indents:
                indent = indents[-1]
                if start[1] >= len(indent):
                    self.tokens.append(indent)
                    self.prev_col = len(indent)
                startline = False
            self.add_whitespace(start)
            self.tokens.append(token)
            self.prev_row, self.prev_col = end
            if tok_type in (NEWLINE, NL):
                self.prev_row += 1
                self.prev_col = 0
        return ''.join(self.tokens)

                    
    def compat(self, token, iterable):
        indents = []
        toks_append = self.tokens.append
        startline = token[0] in (NEWLINE, NL)
        prevstring = False
        for tok in _itertools.chain([token], iterable):
            toknum, tokval = tok[:2]
            if toknum == ENCODING:
                self.encoding = tokval
                continue
            if toknum in (NAME, NUMBER):
                tokval += ' '
            # Insert a space between two consecutive strings.
            if toknum == STRING:
                if prevstring:
                    tokval = ' ' + tokval
                prevstring = True
            else:
                prevstring = False
            if toknum == INDENT:
                indents.append(tokval)
                continue
            elif toknum == DEDENT:
                indents.pop()
                continue
            elif toknum in (NEWLINE, NL):
                startline = True
            elif startline and indents:
                toks_append(indents[-1])
                startline = False
            toks_append(tokval)


                
def untokenize(iterable):
    ut = Untokenizer()
    out = ut.untokenize(iterable)
    if ut.encoding is not None:
        out = out.encode(ut.encoding)
    return out
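# Round-trip sketch (editor's addition): for simple input, untokenize()
# reproduces the original source bytes exactly.
#     >>> import io
#     >>> src = b'x = 1\n'
#     >>> untokenize(tokenize(io.BytesIO(src).readline)) == src
#     True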

                
def _get_normal_name(orig_enc):
    # Imitates get_normal_name in tokenizer.c; only the first 12
    # characters of the encoding name matter.
    enc = orig_enc[:12].lower().replace('_', '-')
    if enc == 'utf-8' or enc.startswith('utf-8-'):
        return 'utf-8'
    if enc in ('latin-1', 'iso-8859-1', 'iso-latin-1') or \
       enc.startswith(('latin-1-', 'iso-8859-1-', 'iso-latin-1-')):
        return 'iso-8859-1'
    return orig_enc
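# Illustrative examples (editor's addition):
#     >>> _get_normal_name('UTF_8')
#     'utf-8'
#     >>> _get_normal_name('Latin-1')
#     'iso-8859-1'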

                
def detect_encoding(readline):
    # NOTE: Decompyle++ recovered only the start of this function; the
    # BOM/coding-cookie detection that follows in the original is missing.
    try:
        filename = readline.__self__.name
    except AttributeError:
        filename = None
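# Usage sketch (editor's addition; behavior per the standard library's
# detect_encoding(), whose body is truncated above): returns the detected
# encoding plus the lines consumed while detecting it.
#     >>> import io
#     >>> detect_encoding(io.BytesIO(b'# coding: latin-1\nx = 1\n').readline)
#     ('iso-8859-1', [b'# coding: latin-1\n'])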

                
def open(filename):
    '''Open a file in read-only mode using the encoding detected by
    detect_encoding().'''
    buffer = _builtin_open(filename, 'rb')
    try:
        encoding, lines = detect_encoding(buffer.readline)
        buffer.seek(0)
        text = TextIOWrapper(buffer, encoding, line_buffering=True)
        text.mode = 'r'
        return text
    except:
        # Close the underlying buffer on any failure before re-raising.
        buffer.close()
        raise
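# Usage sketch (editor's addition; 'example.py' is a hypothetical path):
#     with open('example.py') as f:
#         source = f.read()   # decoded using the detected encoding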

                
def tokenize(readline):
    encoding, consumed = detect_encoding(readline)
    empty = _itertools.repeat(b'')
    rl_gen = _itertools.chain(consumed, iter(readline, b''), empty)
    return _tokenize(rl_gen.__next__, encoding)
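# Usage sketch (editor's addition): tokenize() takes a readline callable
# over bytes; the first token it yields is the ENCODING token.
#     >>> import io
#     >>> toks = list(tokenize(io.BytesIO(b'1+2\n').readline))
#     >>> toks[0].string
#     'utf-8'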

                
def _tokenize(readline, encoding):
    # Return a generator
    lnum = parenlev = continued = 0
    numchars = '0123456789'
    contstr, needcont = '', 0
    contline = None
    indents = [0]

    if encoding is not None:
        if encoding == 'utf-8-sig':
            # BOM will already have been stripped.
            encoding = 'utf-8'
        yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '')
    last_line = b''
    line = b''
    while True:  # loop over lines in stream
        try:
            last_line = line
            line = readline()
        except StopIteration:
            line = b''
        # NOTE: Decompyle++ recovered only the start of the main loop; the
        # rest of the tokenizer body is missing from this output.

                
def generate_tokens(readline):
    return _tokenize(readline, None)
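# Usage sketch (editor's addition): generate_tokens() takes a readline
# over *strings* and emits no ENCODING token.
#     >>> import io
#     >>> next(generate_tokens(io.StringIO('x = 1\n').readline)).string
#     'x'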

                
def main():
    import argparse

    # Helper error handling routines
    def perror(message):
        sys.stderr.write(message)
        sys.stderr.write('\n')

    def error(message, filename=None, location=None):
        if location:
            args = (filename,) + location + (message,)
            perror('%s:%d:%d: error: %s' % args)
        elif filename:
            perror(f'{filename!s}: error: {message!s}')
        else:
            perror('error: %s' % message)
        sys.exit(1)

    # Parse the arguments and options
    parser = argparse.ArgumentParser(prog='python -m tokenize')
    parser.add_argument(dest='filename', nargs='?',
                        metavar='filename.py',
                        help='the file to tokenize; defaults to stdin')
    parser.add_argument('-e', '--exact', dest='exact', action='store_true',
                        help='display token names using the exact type')
    args = parser.parse_args()
    try:
        # Tokenize the input
        if args.filename:
            filename = args.filename
            with _builtin_open(filename, 'rb') as f:
                tokens = list(tokenize(f.readline))
        else:
            filename = '<stdin>'
            tokens = _tokenize(sys.stdin.readline, None)

        # Output the tokenization
        for token in tokens:
            token_type = token.type
            if args.exact:
                token_type = token.exact_type
            token_range = '%d,%d-%d,%d:' % (token.start + token.end)
            print(f'{token_range!s:20}{tok_name[token_type]!s:15}{token.string!r:15}')
    except IndentationError as err:
        line, column = err.args[1][1:3]
        error(err.args[0], filename, (line, column))
    except TokenError as err:
        line, column = err.args[1]
        error(err.args[0], filename, (line, column))
    except SyntaxError as err:
        error(err, filename)
    except OSError as err:
        error(err)
    except KeyboardInterrupt:
        print('interrupted\n')
    except Exception as err:
        perror('unexpected error: %s' % err)
        raise

                
def _generate_tokens_from_c_tokenizer(source):
    # Return a generator
    import _tokenize as c_tokenizer
    for info in c_tokenizer.TokenizerIter(source):
        tok, type, lineno, end_lineno, col_off, end_col_off, line = info
        yield TokenInfo(type, tok, (lineno, col_off), (end_lineno, end_col_off), line)

if __name__ == '__main__':
    main()