import time


def tokenize_pattern(pattern: str, wildcard: str = '@'):
    """Yield ``(offset, token)`` pairs for each literal token in *pattern*.

    ``pattern`` is split on ``wildcard``; every non-empty literal run is
    yielded together with its starting offset inside ``pattern``.

    >>> list(tokenize_pattern('sr@l'))
    [(0, 'sr'), (3, 'l')]
    """
    tokens = (token for token in pattern.split(wildcard) if token)
    pattern_position = 0
    for token in tokens:
        token_shift = pattern.index(token, pattern_position)
        yield token_shift, token
        # Resume the search right after the match just found.
        # BUG FIX: the old code advanced from the previous *search start*
        # (`pattern_position + len(token) + 1`), so a token located past a
        # wildcard run left the next search starting inside the current
        # token — e.g. '@@ab@b' yielded (2, 'ab'), (3, 'b') instead of the
        # correct (2, 'ab'), (5, 'b').
        pattern_position = token_shift + len(token)


def ara_ara(input_string: str, pattern: str, wildcard: str = '@'):
    """Return the offset in *input_string* where *pattern* first matches.

    ``pattern`` is literal text in which each ``wildcard`` character matches
    exactly one arbitrary character.  The returned offset is the position of
    the pattern's first *literal* token; ``0`` if the pattern contains no
    literal tokens at all; ``None`` if no match is found.

    NOTE(review): a trailing wildcard is not required to consume a character
    of the input — confirm whether callers rely on that.
    """
    # BUG FIX: the wildcard argument was previously not forwarded, so any
    # non-default wildcard was silently ignored.
    tokens = list(tokenize_pattern(pattern, wildcard))
    if not tokens:
        # Empty / all-wildcard pattern: trivially matches at offset 0.
        return 0

    first_shift, start_token = tokens[0]
    # BUG FIX: the remaining tokens' offsets are absolute pattern positions,
    # but below the input is re-anchored at the first token's match, so the
    # checks must use offsets *relative to the first token*.  (The original
    # code used absolute offsets, mis-matching any pattern with leading
    # wildcards.)
    rest = [(shift - first_shift, token) for shift, token in tokens[1:]]

    skip = 0
    probe = input_string.find(start_token)
    while probe > -1:
        skip += probe
        input_string = input_string[probe:]
        for rel_shift, token in rest:
            # A too-short slice compares unequal, so running off the end of
            # the input correctly counts as a mismatch.
            if input_string[rel_shift:rel_shift + len(token)] != token:
                # Mismatch: slide one character forward and hunt for the
                # next occurrence of the leading token.
                skip += 1
                input_string = input_string[1:]
                probe = input_string.find(start_token)
                break
        else:
            return skip
    return None  # explicit: no match anywhere in the input

if __name__ == "__main__":
    # Micro-benchmark the matcher.  Guarded so that importing this module
    # does not run the benchmark as a side effect; perf_counter is the
    # recommended clock for timing short intervals (higher resolution than
    # monotonic on most platforms).
    time_start = time.perf_counter()
    result = ara_ara(input_string='obosralsya v proshlom primere', pattern='sr@l')
    time_end = time.perf_counter()

    worktime = 1_000_000 * (time_end - time_start)
    print(f"result: {result}, worktime: {worktime:.1f}us")