Last active
December 6, 2023 04:53
-
-
Save friendzis/50568eba519df892bb14c6dfaa84bd8e to your computer and use it in GitHub Desktop.
AoC 2023
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import typing, enum | |
import pprint | |
class TokenType(enum.Enum):
    """Kinds of tokens produced when lexing one schematic line."""

    SYMBOL = 'symbol'
    PART = 'part'
class Token():
    """A lexed element of one schematic line.

    A PART token spans a run of digits: ``position`` is an inclusive
    ``(first_col, last_col)`` pair and ``value`` is the parsed integer.
    A SYMBOL token occupies a single column: ``position`` is an int and
    ``value`` is the symbol character itself.
    """

    def __init__(self, type: TokenType, position: typing.Union[int, tuple], value: typing.Union[int, str]):
        self.type = type
        self.position = position
        self.value = value
        # Fix: initialise here so reading .line before set_line() was
        # called is None rather than an AttributeError.
        self.line = None

    def set_line(self, line: int):
        """Record the 0-based source line number; returns self for chaining."""
        self.line = line
        return self

    def __repr__(self) -> str:
        return f"Token({self.type.value}@{self.position}: {self.value})"
def tokenize(line: list[str]):
    """Yield Token objects for one schematic line (a list of characters).

    Runs of digits become PART tokens carrying the whole number and its
    inclusive (first, last) column span; every other character except
    '.' and the newline becomes a SYMBOL token at its column.
    """
    num = []
    pos = []
    for idx, symbol in enumerate(line):
        if symbol.isdigit():
            num.append(symbol)
            pos.append(idx)
            continue
        # A non-digit terminates any number being accumulated.
        if num:
            yield Token(TokenType.PART, (pos[0], pos[-1]), int(''.join(num)))
            num = []
            pos = []
        if symbol not in '.\n':
            yield Token(TokenType.SYMBOL, idx, symbol)
    # Fix: a number running to the end of a line that has no trailing
    # newline (typically the file's last line) was previously dropped.
    if num:
        yield Token(TokenType.PART, (pos[0], pos[-1]), int(''.join(num)))
def find_around(item: Token, curr: list[Token], prev: list[Token], nxt: list[Token]) -> list[Token]:
    """Return the SYMBOL tokens adjacent (incl. diagonally) to PART `item`.

    `curr` is the line holding `item`; `prev`/`nxt` are its neighbouring
    lines (either may be None at the grid edges).
    """
    first, last = item.position
    window = range(first - 1, last + 2)

    # Same line: only the columns immediately left and right can touch.
    found = [t for t in curr
             if t.type == TokenType.SYMBOL and t.position in (first - 1, last + 1)]
    # Lines above and below: anything inside the widened column window.
    for neighbour in (prev, nxt):
        found.extend(t for t in (neighbour or [])
                     if t.type == TokenType.SYMBOL and t.position in window)
    return found
# Day 3, part 1: sum every part number that touches a symbol
# (horizontally, vertically or diagonally).
with open('day3t.txt') as f:
    tokens = [list(tokenize(list(line))) for line in f]

total = 0
for idx, line_tokens in enumerate(tokens):
    above = tokens[idx - 1] if idx > 0 else None
    below = tokens[idx + 1] if idx < len(tokens) - 1 else None
    for token in line_tokens:
        # A part number counts when at least one symbol sits around it.
        if token.type == TokenType.PART and find_around(token, tokens[idx], above, below):
            total += token.value
print(total)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import typing, enum | |
import pprint | |
class TokenType(enum.Enum):
    """Kinds of tokens produced when lexing one schematic line."""

    SYMBOL = 'symbol'
    PART = 'part'
class Token():
    """A lexed element of one schematic line.

    A PART token spans a run of digits: ``position`` is an inclusive
    ``(first_col, last_col)`` pair and ``value`` is the parsed integer.
    A SYMBOL token occupies a single column: ``position`` is an int and
    ``value`` is the symbol character itself.
    """

    def __init__(self, type: TokenType, position: typing.Union[int, tuple], value: typing.Union[int, str]):
        self.type = type
        self.position = position
        self.value = value
        # Fix: initialise here so reading .line before set_line() was
        # called is None rather than an AttributeError.
        self.line = None

    def set_line(self, line: int):
        """Record the 0-based source line number; returns self for chaining."""
        self.line = line
        return self

    def __repr__(self) -> str:
        return f"Token({self.type.value}@{self.position}: {self.value})"
def tokenize(line: list[str]):
    """Yield Token objects for one schematic line (a list of characters).

    Runs of digits become PART tokens carrying the whole number and its
    inclusive (first, last) column span; every other character except
    '.' and the newline becomes a SYMBOL token at its column.
    """
    num = []
    pos = []
    for idx, symbol in enumerate(line):
        if symbol.isdigit():
            num.append(symbol)
            pos.append(idx)
            continue
        # A non-digit terminates any number being accumulated.
        if num:
            yield Token(TokenType.PART, (pos[0], pos[-1]), int(''.join(num)))
            num = []
            pos = []
        if symbol not in '.\n':
            yield Token(TokenType.SYMBOL, idx, symbol)
    # Fix: a number running to the end of a line that has no trailing
    # newline (typically the file's last line) was previously dropped.
    if num:
        yield Token(TokenType.PART, (pos[0], pos[-1]), int(''.join(num)))
def find_around(item: Token, curr: list[Token], prev: list[Token], nxt: list[Token]) -> list[Token]:
    """Return the PART tokens adjacent (incl. diagonally) to SYMBOL `item`.

    `curr` is the line holding `item`; `prev`/`nxt` are its neighbouring
    lines (either may be None at the grid edges).
    """
    col = item.position

    # Same line: the symbol must sit just left or just right of the span.
    found = [t for t in curr
             if t.type == TokenType.PART and col in (t.position[0] - 1, t.position[1] + 1)]
    # Lines above and below: the symbol's column inside the widened span.
    for neighbour in (prev, nxt):
        found.extend(t for t in (neighbour or [])
                     if t.type == TokenType.PART and t.position[0] - 1 <= col <= t.position[1] + 1)
    return found
def gear_ratio(parts: list[Token]) -> int:
    """Return the product of the two part values when exactly two parts
    surround a gear, and 0 in every other case."""
    if len(parts) != 2:
        return 0
    first, second = parts
    return first.value * second.value
# Day 3, part 2: sum the gear ratios.  Per the puzzle, a gear is a '*'
# symbol adjacent to exactly two part numbers, and its ratio is the
# product of those two numbers.
with open('day3t.txt') as f:
    tokens = [list(tokenize(list(line))) for line in f]

total = 0
for idx, line_tokens in enumerate(tokens):
    above = tokens[idx - 1] if idx > 0 else None
    below = tokens[idx + 1] if idx < len(tokens) - 1 else None
    for token in line_tokens:
        # Fix: only '*' symbols can be gears.  The original considered
        # every symbol, over-counting whenever some other symbol happened
        # to touch exactly two part numbers.
        if token.type == TokenType.SYMBOL and token.value == '*':
            total += gear_ratio(find_around(token, tokens[idx], above, below))
print(total)
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment