Compare commits
2 commits
11d881ce2e
...
726f629e30
Author | SHA1 | Date | |
---|---|---|---|
726f629e30 | |||
403be5d4da |
79
src/day_14/mod.rs
Normal file
79
src/day_14/mod.rs
Normal file
|
@ -0,0 +1,79 @@
|
|||
mod model;
|
||||
mod parsing;
|
||||
|
||||
use aoc_runner_derive::{aoc, aoc_generator};
|
||||
use itertools::Itertools;
|
||||
pub use model::{Input, Polymer, Rule, Rules};
|
||||
pub use parsing::parse_input as parse_input_tokens;
|
||||
use yap::IntoTokens;
|
||||
|
||||
#[aoc_generator(day14)]
|
||||
pub fn parse_input(input: &str) -> Input {
|
||||
parse_input_tokens(&mut input.into_tokens()).unwrap()
|
||||
}
|
||||
|
||||
#[aoc(day14, part1)]
|
||||
pub fn part1(input: &Input) -> usize {
|
||||
solve(input, 10)
|
||||
}
|
||||
|
||||
/// Runs `iterations` pair-insertion steps on the input polymer and returns
/// the difference between the most and least common element counts.
pub fn solve(Input { polymer, rules }: &Input, iterations: usize) -> usize {
    // Work on a copy so the shared input stays untouched between parts.
    let mut polymer = polymer.clone();
    for _ in 0..iterations {
        polymer.apply(rules);
    }
    let counts = polymer.element_counts();
    // Only the extremes are needed, so take max/min directly instead of
    // sorting the whole count table.
    let max = counts
        .values()
        .max()
        .expect("polymer has at least one element");
    let min = counts
        .values()
        .min()
        .expect("polymer has at least one element");
    max - min
}
|
||||
|
||||
#[aoc(day14, part2)]
|
||||
pub fn part2(input: &Input) -> usize {
|
||||
solve(input, 40)
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod test {

    // Polymer template and pair-insertion rules from the puzzle example.
    const EXAMPLE_INPUT: &str = "NNCB

CH -> B
HH -> N
CB -> H
NH -> C
HB -> C
HC -> B
HN -> C
NN -> C
BH -> H
NC -> B
NB -> B
BN -> B
BB -> N
BC -> B
CC -> N
CN -> C";
    // Published example answers after 10 and 40 steps respectively.
    const RESULT_PART_1: usize = 1588;
    const RESULT_PART_2: usize = 2188189693529;

    // Part 1 (10 insertion steps) reproduces the example answer.
    #[test]
    fn part1_example() {
        let result = super::part1(&super::parse_input(EXAMPLE_INPUT));
        assert_eq!(result, RESULT_PART_1);
    }

    // Part 2 (40 insertion steps) reproduces the example answer.
    #[test]
    fn part2_example() {
        let result = super::part2(&super::parse_input(EXAMPLE_INPUT));
        assert_eq!(result, RESULT_PART_2);
    }

    // The example input parses without panicking.
    #[test]
    fn parse_example() {
        let _example = super::parse_input(EXAMPLE_INPUT);
    }
}
|
102
src/day_14/model.rs
Normal file
102
src/day_14/model.rs
Normal file
|
@ -0,0 +1,102 @@
|
|||
use std::{collections::HashMap, str::FromStr};
|
||||
|
||||
use itertools::Itertools;
|
||||
|
||||
/// A polymer stored as counts of adjacent element pairs rather than as the
/// full sequence, so each insertion step scales with the number of distinct
/// pairs instead of the (exponentially growing) polymer length.
#[derive(Clone)]
pub struct Polymer {
    // First element of the original template (pairs alone count it once).
    pub first: char,
    // Last element of the original template.
    pub last: char,
    // How often each adjacent pair occurs in the polymer.
    pub map: HashMap<(char, char), usize>,
}
|
||||
|
||||
impl FromStr for Polymer {
|
||||
type Err = &'static str;
|
||||
|
||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||
let mut map = HashMap::new();
|
||||
for neighbours in s.chars().tuple_windows::<(char, char)>() {
|
||||
map.entry(neighbours)
|
||||
.and_modify(|count| *count += 1)
|
||||
.or_insert(1);
|
||||
}
|
||||
let first = s.chars().next().unwrap();
|
||||
let last = s.chars().last().unwrap();
|
||||
Ok(Self { first, last, map })
|
||||
}
|
||||
}
|
||||
|
||||
/// A single pair-insertion rule: `neighbours.0 neighbours.1 -> separator`.
#[derive(Copy, Clone)]
pub struct Rule {
    // The adjacent pair this rule matches.
    pub neighbours: (char, char),
    // The element inserted between the matched pair.
    pub separator: char,
}
|
||||
|
||||
/// A validated, non-empty set of insertion rules keyed by the pair they
/// match (see the `TryFrom<Vec<Rule>>` impl for the validation).
pub struct Rules {
    pub map: HashMap<(char, char), char>,
}
|
||||
|
||||
impl TryFrom<Vec<Rule>> for Rules {
|
||||
type Error = &'static str;
|
||||
|
||||
fn try_from(rule_vec: Vec<Rule>) -> Result<Self, Self::Error> {
|
||||
if rule_vec.is_empty() {
|
||||
return Err("rule set can't be empty");
|
||||
}
|
||||
let mut rules = Rules {
|
||||
map: Default::default(),
|
||||
};
|
||||
for rule in rule_vec {
|
||||
if rules.map.contains_key(&rule.neighbours) {
|
||||
return Err("ambigous rule set");
|
||||
}
|
||||
rules.map.insert(rule.neighbours, rule.separator);
|
||||
}
|
||||
Ok(rules)
|
||||
}
|
||||
}
|
||||
|
||||
impl Polymer {
|
||||
pub fn apply(&mut self, rules: &Rules) {
|
||||
let mut new_map = HashMap::new();
|
||||
|
||||
for (neighbours, count) in self.map.iter() {
|
||||
if let Some(separator) = rules.map.get(neighbours) {
|
||||
new_map
|
||||
.entry((neighbours.0, *separator))
|
||||
.and_modify(|old_count| *old_count += count)
|
||||
.or_insert(*count);
|
||||
new_map
|
||||
.entry((*separator, neighbours.1))
|
||||
.and_modify(|old_count| *old_count += count)
|
||||
.or_insert(*count);
|
||||
} else {
|
||||
new_map
|
||||
.entry(*neighbours)
|
||||
.and_modify(|old_count| *old_count += count)
|
||||
.or_insert(*count);
|
||||
}
|
||||
}
|
||||
self.map = new_map
|
||||
}
|
||||
pub fn element_counts(&self) -> HashMap<char, usize> {
|
||||
let mut map = HashMap::new();
|
||||
map.insert(self.first, 1);
|
||||
map.insert(self.last, 1);
|
||||
for ((left, right), count) in self.map.iter() {
|
||||
map.entry(*left)
|
||||
.and_modify(|old_count| *old_count += count)
|
||||
.or_insert(*count);
|
||||
map.entry(*right)
|
||||
.and_modify(|old_count| *old_count += count)
|
||||
.or_insert(*count);
|
||||
}
|
||||
map.iter()
|
||||
.map(|(element, count)| (*element, count / 2))
|
||||
.collect()
|
||||
}
|
||||
}
|
||||
|
||||
/// The parsed day-14 puzzle input: the starting polymer template plus the
/// set of pair-insertion rules.
pub struct Input {
    pub polymer: Polymer,
    pub rules: Rules,
}
|
69
src/day_14/parsing.rs
Normal file
69
src/day_14/parsing.rs
Normal file
|
@ -0,0 +1,69 @@
|
|||
use yap::Tokens;
|
||||
|
||||
use crate::parsing::{newline, parse_n};
|
||||
|
||||
use super::{Input, Polymer, Rule, Rules};
|
||||
|
||||
pub fn parse_input(tokens: &mut impl Tokens<Item = char>) -> Option<Input> {
|
||||
tokens.optional(|t| {
|
||||
let polymer = parse_polymer(t);
|
||||
let tag = newline(t) && newline(t);
|
||||
let rules = parse_rules(t);
|
||||
if let (Some(polymer), true, Some(rules)) = (polymer, tag, rules) {
|
||||
Some(Input { polymer, rules })
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
pub fn parse_polymer(tokens: &mut impl Tokens<Item = char>) -> Option<Polymer> {
|
||||
tokens.optional(|t| {
|
||||
let polymer: String = t.many(|t| parse_element(t)).collect();
|
||||
if polymer.len() >= 2 {
|
||||
polymer.parse().ok()
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
pub fn parse_rule(tokens: &mut impl Tokens<Item = char>) -> Option<Rule> {
|
||||
tokens.optional(|t| {
|
||||
let neighbours = parse_neighbours(t);
|
||||
let tag = t.tokens(" -> ".chars());
|
||||
let separator = parse_element(t);
|
||||
|
||||
if let (Some(neighbours), true, Some(separator)) = (neighbours, tag, separator) {
|
||||
let neighbours = (neighbours[0], neighbours[1]);
|
||||
Some(Rule {
|
||||
neighbours,
|
||||
separator,
|
||||
})
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/// Parses exactly two consecutive elements (the left-hand side of a rule).
pub fn parse_neighbours(tokens: &mut impl Tokens<Item = char>) -> Option<[char; 2]> {
    // NOTE(review): `parse_n` comes from crate::parsing and is not visible
    // here; the `|_| true` argument appears to be an accept-anything
    // separator/validator predicate — confirm against its definition.
    parse_n(tokens, |t| parse_element(t), |_| true)
}
|
||||
|
||||
pub fn parse_element(tokens: &mut impl Tokens<Item = char>) -> Option<char> {
|
||||
tokens.optional(|t| {
|
||||
if let Some(c) = t.next() {
|
||||
if c.is_ascii_uppercase() {
|
||||
return Some(c);
|
||||
}
|
||||
}
|
||||
None
|
||||
})
|
||||
}
|
||||
|
||||
pub fn parse_rules(tokens: &mut impl Tokens<Item = char>) -> Option<Rules> {
|
||||
tokens.optional(|t| {
|
||||
let rules: Vec<Rule> = t.sep_by(|t| parse_rule(t), |t| newline(t)).collect();
|
||||
rules.try_into().ok()
|
||||
})
|
||||
}
|
57
src/day_15/mod.rs
Normal file
57
src/day_15/mod.rs
Normal file
|
@ -0,0 +1,57 @@
|
|||
mod model;
|
||||
mod parsing;
|
||||
|
||||
use aoc_runner_derive::{aoc, aoc_generator};
|
||||
pub use model::Grid;
|
||||
pub use parsing::parse_grid;
|
||||
use yap::IntoTokens;
|
||||
|
||||
#[aoc_generator(day15)]
|
||||
pub fn parse_input(input: &str) -> Grid {
|
||||
let grid = parse_grid(&mut input.into_tokens()).unwrap();
|
||||
Grid { grid }
|
||||
}
|
||||
|
||||
#[aoc(day15, part1)]
|
||||
pub fn part1(input: &Grid) -> usize {
|
||||
input.dijkstra((0, 0), (input.grid[0].len() - 1, input.grid.len() - 1))
|
||||
}
|
||||
|
||||
#[aoc(day15, part2)]
|
||||
pub fn part2(input: &Grid) -> usize {
|
||||
let input = input.grow();
|
||||
input.dijkstra((0, 0), (input.grid[0].len() - 1, input.grid.len() - 1))
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod test {
    // 10x10 risk-level grid from the puzzle example.
    const EXAMPLE_INPUT: &str = "1163751742
1381373672
2136511328
3694931569
7463417111
1319128137
1359912421
3125421639
1293138521
2311944581";
    // Published example answers (part 2 runs on the 5x-grown grid).
    const RESULT_PART_1: usize = 40;
    const RESULT_PART_2: usize = 315;

    // Part 1 reproduces the example answer on the base grid.
    #[test]
    fn part1_example() {
        let result = super::part1(&super::parse_input(EXAMPLE_INPUT));
        assert_eq!(result, RESULT_PART_1);
    }

    // Part 2 reproduces the example answer on the grown grid.
    #[test]
    fn part2_example() {
        let result = super::part2(&super::parse_input(EXAMPLE_INPUT));
        assert_eq!(result, RESULT_PART_2);
    }

    // The example input parses without panicking.
    #[test]
    fn parse_example() {
        super::parse_input(EXAMPLE_INPUT);
    }
}
|
103
src/day_15/model.rs
Normal file
103
src/day_15/model.rs
Normal file
|
@ -0,0 +1,103 @@
|
|||
use std::collections::HashSet;
|
||||
|
||||
use itertools::Itertools;
|
||||
|
||||
/// A rectangular grid of risk levels, indexed as `grid[y][x]`.
pub struct Grid {
    // Outer Vec: rows (y); inner Vec: cells within a row (x).
    pub grid: Vec<Vec<usize>>,
}
|
||||
|
||||
impl ToString for Grid {
|
||||
fn to_string(&self) -> String {
|
||||
self.grid
|
||||
.iter()
|
||||
.map(|line| {
|
||||
line.iter()
|
||||
.map(|digit| digit.to_string())
|
||||
.collect::<String>()
|
||||
})
|
||||
.join("\n")
|
||||
}
|
||||
}
|
||||
|
||||
impl Grid {
    /// Dijkstra shortest-path search over the grid; returns the minimum total
    /// risk (sum of entered cells' values) from `start` to `end`.
    ///
    /// Coordinates are `(x, y)` tuples, but the per-cell tables are indexed
    /// `[y][x]`. Panics (via `unwrap`) if `end` is unreachable.
    pub fn dijkstra(&self, start: (usize, usize), end: (usize, usize)) -> usize {
        // Per-cell tables mirroring the grid shape:
        //   cost: the risk of entering each cell (copied from self.grid)
        //   prev: predecessor on the best known path (written, never read)
        //   dist: best known total risk, None = not reached yet
        let mut cost = Vec::new();
        let mut prev = Vec::new();
        let mut dist = Vec::new();
        // Cells whose best distance is final.
        let mut visited = HashSet::new();
        // Frontier: cells discovered but not yet settled (acts as the
        // priority queue, scanned linearly for the minimum each round).
        let mut seen = HashSet::new();
        for y in 0..self.grid.len() {
            let mut cost_line = Vec::new();
            let mut prev_line = Vec::new();
            let mut dist_line = Vec::new();
            for x in 0..self.grid[y].len() {
                cost_line.push(self.grid[y][x]);
                prev_line.push(None);
                dist_line.push(None);
            }
            cost.push(cost_line);
            prev.push(prev_line);
            dist.push(dist_line);
        }

        // NOTE(review): the frontier is seeded with (0, 0) rather than
        // `start` — correct for the current callers (both pass (0, 0) as
        // start), but worth confirming if ever called with another start.
        seen.insert((0, 0));
        dist[start.1][start.0] = Some(0);

        loop {
            // Pick the frontier cell with the smallest known distance.
            // O(|frontier|) scan per iteration — a BinaryHeap would be
            // faster, but this is correct as written.
            let min_square = seen
                .iter()
                //.filter(|s| !visited.contains(*s))
                .min_by(|a, b| {
                    Ord::cmp(
                        &dist[a.1][a.0].unwrap_or(usize::MAX),
                        &dist[b.1][b.0].unwrap_or(usize::MAX),
                    )
                })
                .cloned();
            if let Some((x, y)) = min_square {
                seen.remove(&(x, y));
                let current = (x, y);
                // Frontier cells always have a distance set (set below when
                // inserted), so this unwrap cannot fail.
                let current_dist = dist[current.1][current.0].unwrap();
                // Relax the four orthogonal neighbours.
                for (x_off, y_off) in [(-1, 0), (1, 0), (0, -1), (0, 1)] {
                    // At x == 0 or y == 0 the subtraction wraps to
                    // usize::MAX, which the bounds check below rejects — the
                    // wrap is intentional, not a bug.
                    let neighbour = ((x as isize + x_off) as usize, (y as isize + y_off) as usize);
                    if !(cost.len() > neighbour.1 && cost[neighbour.1].len() > neighbour.0)
                        || visited.contains(&neighbour)
                    {
                        continue;
                    }

                    let neighbour_dist = dist[neighbour.1][neighbour.0].unwrap_or(usize::MAX);
                    let neighbour_cost = cost[neighbour.1][neighbour.0];
                    if neighbour_dist > current_dist + neighbour_cost {
                        prev[neighbour.1][neighbour.0] = Some(current);
                        dist[neighbour.1][neighbour.0] = Some(current_dist + neighbour_cost);
                        seen.insert(neighbour);
                    }
                }
                visited.insert(current);
            } else {
                // Frontier exhausted: every reachable cell is settled.
                break;
            }
        }

        dist[end.1][end.0].unwrap()
    }

    /// Returns the grid tiled 5x5 (part 2 rules): each tile's values are the
    /// original plus the tile's Manhattan distance from the top-left tile,
    /// wrapping from 9 back to 1 (never to 0).
    pub fn grow(&self) -> Self {
        let mut grid = Vec::new();
        let y_max = self.grid.len();
        let x_max = self.grid[0].len();
        // i = vertical tile index, j = horizontal tile index.
        for i in 0..5 {
            for y in 0..y_max {
                let mut line = Vec::new();
                for j in 0..5 {
                    for x in 0..x_max {
                        // Shift into 0..=8, add the tile offset, wrap mod 9,
                        // shift back into 1..=9.
                        // NOTE(review): `+ i + j - 1` underflows usize if a
                        // cell value is 0 and i == j == 0 — assumes all risk
                        // levels are >= 1, as in the puzzle input.
                        line.push(((self.grid[y][x] + i + j - 1) % 9) + 1)
                    }
                }
                grid.push(line);
            }
        }
        Self { grid }
    }
}
|
17
src/day_15/parsing.rs
Normal file
17
src/day_15/parsing.rs
Normal file
|
@ -0,0 +1,17 @@
|
|||
use yap::Tokens;
|
||||
|
||||
use crate::parsing::{newline, parse_digit};
|
||||
|
||||
pub fn parse_line(tokens: &mut impl Tokens<Item = char>) -> Option<Vec<usize>> {
|
||||
tokens.optional(|t| {
|
||||
let vec: Vec<usize> = t.many(|t| parse_digit(t)).collect();
|
||||
(!vec.is_empty()).then(|| vec)
|
||||
})
|
||||
}
|
||||
|
||||
pub fn parse_grid(tokens: &mut impl Tokens<Item = char>) -> Option<Vec<Vec<usize>>> {
|
||||
tokens.optional(|t| {
|
||||
let vec: Vec<Vec<usize>> = t.sep_by(|t| parse_line(t), |t| newline(t)).collect();
|
||||
(!vec.is_empty()).then(|| vec)
|
||||
})
|
||||
}
|
|
@ -15,5 +15,7 @@ pub mod day_10;
|
|||
pub mod day_11;
|
||||
pub mod day_12;
|
||||
pub mod day_13;
|
||||
pub mod day_14;
|
||||
pub mod day_15;
|
||||
|
||||
aoc_lib! { year = 2021 }
|
||||
|
|
Loading…
Reference in a new issue