library(tidyverse) # ggplot, lubridate, dplyr, stringr, readr...
library(bobsburgersR)
library(praise)
Bob’s Burgers
The Data
This week we’re exploring Bob’s Burgers dialogue! Thank you to Steven Ponce for the data and for a blog post demonstrating how to visualize it!
See the {bobsburgersR} R package for the original transcript data, as well as additional information about each episode!
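Before building anything, it helps to glance at the two data frames used below. A quick sketch, assuming (as the later code relies on) that {bobsburgersR} provides transcript_data and imdb_wikipedia_data with at least the season, episode, raw_text, and wikipedia_directed_by columns:
# both data frames ship with {bobsburgersR}, loaded above
glimpse(transcript_data)      # one row per line of dialogue (season, episode, raw_text, ...)
glimpse(imdb_wikipedia_data)  # one row per episode (season, episode, wikipedia_directed_by, ...)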
episode_metrics <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2024/2024-11-19/episode_metrics.csv')
<- c("fart", "crap", "toilet", "buns", "diarrhea",
words "poop", "(doo doo)")
<- str_c("(?i)", str_c(words, collapse = "|"))
list list
[1] "(?i)fart|crap|toilet|buns|diarrhea|poop|(doo doo)"
poop_data <- transcript_data |>
  filter(str_detect(raw_text, list)) |>
  filter(!str_detect(raw_text, "(?i)farth"))
library(ggtext)
library(emo)
poop_data |>
  group_by(season, episode) |>
  summarize(num_wrds = n()) |>
  full_join(imdb_wikipedia_data,
            by = c("season", "episode")) |>
  ggplot(aes(y = num_wrds, x = episode,
             color = as.factor(season))) +
  geom_point(size = 3, show.legend = FALSE, alpha = 0.8) +
  facet_wrap(~season)
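The points above are per episode; for a season-level view of the same filtered lines, the totals can be tabulated directly (a sketch on the poop_data frame defined earlier):
poop_data |>
  count(season, name = "num_lines") |>
  arrange(desc(num_lines))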
<- c("burger")
words
<- str_c("(?i)", str_c(words, collapse = "|"))
list list
[1] "(?i)burger"
other_data <- transcript_data |>
  drop_na(raw_text) |>
  mutate(word_in = str_detect(raw_text, list))
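Because word_in is logical, summing it (as the plot below does) simply counts the burger-mentioning lines; a quick tally of the overall TRUE/FALSE split, sketch only:
other_data |>
  count(word_in)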
hamburgers… 🍔
library(ggtext)
library(emoji)
other_data |>
  group_by(season, episode) |>
  summarize(num_wrds = sum(word_in)) |>
  full_join(imdb_wikipedia_data,
            by = c("season", "episode")) |>
  ggplot(aes(y = num_wrds, x = episode)) +
  geom_point(shape = "\U1F354", size = 3) +
  facet_wrap(~season)
imdb_wikipedia_data |>
  group_by(wikipedia_directed_by) |>
  summarize(num_ep = n()) |>
  arrange(desc(num_ep))
# A tibble: 34 × 2
   wikipedia_directed_by num_ep
   <chr>                  <int>
 1 Chris Song                46
 2 Tyree Dillihay            32
 3 Ryan Mattos               26
 4 Brian Loschiavo           17
 5 Jennifer Coyle            17
 6 Tom Riggin                17
 7 Don MacKinnon             12
 8 Matthew Long              12
 9 Ian Hamilton              11
10 Anthony Chun               8
# ℹ 24 more rows
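Equivalently, the director tally above can be written with count(), which wraps the group_by() / summarize(n()) / arrange() pattern (same data, same result):
imdb_wikipedia_data |>
  count(wikipedia_directed_by, name = "num_ep", sort = TRUE)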
praise()
[1] "You are legendary!"