diff --git a/.DS_Store b/.DS_Store index adc2f88..cbcf340 100644 Binary files a/.DS_Store and b/.DS_Store differ diff --git a/DESCRIPTION b/DESCRIPTION new file mode 100644 index 0000000..8ef7d82 --- /dev/null +++ b/DESCRIPTION @@ -0,0 +1,36 @@ +Package: datadrivencv +Type: Package +Title: Templates and helper functions for building a CV with spreadsheets +Version: 0.1.0 +URL: http://nickstrayer.me/datadrivencv, https://github.com/nstrayer/datadrivencv +Author: Nick Strayer +Maintainer: Nick Strayer +Description: Separates the CV format from the content using spreadsheets, RMarkdown, and Pagedown. Built to allow easy out-of-the-box behavior, but also to allow you to go beyond the defaults with customization and lack of lock-in to a given format. +License: MIT + file LICENSE +Encoding: UTF-8 +LazyData: true +Imports: + dplyr, + tidyr, + glue, + readr, + googlesheets4, + jsonlite, + lubridate, + purrr, + stringr, + magrittr, + pagedown, + rstudioapi, + fs, + icon (>= 0.1.0), + whisker RoxygenNote: 7.0.2 +Roxygen: list(markdown = TRUE) +Suggests: + knitr, + rmarkdown, + testthat (>= 2.1.0) +VignetteBuilder: knitr +Remotes: + ropenscilabs/icon diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..1e8d2b6 --- /dev/null +++ b/LICENSE @@ -0,0 +1,2 @@ +YEAR: 2020 +COPYRIGHT HOLDER: Nick Strayer diff --git a/LICENSE.md b/LICENSE.md new file mode 100644 index 0000000..efba85a --- /dev/null +++ b/LICENSE.md @@ -0,0 +1,21 @@ +# MIT License + +Copyright (c) 2020 Nick Strayer + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/NAMESPACE b/NAMESPACE new file mode 100644 index 0000000..0c8bf40 --- /dev/null +++ b/NAMESPACE @@ -0,0 +1,7 @@ +# Generated by roxygen2: do not edit by hand + +export("%>%") +export(build_network_logo) +export(use_csv_data_storage) +export(use_datadriven_cv) +importFrom(magrittr,"%>%") diff --git a/R/build_network_logo.R b/R/build_network_logo.R new file mode 100644 index 0000000..9628494 --- /dev/null +++ b/R/build_network_logo.R @@ -0,0 +1,69 @@ +#' Build interactive network logo +#' +#' Constructs a network based on your position data to be used as a logo. +#' Interactive in HTML version and static in the PDF version. Nodes are entries, +#' colored by section and connected if they occurred in the same year. +#' +#' @param position_data position data from your `CV_Printer` class. 
+#' +#' @return Interactive force-directed layout network of your CV data +#' @export +build_network_logo <- function(position_data){ + + positions <- position_data %>% + dplyr::mutate( + id = dplyr::row_number(), + title = stringr::str_remove_all(title, '(\\(.+?\\))|(\\[)|(\\])'), + section = stringr::str_replace_all(section, "_", " ") %>% stringr::str_to_title() + ) + + combination_indices <- function(n){ + rep_counts <- (n:1) - 1 + dplyr::tibble( + a = rep(1:n, times = rep_counts), + b = purrr::flatten_int( purrr::map(rep_counts, ~{tail(1:n, .x)}) ) + ) + } + current_year <- lubridate::year(lubridate::ymd(Sys.Date())) + edges <- positions %>% + dplyr::select(id, start_year, end_year) %>% + dplyr::mutate( + end_year = ifelse(end_year > current_year, current_year, end_year), + start_year = ifelse(start_year > current_year, current_year, start_year) + ) %>% + purrr::pmap_dfr(function(id, start_year, end_year){ + dplyr::tibble( + year = start_year:end_year, + id = id + ) + }) %>% + dplyr::group_by(year) %>% + tidyr::nest() %>% + dplyr::rename(ids_for_year = data) %>% + purrr::pmap_dfr(function(year, ids_for_year){ + combination_indices(nrow(ids_for_year)) %>% + dplyr::transmute( + year = year, + source = ids_for_year$id[a], + target = ids_for_year$id[b] + ) + }) + + network_data <- list(nodes = dplyr::select(positions, -in_resume,-timeline), + edges = edges) %>% + jsonlite::toJSON() + + viz_script <- readr::read_file(system.file("js/cv_network.js", package = "datadrivencv")) + + # Embed the data, an svg container, d3, and the viz script in the page + glue::glue( + "<script id = 'data' type = 'application/json'>{network_data}</script>", + "<svg style = 'width:100%;height:300px' id = 'cv_network_viz'></svg>", + "<script src = 'https://unpkg.com/d3@5.9.2/dist/d3.min.js'></script>", + "<script>{viz_script}</script>" + ) + +} diff --git a/R/use_csv_data_storage.R b/R/use_csv_data_storage.R new file mode 100644 index 0000000..3c3a5de --- /dev/null +++ b/R/use_csv_data_storage.R @@ -0,0 +1,38 @@ +#' Use CSVs for storing data +#' +#' Sets up examples of the four CSVs needed for building a CV +#' +#' +#' @param folder_name Name of the folder you want the csvs stored in, relative to the current working directory +#' @inheritParams use_ddcv_template +#' +#' @return A new folder `folder_name/` with `entries.csv`, `text_blocks.csv`, `language_skills.csv`, and `contact_info.csv` in it, placed relative to the current +#' working directory. +#' +#' @examples +#' +#' # Make a temp directory for placing files +#' # This would be a real location for a typical situation +#' temp_dir <- fs::dir_create(fs::path(tempdir(), "cv_w_csvs")) +#' +#' datadrivencv::use_csv_data_storage( +#' folder_name = fs::path(temp_dir, "csv_data"), +#' create_output_dir = TRUE +#' ) +#' +#' list.files(fs::path(temp_dir, "csv_data")) +#' +#' @export +use_csv_data_storage <- function(folder_name = "data", create_output_dir = TRUE){ + + for(csv_file in c("entries.csv", "text_blocks.csv", "language_skills.csv", "contact_info.csv")){ + use_ddcv_template( + file_name = csv_file, + output_dir = folder_name, + create_output_dir = create_output_dir, + warn_about_no_change = TRUE + ) + } + + print(paste("Copied CSVs to", folder_name)) +} diff --git a/R/use_datadriven_cv.R b/R/use_datadriven_cv.R new file mode 100644 index 0000000..bc0a8cd --- /dev/null +++ b/R/use_datadriven_cv.R @@ -0,0 +1,115 @@ +#' Use Data Driven CV template +#' +#' Sets up the `.Rmd` file for a data-driven CV in the current working directory. +#' Also adds a css file for the CV so the style can be customized. +#' +#' +#' +#' @param full_name Your full name, used in title of document and header +#' @param data_location Path of the spreadsheets holding all your data. 
This can +#' be either a URL to a Google Sheet with multiple sheets containing the four +#' data types or a path to a folder containing four `.csv`s with the necessary +#' data. See \code{\link{use_csv_data_storage}()} for help setting up these +#' `.csv`s. +#' @param pdf_location What location will the PDF of this CV be hosted at? +#' @param html_location What location will the HTML version of this CV be hosted +#' at? +#' @param source_location Where is the code to build your CV hosted? +#' @param open_files Should the added files be opened after creation? +#' @param which_files What files should be placed? Takes a vector of possible +#' values `c("cv.rmd", "dd_cv.css", "render_cv.r", "cv_printing_functions.r")` +#' or `"all"` for everything. This can be used to incrementally update the +#' printing functions or CSS without losing customizations you've made to +#' other files. +#' @param output_dir Where should the files be placed? Defaults to your current working directory +#' @param use_network_logo Should the logo be an interactive network based on your +#' CV data? Note that this uses the function +#' \code{\link{build_network_logo}()}, so it will introduce a dependency on this +#' package. +#' @inheritParams use_ddcv_template +#' +#' @return `cv.rmd`, `dd_cv.css`, `render_cv.r`, and `cv_printing_functions.r` +#' written to the current working directory. +#' +#' @examples +#' +#' # Make a temp directory for placing files +#' # This would be a real location for a typical situation +#' temp_dir <- fs::dir_create(fs::path(tempdir(), "my_cv")) +#' +#' use_datadriven_cv( +#' full_name = "Nick Strayer", +#' data_location = "https://docs.google.com/spreadsheets/d/14MQICF2F8-vf8CKPF1m4lyGKO6_thG-4aSwat1e2TWc", +#' pdf_location = "https://github.com/nstrayer/cv/raw/master/strayer_cv.pdf", +#' html_location = "nickstrayer.me/cv/", +#' source_location = "https://github.com/nstrayer/cv", +#' output_dir = temp_dir, +#' open_files = FALSE +#' ) +#' +#' # Files should be where they were requested +#' list.files(temp_dir) +#' +#' @export +use_datadriven_cv <- function(full_name = "Sarah Arcos", + data_location = system.file("sample_data/", package = "datadrivencv"), + pdf_location = "https://github.com/nstrayer/cv/raw/master/strayer_cv.pdf", + html_location = "nickstrayer.me/datadrivencv/", + source_location = "https://github.com/nstrayer/datadrivencv", + which_files = "all", + output_dir = getwd(), + create_output_dir = FALSE, + use_network_logo = TRUE, + open_files = TRUE){ + + if(is.character(which_files) && which_files == "all"){ + which_files <- c("cv.rmd", "dd_cv.css", "render_cv.r", "cv_printing_functions.r") + } + # Make case-insensitive + which_files <- tolower(which_files) + + if("cv.rmd" %in% which_files){ + # Sets the main Rmd template + use_ddcv_template( + file_name = "cv.rmd", + params = list( + full_name = full_name, + data_location = data_location, + pdf_location = pdf_location, + html_location = html_location, + source_location = source_location, + use_network_logo = use_network_logo + ), + output_dir = output_dir, + create_output_dir = create_output_dir, + open_after_making = open_files + ) + } + + if("dd_cv.css" %in% which_files){ + # Place the css as well + use_ddcv_template( + file_name = "dd_cv.css", + output_dir = output_dir, + create_output_dir + ) + } + + if("render_cv.r" %in% which_files){ + use_ddcv_template( + file_name = "render_cv.r", + output_dir = output_dir, + create_output_dir, + open_after_making = open_files + ) + } + + if("cv_printing_functions.r" %in% which_files){ + 
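+    # Copy the printing functions. Unlike cv.rmd and render_cv.r above,
+    # open_after_making is not passed here, so this file (like dd_cv.css)
+    # is not opened even when open_files = TRUE.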
use_ddcv_template( + file_name = "cv_printing_functions.r", + output_dir = output_dir, + create_output_dir + ) + } + +} diff --git a/R/use_ddcv_template.R b/R/use_ddcv_template.R new file mode 100644 index 0000000..71134f2 --- /dev/null +++ b/R/use_ddcv_template.R @@ -0,0 +1,60 @@ +#' Use template file from package +#' +#' @param file_name Name of file from templates to use: e.g. `cv.rmd`. +#' @param params Parameters used to fill in `whisker` template +#' @param output_file_name Name of file after being placed. +#' @param output_dir Directory location for output to be placed in. +#' @param create_output_dir If the requested output directory is missing should it be created? +#' @param warn_about_no_change If there is no change between the new file and what was already there, should a warning be issued? +#' @param open_after_making Should the file be opened after it has been written? +#' +#' @return NULL +use_ddcv_template <- function( + file_name, + params = NULL, + output_file_name = file_name, + output_dir = getwd(), + create_output_dir = FALSE, + warn_about_no_change = TRUE, + open_after_making = FALSE){ + output_dir_missing <- !fs::dir_exists(output_dir) + + if(output_dir_missing & create_output_dir){ + fs::dir_create(output_dir) + } else + if(output_dir_missing & !create_output_dir) { + stop(glue::glue("The requested output directory: {output_dir} doesn't exist. Either set create_output_dir = TRUE or manually make directory.")) + } + + + template_loc <- fs::path(system.file("templates/", package = "datadrivencv"), file_name) + output_loc <- fs::path(output_dir, output_file_name) + + template_text <- readr::read_file(template_loc) + + if(!is.null(params)){ + template_text <- whisker::whisker.render(template_text, data = params) + } + + # Check if file exists already + already_exists <- fs::file_exists(output_loc) + if(already_exists){ + # Check if the two files are identical + no_changes_made <- readr::read_file(output_loc) == template_text + + if(no_changes_made & warn_about_no_change){ + warning(glue::glue("{file_name} already exists and there are no differences with the current version.")) + } + } + + readr::write_file(template_text, output_loc) + + # Open the file if requested + if(open_after_making){ + if (rstudioapi::isAvailable() && rstudioapi::hasFun("navigateToFile")) { + rstudioapi::navigateToFile(output_loc) + } else { + utils::file.edit(output_loc) + } + } +} diff --git a/R/utils-pipe.R b/R/utils-pipe.R new file mode 100644 index 0000000..e79f3d8 --- /dev/null +++ b/R/utils-pipe.R @@ -0,0 +1,11 @@ +#' Pipe operator +#' +#' See \code{magrittr::\link[magrittr:pipe]{\%>\%}} for details. +#' +#' @name %>% +#' @rdname pipe +#' @keywords internal +#' @export +#' @importFrom magrittr %>% +#' @usage lhs \%>\% rhs +NULL diff --git a/README.Rmd b/README.Rmd new file mode 100644 index 0000000..d5131f5 --- /dev/null +++ b/README.Rmd @@ -0,0 +1,199 @@ +--- +output: github_document +--- + +```{r, include = FALSE} +knitr::opts_chunk$set( + collapse = TRUE, + comment = "#>", + fig.path = "man/figures/README-", + out.width = "100%", + echo = FALSE +) + +embed_png <- function(image_title, width = "100%") { + knitr::asis_output( + glue::glue("
", + "", + "
") + ) +} +``` + + +# datadrivencv + + + + +The goal of datadrivencv is to ease the burden of maintaining a CV by separating the content from the output by treating entries as data. + +## Installation + +The development version from [GitHub](https://github.com/) with: + +``` r +# install.packages("devtools") +devtools::install_github("nstrayer/datadrivencv") +``` + +# Motivation + +## Updating a CV is not fun + +Anytime I would go to add something to my CV I ended up wanting to change the format a tiny bit. This usually meant the entire word document completely falling apart and needing to have each entry copied and pasted into a new version. + +Ultimately this process felt formulaic and repetitive, prime indicators they could be done better with code. Using a spreadsheet to store each entry in the CV and R to write markdown seemed like the way to go. Pagedown made this even easier. Meaning that the same CV could be rendered with interactive HTML and PDF without changing the underlying rendering engine like was done with kniting to pdf vs knitting to html. + + +```{r} +embed_png("csv_to_cv.png") +``` + + +## No lock-in + +Inspired heavily the the `usethis` package, `datadrivencv` strives to make itself unnecessary. The main function is `use_data_driven_cv`, which sets up the files you need to build your CV. These files are self-contained meaning if you uninstall `datadrivencv` your CV will still knit fine. All the R code logic is contained in a sourced script so if you want to change it you can do so. + +The package aims to bootstrap you to a working data-driven CV pipeline. Serving as a jumping off point for you to build your own custom CV, you may at first want to leave it as is and then slowly tweak things to keep it fresh. You have all the code, so you can! + +# Using it + +The first step to using the package is the `use_data_driven_cv()` function. This function takes a few input parameters and when when run, sets up a series of files in your current working directory. E.g. + +```{r, eval = FALSE, echo = TRUE} +# run ?datadrivencv::use_datadriven_cv to see more details +datadrivencv::use_datadriven_cv( + full_name = "Nick Strayer", + data_location = "https://docs.google.com/spreadsheets/d/14MQICF2F8-vf8CKPF1m4lyGKO6_thG-4aSwat1e2TWc", + pdf_location = "https://github.com/nstrayer/cv/raw/master/strayer_cv.pdf", + html_location = "nickstrayer.me/cv/", + source_location = "https://github.com/nstrayer/cv" +) +``` + +The available arguments are: + +| Argument | Description | +| -------- | ----------- | +|`full_name` | Your full name, used in title of document and header | +|`data_location` | Path of the spreadsheets holding all your data. This can be either a URL to a google sheet with multiple sheets containing the four data types or a path to a folder containing four `.csv`s with the neccesary data.| +|`pdf_location` | What location will the PDF of this CV be hosted at?| +|`html_location` | What location will the HTML version of this CV be hosted at?| +|`source_location` |Where is the code to build your CV hosted? | +|`open_files` |Should the added files be opened after creation?| +|`use_network_logo` |Should logo be an interactive network based on your CV data? Note that this uses the function `build_network_logo()` so will introduce a dependency on this package.| + +This code is all that's needed to setup a full CV. It outputs five files: + +| File | Description | +| ---- | ---- | +|`cv.rmd` | An RMarkdown file with various sections filled in. Edit this to fit your personal needs. 
| +|`dd_cv.css` | A custom set of CSS styles that build on the default `Pagedown` "resume" template. Again, edit these as desired.| +| `render_cv.r` | Use this script to build your CV in both PDF and HTML at the same time. | +| `cv_printing_functions.r` | A series of functions that perform the dirty work of turning your spreadsheet data into markdown/html and making that output work for PDF printing. E.g. replacing markdown links with superscripts and a links section, tweaking the CSS to account for Chrome printing quirks, etc. | + +# Storing your data in spreadsheets + +By default the `googlesheets4` package is used to get a Google Sheet with all necessary data. To build your own version I suggest simply copying [my data](https://docs.google.com/spreadsheets/d/14MQICF2F8-vf8CKPF1m4lyGKO6_thG-4aSwat1e2TWc/edit#gid=917338460), removing all the rows, and filling in with your data. + + +```{r} +embed_png("how_to_copy_data.png", width = "50%") +``` + + + +## Format of spreadsheets: + +There are four spreadsheets of "data" that are used. These take the form of separate sub-sheets within a Google Sheet. + +```{r} +embed_png("sub_sheets.png", width = "80%") +``` + +The four spreadsheets that are needed and their columns are: + +### `entries` +| Column | Description | | -------------- | ------------------------- | |`section` | Where in your CV this entry belongs | |`title` | Main title of the entry | |`loc` | Location the entry occurred | |`institution` | Primary institution affiliation for entry | |`start` | Start date of entry (year). Can be left blank for single point events like a manuscript. | |`end` | End year of entry. Set to "current" if entry is still ongoing. | |`description_*` | Each description column is a separate bullet point for the entry. If you need more description bullet points, simply add a new column with title "description_{4,5,..}"| + +### `language_skills` | Column | Description | | -------------- | ------------------------- | |`skill` |Name of language| |`level` |Relative numeric level of skill| + +### `text_blocks` + +| Column | Description | | -------------- | ------------------------- | | `loc` | Id used for finding text block| | `text` | Contents of text block. Supports markdown formatting.| + +### `contact_info` | Column | Description | | -------------- | ------------------------- | |`loc` | Id of contact section| |`icon` | Icon used from font-awesome 4 to label this contact section| |`contact` | The actual value written for the contact entry| + + +## Using `.csv`s instead of Google Sheets + +Don't want to use Google Sheets to store your data? Not a problem. Just make four `.csv`s (`entries.csv, language_skills.csv, text_blocks.csv, contact_info.csv`) that match the format above and pass the folder containing them as your `data_location` when initializing with `use_datadriven_cv()`. + +The function `use_csv_data_storage()` will set these up for you. + +# Rendering your CV + +Now that you have the templates set up and you've configured your data, the last thing to do is render. The easiest way to do this is by opening `cv.rmd` in RStudio and clicking the "Knit" button. This will render an HTML version of your CV. However, you most likely want a PDF version of your CV to go along with an HTML version. 
The easiest way to do this is to run the included script `render_cv.r`: + +### `render_cv.r` + +```{r, eval = FALSE, echo = TRUE} +# Knit the HTML version +rmarkdown::render("cv.rmd", + params = list(pdf_mode = FALSE), + output_file = "cv.html") + +# Knit the PDF version to temporary html location +tmp_html_cv_loc <- fs::file_temp(ext = ".html") +rmarkdown::render("cv.rmd", + params = list(pdf_mode = TRUE), + output_file = tmp_html_cv_loc) + +# Convert to PDF using Pagedown +pagedown::chrome_print(input = tmp_html_cv_loc, + output = "cv.pdf") +``` + + +```{r} +embed_png("html_vs_pdf_output.png") +``` + + + +This script will render your CV in HTML and output it as `cv.html`, it will also turn on the `pdf_mode` parameter in `cv.rmd`, which will strip the links out and place them at the end linked by inline superscripts. Once the pdf version is rendered to HTML, it will then turn that HTML into a PDF using `pagedown::chrome_print()`. By using this script you can easily make sure your get both versions rendered at the same time without having to manually go in and toggle the pdf mode parameter in the yaml header and then use the print dialog in your browser. + +# Questions? + +Confused by anything (there's a lot to be confused by)? [Open an issue on github](https://github.com/nstrayer/datadrivencv/issues/new) and let me know. Not comfortable with github issues? Tweet the question at me on Twitter: [\@nicholasstrayer](https://twitter.com/NicholasStrayer). + + + + + + + + + + diff --git a/README.md b/README.md index cd0ebf1..8cbe297 100644 --- a/README.md +++ b/README.md @@ -1,18 +1,230 @@ -# My resume created in R with Pagedown +# datadrivencv -This repo has all the code and data to render my resume in R with [pagedown package](https://pagedown.rbind.io) and it was an adaption of https://mleary.github.io/resume/ with my personal data. + ----- + -The main files are: +The goal of datadrivencv is to ease the burden of maintaining a CV by +separating the content from the output by treating entries as data. -- 'update_resume.R' : R script that renders the Rmarkdown document twice, once for HTML version and once for PDF version. This ensures both versions are always the same. -- 'resume.Rmd': Source template for both the PDF and HTML versions, based on the YAML Paramater doctype -- 'index.html': The final output of the template when the parameter doctype is set to HTML. -- 'mleary_resume.pdf': Result for single page PDF file. Note, this version has a different CSS styling sheet and is in black and white for more consistent printing. -- 'mleary_resume.html': HTML output that is used to save off pdf version. -- 'data/': CSV files with my work and skills data. I originally used an excel file, but moved this direction to make it easier to use outside of windows. -- 'css/': Directory containing the custom CSS files used to tweak the default 'resume' format from pagedown. +## Installation -Shout out to Nick Strayer, whose code was the basis for my format. I heavily borrowed from his code and approach. See the original here http://nickstrayer.me/cv/ +The development version from [GitHub](https://github.com/) with: + +``` r +# install.packages("devtools") +devtools::install_github("nstrayer/datadrivencv") +``` + +# Motivation + +## Updating a CV is not fun + +Anytime I would go to add something to my CV I ended up wanting to +change the format a tiny bit. This usually meant the entire word +document completely falling apart and needing to have each entry copied +and pasted into a new version. 
+ +Ultimately this process felt formulaic and repetitive, prime indicators +that it could be done better with code. Using a spreadsheet to store each +entry in the CV and R to write the markdown seemed like the way to go. +Pagedown made this even easier, meaning the same CV could be rendered as +interactive HTML and as a PDF without changing the underlying rendering +engine, as used to be necessary when knitting to pdf vs knitting to html. + +
+<div style="text-align: center;"> +<img src="man/figures/csv_to_cv.png" width="100%"/> +</div>
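+The core mechanic is simple enough to sketch in a few lines: every CV entry
+is one row of data, and a `glue` template writes that row out as the markdown
+`pagedown` renders. The row below is hypothetical, and the template mirrors
+the default one in `cv_printing_functions.r`:
+
+``` r
+# One made-up CV entry as a row of data
+entry <- dplyr::tibble(
+  title               = "Data Analyst",
+  loc                 = "São Paulo",
+  institution         = "Agência Pública",
+  timeline            = "2022 - Present",
+  description_bullets = "- Automated the data journalism workflow"
+)
+
+# Render the row as the markdown for one CV entry
+glue::glue_data(
+  entry,
+  "### {title}\n\n{loc}\n\n{institution}\n\n{timeline}\n\n{description_bullets}"
+)
+```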
+ +## No lock-in + +Inspired heavily by the `usethis` package, `datadrivencv` strives to +make itself unnecessary. The main function is `use_datadriven_cv()`, +which sets up the files you need to build your CV. These files are +self-contained, meaning that if you uninstall `datadrivencv` your CV +will still knit fine. All the R code logic is contained in a sourced +script, so if you want to change it you can do so. + +The package aims to bootstrap you to a working data-driven CV pipeline. +It serves as a jumping off point for you to build your own custom CV: +you may at first want to leave it as is and then slowly tweak things to +keep it fresh. You have all the code, so you can! + +# Using it + +The first step to using the package is the `use_datadriven_cv()` +function. This function takes a few input parameters and, when run, +sets up a series of files in your current working directory. E.g. + +``` r +# run ?datadrivencv::use_datadriven_cv to see more details +datadrivencv::use_datadriven_cv( + full_name = "Nick Strayer", + data_location = "https://docs.google.com/spreadsheets/d/14MQICF2F8-vf8CKPF1m4lyGKO6_thG-4aSwat1e2TWc", + pdf_location = "https://github.com/nstrayer/cv/raw/master/strayer_cv.pdf", + html_location = "nickstrayer.me/cv/", + source_location = "https://github.com/nstrayer/cv" +) +``` + +The available arguments are: + +| Argument | Description | | ------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `full_name` | Your full name, used in title of document and header | | `data_location` | Path of the spreadsheets holding all your data. This can be either a URL to a Google Sheet with multiple sheets containing the four data types or a path to a folder containing four `.csv`s with the necessary data. | | `pdf_location` | What location will the PDF of this CV be hosted at? | | `html_location` | What location will the HTML version of this CV be hosted at? | | `source_location` | Where is the code to build your CV hosted? | | `open_files` | Should the added files be opened after creation? | | `use_network_logo` | Should the logo be an interactive network based on your CV data? Note that this uses the function `build_network_logo()`, so it will introduce a dependency on this package. | + +This code is all that’s needed to set up a full CV. It outputs five +files: + +| File | Description | | ------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `cv.rmd` | An RMarkdown file with various sections filled in. Edit this to fit your personal needs. | | `dd_cv.css` | A custom set of CSS styles that build on the default `Pagedown` “resume” template. Again, edit these as desired. | | `render_cv.r` | Use this script to build your CV in both PDF and HTML at the same time. | | `cv_printing_functions.r` | A series of functions that perform the dirty work of turning your spreadsheet data into markdown/html and making that output work for PDF printing. E.g. replacing markdown links with superscripts and a links section, tweaking the CSS to account for Chrome printing quirks, etc. 
| + +# Storing your data in spreadsheets + +By default the `googlesheets4` package is used to get a Google Sheet +with all necessary data. To build your own version I suggest simply +copying [my +data](https://docs.google.com/spreadsheets/d/14MQICF2F8-vf8CKPF1m4lyGKO6_thG-4aSwat1e2TWc/edit#gid=917338460), +removing all the rows, and filling in with your data. + +
+<div style="text-align: center;"> +<img src="man/figures/how_to_copy_data.png" width="50%"/> +</div>
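+Under the hood the printing functions read each sub-sheet with
+`googlesheets4`; roughly, `create_CV_object()` in `cv_printing_functions.r`
+boils down to calls like this (a simplified sketch):
+
+``` r
+# No authentication needed when the sheet is shared as
+# "anyone with the link can view"
+googlesheets4::gs4_deauth()
+
+entries <- googlesheets4::read_sheet(
+  "https://docs.google.com/spreadsheets/d/14MQICF2F8-vf8CKPF1m4lyGKO6_thG-4aSwat1e2TWc",
+  sheet = "entries", skip = 1, col_types = "c"
+)
+```
+
+The `skip = 1` is there because the first row of each sheet holds column
+explanations rather than data.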
+ +## Format of spreadsheets: + +There are four spreadsheets of “data” that are used. These take the form +of separate sub-sheets within a Google Sheet. + +
+<div style="text-align: center;"> +<img src="man/figures/sub_sheets.png" width="80%"/> +</div>
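+To make the column layout concrete, here is a hypothetical one-row `entries`
+table, written as the equivalent `.csv` (the columns are documented below):
+
+``` r
+# A made-up example row, not from the real sheet
+readr::read_csv(
+  "section,title,loc,institution,start,end,description_1
+work,Data Analyst,São Paulo,Agência Pública,2022,current,Automated the data workflow"
+)
+```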
+ +The four spreadsheets that are needed and their columns are: + +### `entries` + +| Column | Description | | --------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- | | `section` | Where in your CV this entry belongs | | `title` | Main title of the entry | | `loc` | Location the entry occurred | | `institution` | Primary institution affiliation for entry | | `start` | Start date of entry (year). Can be left blank for single point events like a manuscript. | | `end` | End year of entry. Set to “current” if entry is still ongoing. | | `description_*` | Each description column is a separate bullet point for the entry. If you need more description bullet points, simply add a new column with title “description\_{4,5,..}” | + +### `language_skills` + +| Column | Description | | ------- | ------------------------------- | | `skill` | Name of language | | `level` | Relative numeric level of skill | + +### `text_blocks` + +| Column | Description | | ------ | ----------------------------------------------------- | | `loc` | Id used for finding text block | | `text` | Contents of text block. Supports markdown formatting. | + +### `contact_info` + +| Column | Description | | --------- | ------------------------------------------------------------ | | `loc` | Id of contact section | | `icon` | Icon used from font-awesome 4 to label this contact section | | `contact` | The actual value written for the contact entry | + +## Using `.csv`s instead of Google Sheets + +Don’t want to use Google Sheets to store your data? Not a problem. Just +make four `.csv`s (`entries.csv, language_skills.csv, text_blocks.csv, +contact_info.csv`) that match the format above and pass the folder +containing them as your `data_location` when initializing with +`use_datadriven_cv()`. + +The function `use_csv_data_storage()` will set these up for you. + +# Rendering your CV + +Now that you have the templates set up and you’ve configured your data, +the last thing to do is render. The easiest way to do this is by opening +`cv.rmd` in RStudio and clicking the “Knit” button. This will render an +HTML version of your CV. However, you most likely want a PDF version of +your CV to go along with an HTML version. The easiest way to do this is +to run the included script `render_cv.r`: + +### `render_cv.r` + +``` r +# Knit the HTML version +rmarkdown::render("cv.rmd", + params = list(pdf_mode = FALSE), + output_file = "cv.html") + +# Knit the PDF version to temporary html location +tmp_html_cv_loc <- fs::file_temp(ext = ".html") +rmarkdown::render("cv.rmd", + params = list(pdf_mode = TRUE), + output_file = tmp_html_cv_loc) + +# Convert to PDF using Pagedown +pagedown::chrome_print(input = tmp_html_cv_loc, + output = "cv.pdf") +``` + +
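+If you prefer the console to the Knit button, sourcing the script from an R
+session does the same thing:
+
+``` r
+# Produces cv.html and cv.pdf in the current working directory
+source("render_cv.r")
+```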
+<div style="text-align: center;"> +<img src="man/figures/html_vs_pdf_output.png" width="100%"/> +</div>
+ +This script will render your CV in HTML and output it as `cv.html`, it +will also turn on the `pdf_mode` parameter in `cv.rmd`, which will strip +the links out and place them at the end linked by inline superscripts. +Once the pdf version is rendered to HTML, it will then turn that HTML +into a PDF using `pagedown::chrome_print()`. By using this script you +can easily make sure your get both versions rendered at the same time +without having to manually go in and toggle the pdf mode parameter in +the yaml header and then use the print dialog in your browser. + +# Questions? + +Confused by anything (there’s a lot to be confused by)? [Open an issue +on github](https://github.com/nstrayer/datadrivencv/issues/new) and let +me know. Not comfortable with github issues? Tweet the question at me on +Twitter: [@nicholasstrayer](https://twitter.com/NicholasStrayer). + + + + + + + + + + + + + + + + diff --git a/_pkgdown.yml b/_pkgdown.yml new file mode 100644 index 0000000..3015129 --- /dev/null +++ b/_pkgdown.yml @@ -0,0 +1,12 @@ +destination: docs +home: + title: An R package for building your CV with data + description: Templates and helper functions for building a CV with spreadsheets +template: + opengraph: + image: + src: man/figures/logo.svg + alt: "Build your CV with data" + twitter: + creator: "@nicholasstrayer" + card: summary_large_image diff --git a/cv.Rmd b/cv.Rmd new file mode 100644 index 0000000..1c040d8 --- /dev/null +++ b/cv.Rmd @@ -0,0 +1,120 @@ +--- +title: "Bianca Muniz's CV" +author: Bianca Muniz +date: "`r Sys.Date()`" +params: + pdf_mode: + value: true +output: + pagedown::html_resume: + css: ['dd_cv.css', 'resume'] + self_contained: true +--- + +```{r, include=FALSE} +knitr::opts_chunk$set( + results='asis', + echo = FALSE +) + +library(magrittr) # For the pipe +source("cv_printing_functions.r") + +# Read in all data and initialize a CV printer object +CV <- create_CV_object( + data_location = "https://docs.google.com/spreadsheets/d/1E7F5P5SA0-RW2OeCfpuQSEEGTjvc03_xcYQvnqR_zuA/edit#gid=1730172225", + pdf_mode = params$pdf_mode +) + +``` + +```{r} +# When in pdf export mode the little dots are unaligned, so fix that with some conditional CSS. +if(params$pdf_mode) { + cat(" +") +} +``` + +# Aside + +```{r} +if(params$pdf_mode){ + cat("View this CV online with links at biamuniz.github.io/resume/") +} else { + cat("[ Download a PDF of this CV](https://github.com/biamuniz/resume/raw/master/bmuniz_cv.pdf)") +} +``` + +## Contact {#contact} + +```{r} +CV %>% print_contact_info() +``` + +## Languages {#skills} + +```{r} +CV %>% print_skill_bars() +``` + + +## Tech skills {#hard} + +```{r} +CV %>% print_hard_bars() +``` + +





















+ + +## Soft skills {#soft} + +```{r} +CV %>% print_soft_bars() +``` + + +## Disclaimer {#disclaimer} + +Made with the R package [**pagedown**](https://github.com/rstudio/pagedown). + +The source code is available [on github.com/biamuniz/resume](https://github.com/biamuniz/resume). + +Last updated on `r Sys.Date()`. + +# Main + +## Bianca Muniz {#title} + +```{r} +# Note the special double pipe so we modify the CV object in place +CV %<>% print_text_block("intro") +``` + +## Work Experience {data-icon="suitcase"} + +```{r} +CV %<>% print_section('work') +``` + +## Education {data-icon="graduation-cap"} + +```{r} +CV %<>% print_section('education') +``` + + + +## Additional experiences {data-icon="presentation"} + +```{r} +CV %<>% print_section('talks') +``` + + + diff --git a/en/biancamuniz_resume_pdf_en.html b/cv.html similarity index 95% rename from en/biancamuniz_resume_pdf_en.html rename to cv.html index 3dd8e06..9d1a21e 100644 --- a/en/biancamuniz_resume_pdf_en.html +++ b/cv.html @@ -7,8 +7,8 @@ - - bmuniz_resume_en + + Bianca Muniz’s CV +

Aside

+

View this CV online with links at biamuniz.github.io/resume/

Contact

-


+

Languages

+
+Portuguese - Native +
+
+English – Advanced +
+
+Spanish – Basic +
+
+

Tech skills

-

+
+R - Intermediate
-
-

R skills

-

+
+Python - Intermediate +
+
+HTML - Basic +
+
+Excel - Advanced +
+
+Flourish.studio - Advanced +
+
+Figma - Advanced +
+
+Data analysis - Advanced +
+























+
+
+

Soft skills

+
+Project management – Advanced +
+
+Data analysis - Advanced +
+
+Scientific method +
+
+FOIA/LAI request
-
-

Languages

-
    -
  • Portuguese
  • -
  • English
  • -
  • Spanish
  • -

Disclaimer

-

Made w/ pagedown::html_resume -Code: github.com/biamuniz/resume

-

Last updated on 2023-12-02.

+

Made with the R package pagedown.

+

The source code is available on github.com/biamuniz/resume.

+

Last updated on 2023-12-03.

Main

Bianca Muniz

-

I enjoy working with data to find and deliver insights, solve business problems, and build tools that help people work more efficiently. I moved into data science after 10+ years working in public policy and public relations.

+

Data analyst at the Agência Pública de Jornalismo Investigativo, with a master’s degree in Science from Unifesp and a specialization in Data Journalism, Automation and Data Storytelling from Insper. I have experience in data analysis and programming in R and Python.

-
-

Professional Experience

+
+

Work Experience

Data Analyst

Agência Pública de Jornalismo Investigativo

-

Sao Paulo-SP

+

N/A

Present - 2022

• Create automation solutions for the data journalism workflow and assist with data-driven stories
  • -
  • Participated in the Map of Conflict, a project by the investigative journalism outlet Agência Pública, in partnership with the Pastoral Land Commission (CPT). Using original data analysis, it investigates instances of rural conflict in Brazil’s Legal Amazon region in the last decade (2011-2020)
  • +
• Participated in the Map of Conflict, a project by Agência Pública in partnership with the Pastoral Land Commission (CPT) that uses original data analysis to investigate instances of rural conflict in Brazil’s Legal Amazon region over the last decade (2011-2020). The project was shortlisted for the Claudio Weber Abramo Award for Data Journalism (2022) and the Sigma Awards (2023), won the Design for Better World Award (2022), and took 2nd place at the 39th Human Rights of Journalism Award
  • +
  • Agência Nacional de Águas e Saneamento Award 2023 - Shortlisted
  • +
• Honorable mention at the 39th Human Rights of Journalism Award with the podcast episode “Quem lucra com os rios que secam”
@@ -32542,31 +32574,23 @@

Data Journalism Intern

N/A

2022 - 2020

    -
  • Participated in coverage recognized for the Roche Health Journalism Award and finalist reports of Sigma Awards and Claudio Weber Abramo Award from Data Journalism.
  • -
  • +
• Participated in coverage recognized with the Roche Health Journalism Award and in finalist reports for the Sigma Awards (2021) and the Claudio Weber Abramo Award for Data Journalism (2022).
  • +
  • Amazon Rainforest Journalism Fund grantee, from Pulitzer Center (2021)

Vice-President Director

Jornalismo Júnior

N/A

-

2019

+

2019 - 2018

• Headed the financial planning of Jornalismo Júnior, with activities like cash flow updates and legal regulation
  • Managed internal and external projects. One of them was “São Paulo sem Migué”, a fact-checking project about the city of São Paulo
  • -
-
-
-

News reporter

-

Jornalismo Júnior

-

N/A

-

2018

-
    -
  • Wrote articles about sciences, sports, culture and politics
  • +
• Before the director role, wrote articles about science, sports, culture and politics as a news reporter
-
+

Education

Bachelor of Journalism

@@ -32576,6 +32600,7 @@

Bachelor of Journalism

  • Member of the Junior Enterprise of Journalism at ECA -USP (2018 - 2019)
  • Monitor of the Summer Courses of the Institute of Mathematics and Statistics (IME) of USP “Python for Data Analysis” (2022) and “R for data analysis” (2023)
  • +
• Entrepreneurship and Innovation Scholarship (Agência USP de Inovação - AUSPIN, 2022). With this scholarship, I spent four months at the University of Texas at Austin researching how data journalism is taught in the USA
@@ -32585,7 +32610,6 @@

Master in Data Journalism, Automation and Data Storytelling

2022 - 2021

  • Specialization with double certification
  • -
@@ -32594,8 +32618,8 @@

MS in Pharmacology

N/A

2022 - 2018

    -
  • Master in Sciences from the Postgraduate Program in Pharmacology (Concept CAPES 6), with a project entitled “Pharmacological and Non-Farmacological Manipulations for the reestablishment of a” hedonic tone “in an animal model of schizophrenia: the SHR strain”
  • -
  • CAPES scholarship (2018 - 2019)
  • +
  • Master in Sciences from the Postgraduate Program in Pharmacology (Concept CAPES 6)
  • +
• Developed a project entitled “Pharmacological and Non-Pharmacological Manipulations for the reestablishment of a ‘hedonic tone’ in an animal model of schizophrenia: the SHR strain”. CAPES scholarship (2018 - 2019)
@@ -32607,41 +32631,11 @@

Bachelor of Biomedicine

  • Participated in the development of the “Patógenos em Jogo”, an extension project of Unifesp (2016)
  • Coordinated the XV Biomedicine Winter Course, at Unifesp (2017)
  • +
    -
    -

    Awards

    -
    -

    Awards, shortlists and honorable mentions

    -

    Associated with the Agência Pública de Jornalismo Investigativo

    -

    N/A

    -

    2021

    -
      -
    • Claudio Weber Abramo Award for Data Journalism - Shortlist (2021)
    • -
    • The Sigma Awards 2021 - Shortlisted with the work
    • -
    • Roche Health Journalism Award (Honorable Mention)
    • -
    • 39th Human Rights of Journalism Award
    • -
    • 39th Human Rights of Journalism Award
    • -
    • Claudio Weber Abramo Award for Data Journalism - Shortlist (2022)
    • -
    • The Sigma Awards 2022 - Shortlisted with the work “Map of Conflict”
    • -
    • Agência Nacional de Águas e Saneamento Award 2023 - Shortlisted
    • -
    • 39th Human Rights Award for Journalism (Honorable Mention)
    • -
    • Design for Better World Award 2022 - Winner with the work “Map of Conflict”
    • -
    -
    -
    -

    Scholarships and Fellowships

    -

    N/A

    -

    N/A

    -

    N/A

    -
      -
    • Entrepreneurship and Innovation Scholarship (Agência USP de Inovação - AUSPIN, 2022). With this scholarship, I developed a research project for four months at University of Texas at Austin about what data journalism teaching is like in the USA
    • -
    • Amazon Rainforest Journalism Fund, from Pulitzer Center (2021)
    • -
    -
    -
    -
    -

    Talks

    +
    +

    Additional experiences

    Lectures and workshops

    N/A

    @@ -32653,29 +32647,24 @@

    Lectures and workshops

  • Climate Data Laboratory - Alma Preta Jornalismo (2023, Escola de Dados)
  • Lecture “Dataviz at the Agência Pública” (2023, Unisinos)
  • Introduction to R and Tidyverse (2023, R-Ladies São Paulo)
  • -
  • Workshop “Python + R together with Quarto: your reports will never be the same” (2023, CODA Amazônia)
  • -
  • Workshop “Spreadsheet: a great ally of data analysis” (2023, CODA Amazônia)
  • -
  • Lecture: “Data visualization reveal social problems” (2023, Domingo de Dados - 18th Abraji Congress)
  • +
  • Workshops at CODA Amazônia 2023 and CODA.Br 2022
  • +
  • Lectures at the Abraji Congress in 2022 and 2023
  • Workshop “Open Data Analysis with R - Open Data Day” (2023, R -Ladies São Paulo)
  • -
  • Workshop “Data journalism with spreadsheet editors” (2022, CODA.Br)
  • -
  • Webinar “the public in data and the map of conflicts” (2022, data school)
  • -
  • Data journalism to change realities (2022, Data Sunday - 17th Abraji Congress)
  • Lecture: “Business Chain - Data in all sectors of the economy: Data journalism” (2021, BIX Technology)
  • -
    -
    -

    Voluntary work

    +
    +

    Voluntary work

    +

    N/A

    +

    N/A

    +

    N/A

      -
    • R-Ladies São Paulo
    • -
    • Cursinho pré-vestibular Jeannine Aboulafia
    • +
    • Volunteered at the Texas Tribune Festival, an event about politics and public policy in Texas
    • +
    • Taught literature at the Unifesp pre-university course
    • +
    • Member of R-Ladies São Paulo; R-Ladies is an organization that promotes gender diversity in the R programming language community. As a member, I manage the social media and coordinate events
    • +
• Volunteered at the Austin Parks Foundation. In this role, I worked as part of a team with people from across the state of Texas to ensure the sustainability of the ACL Festival.
    -
    -

    Disclaimer

    -

    Made w/ pagedown::html_resume -Code: github.com/biamuniz/resume

    -

    Last updated on 2023-12-02.

    diff --git a/cv_printing_functions.r b/cv_printing_functions.r new file mode 100644 index 0000000..cc13a7c --- /dev/null +++ b/cv_printing_functions.r @@ -0,0 +1,278 @@ +# This file contains all the code needed to parse and print various sections of your CV +# from data. Feel free to tweak it as you desire! + + +#' Create a CV_Printer object. +#' +#' @param data_location Path of the spreadsheets holding all your data. This can be +#' either a URL to a google sheet with multiple sheets containing the four +#' data types or a path to a folder containing four `.csv`s with the neccesary +#' data. +#' @param source_location Where is the code to build your CV hosted? +#' @param pdf_mode Is the output being rendered into a pdf? Aka do links need +#' to be stripped? +#' @param sheet_is_publicly_readable If you're using google sheets for data, +#' is the sheet publicly available? (Makes authorization easier.) +#' @return A new `CV_Printer` object. +create_CV_object <- function(data_location, + pdf_mode = FALSE, + sheet_is_publicly_readable = TRUE) { + + cv <- list( + pdf_mode = pdf_mode, + links = c() + ) + + is_google_sheets_location <- stringr::str_detect(data_location, "docs\\.google\\.com") + + if(is_google_sheets_location){ + if(sheet_is_publicly_readable){ + # This tells google sheets to not try and authenticate. Note that this will only + # work if your sheet has sharing set to "anyone with link can view" + googlesheets4::gs4_deauth() + } else { + # My info is in a public sheet so there's no need to do authentication but if you want + # to use a private sheet, then this is the way you need to do it. + # designate project-specific cache so we can render Rmd without problems + options(gargle_oauth_cache = ".secrets") + } + + read_gsheet <- function(sheet_id){ + googlesheets4::read_sheet(data_location, sheet = sheet_id, skip = 1, col_types = "c") + } + cv$entries_data <- read_gsheet(sheet_id = "entries") + cv$skills <- read_gsheet(sheet_id = "language_skills") + cv$soft <- read_gsheet(sheet_id = "soft_skills") + cv$hard <- read_gsheet(sheet_id = "hard_skills") + cv$text_blocks <- read_gsheet(sheet_id = "text_blocks") + cv$contact_info <- read_gsheet(sheet_id = "contact_info") + } else { + # Want to go old-school with csvs? 
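+  # NOTE: cv.Rmd in this repo also calls print_soft_bars() and print_hard_bars(),
+  # which expect cv$soft and cv$hard, but the csv branch below only reads the four
+  # standard files. If you use csv storage, read those in too, e.g. (assuming
+  # matching soft_skills.csv and hard_skills.csv files):
+  # cv$soft <- readr::read_csv(paste0(data_location, "soft_skills.csv"), skip = 1)
+  # cv$hard <- readr::read_csv(paste0(data_location, "hard_skills.csv"), skip = 1)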
+ cv$entries_data <- readr::read_csv(paste0(data_location, "entries.csv"), skip = 1) + cv$skills <- readr::read_csv(paste0(data_location, "language_skills.csv"), skip = 1) + cv$text_blocks <- readr::read_csv(paste0(data_location, "text_blocks.csv"), skip = 1) + cv$contact_info <- readr::read_csv(paste0(data_location, "contact_info.csv"), skip = 1) + } + + + extract_year <- function(dates){ + date_year <- stringr::str_extract(dates, "(20|19)[0-9]{2}") + date_year[is.na(date_year)] <- lubridate::year(lubridate::ymd(Sys.Date())) + 10 + + date_year + } + + parse_dates <- function(dates){ + + date_month <- stringr::str_extract(dates, "(\\w+|\\d+)(?=(\\s|\\/|-)(20|19)[0-9]{2})") + date_month[is.na(date_month)] <- "1" + + paste("1", date_month, extract_year(dates), sep = "-") %>% + lubridate::dmy() + } + + # Clean up entries dataframe to format we need it for printing + cv$entries_data %<>% + tidyr::unite( + tidyr::starts_with('description'), + col = "description_bullets", + sep = "\n- ", + na.rm = TRUE + ) %>% + dplyr::mutate( + description_bullets = ifelse(description_bullets != "", paste0("- ", description_bullets), ""), + start = ifelse(start == "NULL", NA, start), + end = ifelse(end == "NULL", NA, end), + start_year = extract_year(start), + end_year = extract_year(end), + no_start = is.na(start), + has_start = !no_start, + no_end = is.na(end), + has_end = !no_end, + timeline = dplyr::case_when( + no_start & no_end ~ "N/A", + no_start & has_end ~ as.character(end), + has_start & no_end ~ paste("Current", "-", start), + TRUE ~ paste(end, "-", start) + ) + ) %>% + dplyr::arrange(desc(parse_dates(end))) %>% + dplyr::mutate_all(~ ifelse(is.na(.), 'N/A', .)) + + cv +} + + +# Remove links from a text block and add to internal list +sanitize_links <- function(cv, text){ + if(cv$pdf_mode){ + link_destinations <- stringr::str_extract_all(text, '(?<=\\().+?(?=\\))')[[1]] + + n_links <- length(cv$links) + n_new_links <- length(link_destinations) + + if(n_new_links > 0){ + # add links to links array + cv$links <- c(cv$links, link_destinations) + } + } + + list(cv = cv, text = text) +} + + + + +#' @description Take a position data frame and the section id desired and prints the section to markdown. +#' @param section_id ID of the entries section to be printed as encoded by the `section` column of the `entries` table +print_section <- function(cv, section_id, glue_template = "default"){ + + if(glue_template == "default"){ + glue_template <- " +### {title} + +{loc} + +{institution} + +{timeline} + +{description_bullets} +\n\n\n" + } + + section_data <- dplyr::filter(cv$entries_data, section == section_id) + + # Take entire entries data frame and removes the links in descending order + # so links for the same position are right next to each other in number. + for(i in 1:nrow(section_data)){ + for(col in c('title', 'description_bullets')){ + strip_res <- sanitize_links(cv, section_data[i, col]) + section_data[i, col] <- strip_res$text + cv <- strip_res$cv + } + } + + print(glue::glue_data(section_data, glue_template)) + + invisible(strip_res$cv) +} + + + +#' @description Prints out text block identified by a given label. +#' @param label ID of the text block to print as encoded in `label` column of `text_blocks` table. 
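+#'   (Note: `label` is matched against the `loc` column of the `text_blocks`
+#'   table; there is no literal `label` column.)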
+print_text_block <- function(cv, label){ + text_block <- dplyr::filter(cv$text_blocks, loc == label) %>% + dplyr::pull(text) + + strip_res <- sanitize_links(cv, text_block) + + cat(strip_res$text) + + invisible(strip_res$cv) +} + + + +#' @description Construct a bar chart of skills +#' @param out_of The relative maximum for skills. Used to set what a fully filled in skill bar is. +print_skill_bars <- function(cv, out_of = 3, bar_color = "lightpink", bar_background = "white", glue_template = "default"){ + + if(glue_template == "default"){ + glue_template <- " +
<div class = 'skill-bar' style = 'background:linear-gradient(to right, {bar_color} {width_percent}%, {bar_background} {width_percent}% 100%)'>{skill}</div>
    " + } + cv$skills %>% + dplyr::mutate(width_percent = round(100*as.numeric(level)/out_of)) %>% + glue::glue_data(glue_template) %>% + print() + + invisible(cv) +} + + +#' @description Construct a bar chart of skills +#' @param out_of The relative maximum for skills. Used to set what a fully filled in skill bar is. +print_hard_bars <- function(cv, out_of = 3, bar_color = "lightpink", bar_background = "white", glue_template = "default"){ + + if(glue_template == "default"){ + glue_template <- " +
<div class = 'skill-bar' style = 'background:linear-gradient(to right, {bar_color} {width_percent}%, {bar_background} {width_percent}% 100%)'>{skill}</div>
    " + } + cv$hard %>% + dplyr::mutate(width_percent = round(100*as.numeric(level)/out_of)) %>% + glue::glue_data(glue_template) %>% + print() + + invisible(cv) +} + + + +#' @description Construct a bar chart of skills +#' @param out_of The relative maximum for skills. Used to set what a fully filled in skill bar is. +print_soft_bars <- function(cv, out_of = 3, bar_color = "lightpink", bar_background = "white", glue_template = "default"){ + + if(glue_template == "default"){ + glue_template <- " +
<div class = 'skill-bar' style = 'background:linear-gradient(to right, {bar_color} {width_percent}%, {bar_background} {width_percent}% 100%)'>{skill}</div>
    " + } + cv$soft %>% + dplyr::mutate(width_percent = round(100*as.numeric(level)/out_of)) %>% + glue::glue_data(glue_template) %>% + print() + + invisible(cv) +} + + + + +#' @description List of all links in document labeled by their superscript integer. +print_links <- function(cv) { + n_links <- length(cv$links) + if (n_links > 0) { + cat(" +Links {data-icon=link} +-------------------------------------------------------------------------------- + +
    + + +") + + purrr::walk2(cv$links, 1:n_links, function(link, index) { + print(glue::glue('{index}. {link}')) + }) + } + + invisible(cv) +} + + + +#' @description Contact information section with icons +print_contact_info <- function(cv){ + glue::glue_data( + cv$contact_info, + "- {contact}" + ) %>% print() + + invisible(cv) +} diff --git a/data/.DS_Store b/data/.DS_Store deleted file mode 100644 index 5008ddf..0000000 Binary files a/data/.DS_Store and /dev/null differ diff --git a/data/archve_old_resume_data.xlsx b/data/archve_old_resume_data.xlsx deleted file mode 100644 index 6c780bf..0000000 Binary files a/data/archve_old_resume_data.xlsx and /dev/null differ diff --git a/data/bmuniz_cv_data - contact_info.csv b/data/bmuniz_cv_data - contact_info.csv new file mode 100644 index 0000000..a52fe8b --- /dev/null +++ b/data/bmuniz_cv_data - contact_info.csv @@ -0,0 +1,5 @@ +Id of contact section,Icon used from font-awesome 4 to label this contact section,The actual value written for the contact entry +loc,icon,contact +email,envelope,biancamuniz@apublica.org +github,github,github.com/biamuniz +linkedin,linkedin,linkedin.com/in/bmuniz \ No newline at end of file diff --git a/data/bmuniz_cv_data - entries.csv b/data/bmuniz_cv_data - entries.csv new file mode 100644 index 0000000..98c5857 --- /dev/null +++ b/data/bmuniz_cv_data - entries.csv @@ -0,0 +1,12 @@ +section,title,loc,institution,start,end,description_1,description_2,description_3,description_4,description_5,description_6,description_7,description_8,description_9,description_10,description_11,description_12,description_13,in_resume +work,Data Analyst,São Paulo,Agência Pública de Jornalismo Investigativo,2022,Present,Create automation solutions for the data journalism workflow and assist in works that are data-driven,"Participated in the Map of Conflict, a project by the investigative journalism outlet Agência Pública, in partnership with the Pastoral Land Commission (CPT). 
Using original data analysis, it investigates instances of rural conflict in Brazil’s Legal Amazon region in the last decade (2011-2020)",NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,TRUE +education,Bachelor of Journalism,São Paulo,University of São Paulo (USP),2018,2023,Member of the Junior Enterprise of Journalism at ECA -USP (2018 - 2019),"Monitor of the Summer Courses of the Institute of Mathematics and Statistics (IME) of USP ""Python for Data Analysis"" (2022) and ""R for data analysis"" (2023)",NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,TRUE +education,"Master in Data Journalism, Automation and Data Storytelling",São Paulo,Insper,2021,2022,Specialization with double certification,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,TRUE +work,Data Journalism Intern,São Paulo,Agência Pública de Jornalismo Investigativo,2020,2022,Participated in coverage recognized for the Roche Health Journalism Award and finalist reports of Sigma Awards and Claudio Weber Abramo Award from Data Journalism.,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,TRUE +education,MS in Pharmacology,São Paulo,Federal University of São Paulo - UNIFESP,2018,2022,"Master in Sciences from the Postgraduate Program in Pharmacology (Concept CAPES 6), with a project entitled ""Pharmacological and Non-Farmacological Manipulations for the reestablishment of a"" hedonic tone ""in an animal model of schizophrenia: the SHR strain""",CAPES scholarship (2018 - 2019),NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,TRUE +awards,"Awards, shortlists and honorable mentions",São Paulo,Associated with the Agência Pública de Jornalismo Investigativo,2020,Present,Claudio Weber Abramo Award for Data Journalism - Shortlist (2021),The Sigma Awards 2021 - Shortlisted with the work ,Roche Health Journalism Award (Honorable Mention),39th Human Rights of Journalism Award,39th Human Rights of Journalism Award,Claudio Weber Abramo Award for Data Journalism - Shortlist (2022),"The Sigma Awards 2022 - Shortlisted with the work ""Map of Conflict""",Agência Nacional de Águas e Saneamento Award 2023 - Shortlisted,39th Human Rights Award for Journalism (Honorable Mention),"Design for Better World Award 2022 - Winner with the work ""Map of Conflict""",NA,NA,NA,TRUE +work,Vice-President Director,São Paulo,Jornalismo Júnior,2019,2019,"Headed the financial planning of the Jornalismo Junior, with activities like: cash flow update, and legal regulation","Managed internal and external projects. 
One of them was ""São Paulo sem Migué"", a fact-checking project about the city of São Paulo",NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,TRUE +work,News reporter,São Paulo,Jornalismo Júnior,2018,2018,"Wrote articles about sciences, sports, culture and politics",NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,TRUE +education,Bachelor of Biomedicine,São Paulo,Federal University of São Paulo - UNIFESP,2014,2017,"Participated in the development of the ""Patógenos em Jogo"", an extension project of Unifesp (2016)","Coordinated the XV Biomedicine Winter Course, at Unifesp (2017)",NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,TRUE +talks,Lectures and workshops,São Paulo,NA,2021,2023,"Lecture: ""Step by step of a data driven story with the Map of Conflict"" (2023, ""Coverage of Socio-Environmental Conflicts"" - Portal Assobiar)","Lecture: ""Intro to Data Journalism"" at the ""Communication Course: Practice and Reflection"" (2023, Colabora)","Climate Data Laboratory - Alma Preta Jornalismo (2023, Escola de Dados)","Lecture ""Dataviz at the Agência Pública"" (2023, Unisinos)","Introduction to R and Tidyverse (2023, R-Ladies São Paulo)","Workshop “Python + R together with Quarto: your reports will never be the same” (2023, CODA Amazônia)","Workshop “Spreadsheet: a great ally of data analysis” (2023, CODA Amazônia)","Lecture: ""Data visualization reveal social problems"" (2023, Domingo de Dados - 18th Abraji Congress)","Workshop ""Open Data Analysis with R - Open Data Day"" (2023, R -Ladies São Paulo)","Workshop “Data journalism with spreadsheet editors” (2022, CODA.Br)","Webinar “the public in data and the map of conflicts” (2022, data school)","Data journalism to change realities (2022, Data Sunday - 17th Abraji Congress)","Lecture: ""Business Chain - Data in all sectors of the economy: Data journalism"" (2021, BIX Technology)",TRUE +awards,Scholarships and Fellowships,São Paulo,NA,2021,2023,"Entrepreneurship and Innovation Scholarship (Agência USP de Inovação - AUSPIN, 2022). With this scholarship, I developed a research project for four months at University of Texas at Austin about what data journalism teaching is like in the USA","Amazon Rainforest Journalism Fund, from Pulitzer Center (2021)",NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,TRUE \ No newline at end of file diff --git a/data/bmuniz_cv_data - language_skills.csv b/data/bmuniz_cv_data - language_skills.csv new file mode 100644 index 0000000..51cc39e --- /dev/null +++ b/data/bmuniz_cv_data - language_skills.csv @@ -0,0 +1,5 @@ +Name of language,Relative numeric level of skill +skill,level +R,5 +Python,5 +HTML,3.5 \ No newline at end of file diff --git a/data/bmuniz_cv_data - text_blocks.csv b/data/bmuniz_cv_data - text_blocks.csv new file mode 100644 index 0000000..aa400f1 --- /dev/null +++ b/data/bmuniz_cv_data - text_blocks.csv @@ -0,0 +1,7 @@ +Id used for finding text block,Contents of text block. Supports markdown formatting. +loc,text +intro,"I have made [visualizations viewed by hundreds of thousands of people](https://www.nytimes.com/interactive/2016/08/26/us/college-student-migration.html), [sped up query times for 25 terabytes of data by an average of 4,800 times](https://livefreeordichotomize.com/2019/06/04/using_awk_and_r_to_parse_25tb/), and built [packages for R](https://github.com/nstrayer/shinysense) that let you [do magic](http://nickstrayer.me/dataDayTexas/). +" +industy_experience_aside,I have worked in a variety of roles ranging from journalist to software engineer to data scientist. I like collaborative environments where I can learn from my peers. 
+teaching_experience_aside,I am passionate about education. I believe that no topic is too complex if the teacher is empathetic and willing to think about new methods of approaching task. +data_science_writing_aside,I regularly blog about data science and visualization on my blog [LiveFreeOrDichotomize.](https://livefreeordichotomize.com/) \ No newline at end of file diff --git a/data/contact_info.csv b/data/contact_info.csv new file mode 100644 index 0000000..7bb6a1d --- /dev/null +++ b/data/contact_info.csv @@ -0,0 +1,7 @@ +Id of contact section,Icon used from font-awesome 4 to label this contact section,The actual value written for the contact entry +loc,icon,contact +email,envelope,nick.strayer@gmail.com +twitter,twitter,NicholasStrayer +github,github,github.com/nstrayer +website,link,nickstrayer.me +linkedin,linkedin,linkedin.com/in/nickstrayer \ No newline at end of file diff --git a/data/entries.csv b/data/entries.csv new file mode 100644 index 0000000..ccb2d9e --- /dev/null +++ b/data/entries.csv @@ -0,0 +1,47 @@ +Where in your CV this entry belongs,Main title of the entry,Location the entry occured,Primary institution affiliation for entry,Start date of entry (year),"End year of entry. Set to ""current"" if entry is still ongoing.","Each description column is a separate bullet point for the entry. If you need more description bullet points simply add a new column with title ""description_{4,5,..}""",,,A filter variable that is used to decide if entry is in the smaller resume. +section,title,loc,institution,start,end,description_1,description_2,description_3,in_resume +education,"PhD. Candidate, Biostatistics",Vanderbilt University,"Nashville, TN",2015,,Working on Bayesian network models & interactive visualization platforms,University Graduate Fellow,,TRUE +education,"B.S., Mathematics, Statistics (minor C.S.)",University of Vermont,"Burlington, VT",2011,2015,Thesis: An agent based model of Diel Vertical Migration patterns of Mysis diluviana,,,TRUE +research_positions,Research Assistant,Adair Laboratory,University of Vermont,2012,2013,Independently analyzed and constructed statistical models for large data sets pertaining to carbon decomposition rates.,,,FALSE +research_positions,Undergraduate Researcher,Bentil Laboratory,University of Vermont,2013,2014,Developed mathematical model to predict the transport of sulfur through the environment with applications in waste cleanup.,,,FALSE +research_positions,Undergraduate Researcher,Rubenstein Ecosystems Science Laboratory,University of Vermont,2013,2015,Analyzed and visualized data for CATOS fish tracking project.,Head of data mining project to establish temporal trends in population densities of Mysis diluviana (Mysis).,Ran project to mathematically model the migration patterns of Mysis (honors thesis project.),FALSE +research_positions,Human Computer Interaction Researcher,LabInTheWild (Reineke Lab),University of Michigan,2015,2015,Led development and implementation of interactive data visualizations to help users compare themselves to other demographics.,,,FALSE +research_positions,Graduate Research Assistant,TBILab (Yaomin Xu's Lab),Vanderbilt University,2015,,Primarily working with large EHR and Biobank datasets.,Developing network-based methods to investigate and visualize clinically relevant patterns in data.,,TRUE +research_positions,Data Science Researcher,Data Science Lab,Johns Hopkins University,2017,2018,Building R Shiny applications in the contexts of wearables and statistics education.,Work primarily done in R Shiny and 
Javascript (node and d3js).,,TRUE +industry_positions,Software Engineering Intern,Conduce,"Carpinteria, CA",2014,2014,Incorporated d3.js to the company's main software platform.,,,FALSE +industry_positions,Engineering Intern - User Experience,Dealer.com,"Burlington, VT",2015,2015,Built internal tool to help analyze and visualize user interaction with back-end products.,,,FALSE +industry_positions,Data Science Intern,Dealer.com,"Burlington, VT",2015,2015,Worked with the product analytics team to help parse and visualize large stores of data to drive business decisions.,,,FALSE +industry_positions,Data Artist In Residence,Conduce,"Carpinteria, CA",2014,2015,"Envisioned, prototyped and implemented visualization framework in the course of one month.",Constructed training protocol for bringing third parties up to speed with new protocol.,,FALSE +industry_positions,Data Journalist - Graphics Department,New York Times,"New York, New York",2016,2016,"Reporter with the graphics desk covering topics in science, politics, and sport.","Work primarily done in R, Javascript, and Adobe Illustrator.",,TRUE +teaching_positions,Javascript for Shiny Users,RStudio::conf 2020,N/A,,2020,Served as TA for two day workshop on how to leverage Javascript in Shiny applications,Lectured on [using R2D3 package to build interactive visualizations.](http://nickstrayer.me/js4shiny_r2d3/slides),,FALSE +teaching_positions,Statistical Computing in R,Vanderbilt Biostatistics Department,"Nashville, TN",2017,2017,TA and lectured,Covered introduction to R language for statistics applications,Graduate level class,FALSE +teaching_positions,Advanced Statistical Learning and Inference,Vanderbilt Biostatistics Department,"Nashville, TN",2017,2018,TA and lectured,Topics covered from penalized regression to boosted trees and neural networks,Highest level course offered in department,FALSE +teaching_positions,Advanced Statistical Computing,Vanderbilt Biostatistics Department,"Nashville, TN",2018,2018,TA and lectured,Covered modern statistical computing algorithms,4th year PhD level class,FALSE +teaching_positions,Data Visualization Best Practices,DataCamp,,2019,2019,Designed from bottom up course to teach best practices for scientific visualizations.,Uses R and ggplot2.,In top 10% on platform by popularity.,FALSE +teaching_positions,Improving your visualization in Python,DataCamp,,2019,2019,Designed from bottom up course to teach advanced methods for enhancing visualization.,"Uses python, matplotlib, and seaborn.",,FALSE +data_science_writings,[Classifying physical activity from smartphone data](https://blogs.rstudio.com/tensorflow/posts/2018-07-17-activity-detection/),RStudio Tensorflow Blog,,,2018,Walk through of training a convolutional neural network to achieve state of the art recognition of activities from accelerometer data.,Contracted article.,,FALSE +data_science_writings,[Using AWK and R to Parse 25tb](https://livefreeordichotomize.com/2019/06/04/using_awk_and_r_to_parse_25tb/),LiveFreeOrDichotomize.com,,,2019,Story of parsing large amounts of genomics data.,Provided advice for dealing with data much larger than disk.,Reached top of HackerNews.,TRUE +data_science_writings,[The United States of Seasons](https://livefreeordichotomize.com/2018/02/12/the-united-states-of-seasons/),LiveFreeOrDichotomize.com,,,2018,GIS analysis of weather data to find the most 'seasonal' locations in United States,Used Bayesian regression methods for smoothing sparse geospatial data.,,FALSE +data_science_writings,[A year as told by 
fitbit](https://livefreeordichotomize.com/2017/12/27/a-year-as-told-by-fitbit/),LiveFreeOrDichotomize.com,,,2017,Analyzing a full years worth of second-level heart rate data from wearable device.,Demonstrated visualization-based inference for large data.,,FALSE +data_science_writings,[MCMC and the case of the spilled seeds](https://livefreeordichotomize.com/2017/10/14/mcmc-and-the-case-of-the-spilled-seeds/),LiveFreeOrDichotomize.com,,,2017,Full Bayesian MCMC sampler running in your browser.,Coded from scratch in vanilla Javascript.,,FALSE +data_science_writings,[The Traveling Metallurgist](https://livefreeordichotomize.com/2017/09/25/the-traveling-metallurgist/),LiveFreeOrDichotomize.com,,,2017,Pure javascript implementation of traveling salesman solution using simulated annealing.,Allows reader to customize the number and location of cities to attempt to trick the algorithm.,,TRUE +about_me_press,[Great paper? Swipe right on the new ‘Tinder for preprints’ app](https://www.sciencemag.org/news/2017/06/great-paper-swipe-right-new-tinder-preprints-app),Science,,2017,2017,Story of the app [Papr](https://jhubiostatistics.shinyapps.io/papr/) made with Jeff Leek and Lucy D’Agostino McGowan.,,,FALSE +about_me_press,[Swipe right for science: Papr app is ‘Tinder for preprints’](https://www.nature.com/news/swipe-right-for-science-papr-app-is-tinder-for-preprints-1.22163),Nature News,,2017,2017,Second press article for app Papr.,,,FALSE +about_me_press,[The Deeper Story in the Data](https://www.uvm.edu/uvmnews/news/deeper-story-data),University of Vermont Quarterly,,2016,2016,Story on my path post graduation and the power of narrative.,,,FALSE +by_me_press,[The Great Student Migration](https://www.nytimes.com/interactive/2016/08/26/us/college-student-migration.html?smid=pl-share),The New York Times,,2016,2016,Most shared and discussed article from the New York Times for August 2016.,,,TRUE +by_me_press,"[Wildfires are Getting Worse, The New York Times](https://www.nytimes.com/interactive/2016/07/25/us/wildfire-seasons-los-angeles.html)",The New York Times,,2016,2016,GIS analysis and modeling of fire patterns and trends,Data in collaboration with NASA and USGS,,FALSE +by_me_press,[Who’s Speaking at the Democratic National Convention?](https://www.nytimes.com/2016/07/26/upshot/democrats-may-not-be-unified-but-their-convention-speakers-are.html),The New York Times,,2016,2016,Data scraped from CSPAN records to figure out who talked and past conventions.,,,FALSE +by_me_press,[Who’s Speaking at the Republican National Convention?](https://www.nytimes.com/2016/07/19/upshot/whos-not-speaking-how-this-republican-convention-differs.html?smid=pl-share),The New York Times,,2016,2016,Used same data scraping techniques as Who’s Speaking at the Democratic National Convention?,,,FALSE +by_me_press,"[A Trail of Terror in Nice, Block by Block](https://www.nytimes.com/interactive/2016/07/14/world/europe/trail-of-terror-france.html)",The New York Times,,2016,2016,"Led research effort to put together story of 2016 terrorist attack in Nice, France in less than 12 hours.","Work won Silver medal at Malofiej 2017, and gold at Society of News and Design.",,FALSE +academic_articles,Asymmetric Linkage Disequilibrium: Tools for Dissecting Multiallelic LD,Journal of Human Immunology,,2015,2015,"Authored with Richard Single, Vanja Paunic, Mark Albrecht, and Martin Maiers.",,,TRUE +academic_articles,[An Agent Based Model of Mysis 
Migration](https://www.semanticscholar.org/paper/An-Agent-Based-Model-of-the-Diel-Vertical-Migration-Strayer-Stockwell/40493c78e8ecf22bd882d17ec99fd913ec4b9820),International Association of Great Lakes Research Conference,,2015,2015,"Authored with Brian O'Malley, Sture Hansson, and Jason Stockwell.",,,FALSE +academic_articles,Declines of Mysis diluviana in the Great Lakes,Journal of Great Lakes Research,,2015,2015,Authored with Peter Euclide and Jason Stockwell.,,,FALSE +academic_articles,[Continuous Classification using Deep Neural Networks](http://nickstrayer.me/qualifying_exam/),Vanderbilt Biostatistics Qualification Exam,,2017,2017,Review of methods for classifying continuous data streams using neural networks,Successfully met qualifying examination standards,,FALSE +academic_articles,[Charge Reductions Associated with Shortening Time to Recovery in Septic Shock](https://www.ncbi.nlm.nih.gov/pubmed/30419234),Chest,,2019,2019,"Authored with Wesley H. Self, MD MPH; Dandan Liu, PhD; Stephan Russ, MD, MPH; Michael J. Ward, MD, PhD, MBA; Nathan I. Shapiro, MD, MPH; Todd W. Rice, MD, MSc; Matthew W. Semler, MD, MSc.",,,TRUE +academic_articles,R timelineViz: Visualizing the distribution of study events in longitudinal studies,Under-Review (copy available upon request.),,2018,2018,Authored with Alex Sunderman of the Vanderbilt Department of Epidemiology.,,,FALSE +academic_articles,[Multimorbidity Explorer | A shiny app for exploring EHR and biobank data](http://nickstrayer.me/rstudioconf19_me-poster/),RStudio::conf 2019,,2019,2019,Contributed Poster. Authored with Yaomin Xu.,,,TRUE +academic_articles,[Taking a network view of EHR and Biobank data to find explainable multivariate patterns](http://nickstrayer.me/biostat_seminar/),Vanderbilt Biostatistics Seminar Series,,2019,2019,University wide seminar series.,,,FALSE +academic_articles,Patient-specific risk factors independently influence survival in Myelodysplastic Syndromes in an unbiased review of EHR records,Under-Review (copy available upon request.),,,2019,Bayesian network analysis used to find novel subgroups of patients with Myelodysplastic Syndromes (MDS).,Analysis done using method built for my dissertation.,,FALSE +academic_articles,Building a software package in tandem with machine learning methods research can result in both more rigorous code and more rigorous research,ENAR 2020,,,2020,Invited talk in Human Data Interaction section.,How and why building an R package can benefit methodological research,,FALSE +academic_articles,"[Stochastic Block Modeling in R, Statistically rigorous clustering with rigorous code](http://nickstrayer.me/rstudioconf_sbm)",RStudio::conf 2020,,,2020,Invited talk about new [sbmR package](https://tbilab.github.io/sbmR/).,Focus on how software development and methodological research can improve both benefit when done in tandem.,,TRUE +academic_articles,Patient specific comorbidities impact overall survival in myelofibrosis,Under-Review (copy available upon request.),,,2019,Bayesian network analysis used to find robust novel subgroups of patients with given genetic mutations.,Analysis done using method built for my dissertation.,,FALSE \ No newline at end of file diff --git a/data/language_skills.csv b/data/language_skills.csv new file mode 100644 index 0000000..7db141e --- /dev/null +++ b/data/language_skills.csv @@ -0,0 +1,9 @@ +Name of language,Relative numeric level of skill +skill,level +R,5 +Javascript (d3.js),4.5 +C++,4 +Python,4 +Bash,3.5 +SQL,3 +AWK,3 \ No newline at end of file diff --git 
a/data/position_data.csv b/data/position_data.csv deleted file mode 100644 index e8ef82d..0000000 --- a/data/position_data.csv +++ /dev/null @@ -1,18 +0,0 @@ -include;title;institution;loc;start;end;description_1;description_2;description_3;description_4;description_5 -VERDADEIRO;Analista de Dados;Agncia Pblica de Jornalismo Investigativo;So Paulo, SP;2022;Present;Crio solues em automao para a rotina com dados e auxilio em reportagens que possuem demandas em bases de dados;. Participei do projeto Mapa dos Conflitos, parceria da Agncia Pblica com a Comisso Pastoral da Terra (CPT);NA;NA;NA -VERDADEIRO;Prmio ANA;aa;NA;2023;2023;A reportagem Os privilegiados com a gua do cerrado baiano foi finalista da 18 edio do Prmio ANA na categoria Comunicao Mdia Impressa ou Sonora.;NA;NA;NA;NA -VERDADEIRO;Bacharelado em Jornalismo;Universidade de So Paulo;So Paulo, SP;2018;2023;Fui reprter e editora de diferentes jornais-laboratrio: o Notcias do Jardim So Remo, a Agncia Universitria de Notcias, o Jornal do Campus e o suplemento claro!.;Membro da Jornalismo Jnior, empresa jnior de Jornalismo da ECA-USP (2018 - 2019);"Fui monitora dos cursos de vero do Instituto de Matemtica e Estatstica (IME) da USP ""Python para anlise de dados"" (2022) e ""R para anlise de dados"" (2023)";NA;NA -VERDADEIRO;39 Prmio Direitos Humanos de Jornalismo (Meno honrosa);OAB-RS;NA;2022;2022;NA;NA;NA;NA;NA -VERDADEIRO;Design for a Better World Award 2022;x;NA;2022;2022;O Mapa Dos Conflitos foi um dos vencedores da categoria Design Grfico, Design Digital, UX e UI;NA;NA;NA;NA -VERDADEIRO;The Sigma Awards 2022 - Shortlist;y;NA;2022;2022;A reportagem Brasil registra duas vezes mais pessoas brancas vacinadas que negras foi finalista do The Sigma Awards de 2022.;NA;NA;NA;NA -VERDADEIRO;Prmio Cludio Weber Abramo de Jornalismo de Dados - Shortlist (2022);z;NA;2022;2022;NA;NA;NA;NA;NA -VERDADEIRO;Master em Jornalismo de Dados, Automao e Data Storytelling;Insper;So Paulo, SP;2021;2022;Lato-sensu specialization with double certification.;NA;NA;NA;NA -VERDADEIRO;Estagiria de Jornalismo de Dados;Agncia Pblica de Jornalismo Investigativo;So Paulo, SP;2020;2022;Como estagiria, participei de coberturas reconhecidas pelo Prmio Roche de Jornalismo em Sade e reportagens finalistas do Sigma Awards e prmio Cludio Weber Abramo de Jornalismo de Dados.;NA;NA;NA;NA -VERDADEIRO;Mestrado em Cincias;Universidade Federal de So Paulo;So Paulo, SP;2018;2022;"Mestrado em Cincias pelo Programa de Ps-Graduao em Farmacologia (Conceito CAPES 6), com projeto intitulado ""Manipulaes farmacolgicas e no-farmacolgicas para o reestabelecimento de um ""tnus hednico"" em um modelo animal de esquizofrenia: a linhagem SHR""";Bolsista da Coordenao de Aperfeioamento de Pessoal de Nvel Superior, CAPES (2018 - 2019);NA;NA;NA -VERDADEIRO;39 Prmio Direitos Humanos de Jornalismo;OAB-RS;NA;2021;2021;O Mapa dos Conflitos ficou em 2 lugar na categoria Online;NA;NA;NA;NA -VERDADEIRO;The Sigma Awards 2021 - Shortlist;y;NA;2021;2021;NA;NA;NA;NA;NA -VERDADEIRO;Prmio Cludio Weber Abramo de Jornalismo de Dados - Shortlist (2021);z;NA;2021;2021;NA;NA;NA;NA;NA -VERDADEIRO;Prmio Roche de Jornalismo em Sade (Meno honrosa);Roche;NA;2019;2019;NA;NA;NA;NA;NA -VERDADEIRO;Diretora vice-presidente;Jornalismo Jnior;So Paulo, SP;2019;2019;Desempenhei atividades como planejamento financeiro, atualizao do fluxo de caixa, oramentos, redao de contratos e regulamentao jurdica, alm de gerenciar projetos internos e externos com a presidente;NA;NA;NA;NA -VERDADEIRO;Reprter;Jornalismo Jnior;So Paulo, 
SP;2018;2018;NA;NA;NA;NA;NA -VERDADEIRO;Bacharelado em Biomedicina;Universidade Federal de So Paulo;So Paulo, SP;2014;2017;"Participei da equipe executora do projeto de extenso universitria ""Patgenos em Jogo"" (2016)";Organizei o XV Curso de Inverno da Biomedicina - Unifesp (2017);NA;NA;NA \ No newline at end of file diff --git a/data/position_data2.csv b/data/position_data2.csv deleted file mode 100644 index 84d69f9..0000000 --- a/data/position_data2.csv +++ /dev/null @@ -1,13 +0,0 @@ -section,include,title,institution,loc,start,end,description_1,description_2,description_3,description_4,description_5 -education,TRUE,"M.S., Decision Analytics | M.B.A.",Virginia Commonwealth University,"Richmond, VA",NA,NA,Distinguished Alumni Award for developing an alumni analytics meetup group,NA,NA,NA,NA -education,FALSE,"M.S., Decision Analytics",Virginia Commonwealth University,"Richmond, VA",NA,NA,NA,NA,NA,NA,NA -education,FALSE,M.B.A.,Virginia Commonwealth University,"Richmond, VA",NA,NA,NA,NA,NA,NA,NA -education,TRUE,"B.S., Political Science",Campbell University,"Buies Creek, NC",NA,NA,NA,NA,NA,NA,NA -work,TRUE,"Manager, Experimentation & Analytics",Markel Corporation,"Richmond, VA",2022,Present,Leads a team of data scientists and data engineers building pilot solutions and conducting experimental design at a Fortune 500 specialty insurance company,"Leverages test-and-learn methodology, delivering pilot solutions quickly to small groups to measure value before scaling products",Prioritizes work based on customer engagement to understand pain-points and identify data-enabled solutions to suit business needs,"Uses Azure DevOps for the continuous deployment of applications to production environments, experienced with establishing and maintaining VMs in Azure","Remains a hands-on developer, regularly using R, Python, Git, Databricks, etc." -work,TRUE,Senior Data Scientist ,Markel Corporation,"Richmond, VA",2018,2022,Associate Data Scientist (2018-2020) - Data Scientist (2020-2021),Data science developer on an agile sprint team managing applications that prioritize and monitor incoming business submissions,"Develops, maintains, and deploys machine learning models, primarily GLMs, decision trees, and dabbling in unsupervised techniques","Conducts ad hoc analysis to diagnosis trends, analyze model performance, and make business recommendations based on data and statistical tests","Co-chairman of the Markel Veteran's Network, an Employee Resource Group (2020)" -work,TRUE,Legislative and Military Liaison,Virginia Department of Motor Vehicles,"Richmond, VA",2014,2018,"Advisor to the commissioner and senior executives of an 1,800-person organization, representing the commissioner at the state legislature and senior-level meetings",Communicated agency policies to the General Assembly and Military installations,NA,NA,NA -work,TRUE,Public Affairs Manager | Print Journalist ,U.S. Army,"Fort Bragg, NC",2005,2014,"Served in a variety of journalism and public relations roles with the 82nd Airborne Division, U.S. 
Military Academy and a Special Forces unit, including two deployments to Afghanistan","Managed small, technical teams supporting high-risk training and deployments ",Named Distinguished Honor Graduate and Distinguished Leader Graduate during two Army leadership training courses,NA,NA -work,FALSE,Public Affairs Manager,3rd Special Forces Group (Airborne),"Fort Bragg, NC",2012,2014,"Public affairs manager and advisor to senior executives of a 2,300-person organization conducting high-risk training and missions throughout the world",Led a team of four to develop communications strategies and multimedia content,NA,NA,NA -work,FALSE,Public Affairs Manager,U.S. Military Academy at West Point,"West Point, N.Y.",2010,2012,"Senior public affairs manager, directly supervised two production specialists and administratively responsible for five",NA,NA,NA,NA -work,FALSE,Print Journalist | Public Affairs Manager,82nd Airborne Division,"Fort Bragg, NC",2005,2010,"Print journalist and public affairs manager for a 4,000-person organization, supervised three direct reports, advised executive staff, and deployed twice to Afghanistan",NA,NA,NA,NA -awards,FALSE,Business School Alumni Award,Virginia Commonwealth University,"Richmond, VA",2019,NA,Given Distinguished Alumni Award for starting an alumni meetup group to share data science trends and best practices,NA,NA,NA,NA \ No newline at end of file diff --git a/data/position_data_en.csv b/data/position_data_en.csv deleted file mode 100644 index 606b59e..0000000 --- a/data/position_data_en.csv +++ /dev/null @@ -1,12 +0,0 @@ -section;include;title;institution;loc;start;end;description_1;description_2;description_3;description_4;description_5;description_6;description_7;description_8;description_9;description_10;description_11;description_12;description_13 -work;TRUE;Data Analyst;Agência Pública de Jornalismo Investigativo;Sao Paulo-SP;2022;Present;Create automation solutions for the data journalism workflow and assist in works that are data-driven;Participated in the Map of Conflict, a project by the investigative journalism outlet Agência Pública, in partnership with the Pastoral Land Commission (CPT). 
Using original data analysis, it investigates instances of rural conflict in Brazil’s Legal Amazon region in the last decade (2011-2020);NA;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA -education;TRUE;Bachelor of Journalism;University of São Paulo (USP);NA;2018;2023;Member of the Junior Enterprise of Journalism at ECA -USP (2018 - 2019);"Monitor of the Summer Courses of the Institute of Mathematics and Statistics (IME) of USP ""Python for Data Analysis"" (2022) and ""R for data analysis"" (2023)";NA;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA -education;TRUE;Master in Data Journalism, Automation and Data Storytelling;Insper;NA;2021;2022;Specialization with double certification;;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA -work;TRUE;Data Journalism Intern;Agência Pública de Jornalismo Investigativo;NA;2020;2022;Participated in coverage recognized for the Roche Health Journalism Award and finalist reports of Sigma Awards and Claudio Weber Abramo Award from Data Journalism.;;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA -education;TRUE;MS in Pharmacology;Federal University of São Paulo - UNIFESP;NA;2018;2022;"Master in Sciences from the Postgraduate Program in Pharmacology (Concept CAPES 6), with a project entitled ""Pharmacological and Non-Farmacological Manipulations for the reestablishment of a"" hedonic tone ""in an animal model of schizophrenia: the SHR strain""";CAPES scholarship (2018 - 2019);NA;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA -awards;TRUE;Awards, shortlists and honorable mentions;Associated with the Agência Pública de Jornalismo Investigativo;NA;2021;2021;Claudio Weber Abramo Award for Data Journalism - Shortlist (2021);The Sigma Awards 2021 - Shortlisted with the work;Roche Health Journalism Award (Honorable Mention);39th Human Rights of Journalism Award;39th Human Rights of Journalism Award;Claudio Weber Abramo Award for Data Journalism - Shortlist (2022);"The Sigma Awards 2022 - Shortlisted with the work ""Map of Conflict""";Agência Nacional de Águas e Saneamento Award 2023 - Shortlisted;39th Human Rights Award for Journalism (Honorable Mention);"Design for Better World Award 2022 - Winner with the work ""Map of Conflict""";NA;NA;NA -work;TRUE;Vice-President Director;Jornalismo Júnior;NA;2019;2019;Headed the financial planning of the Jornalismo Junior, with activities like: cash flow update, and legal regulation;"Managed internal and external projects. 
One of them was ""São Paulo sem Migué"", a fact-checking project about the city of São Paulo";NA;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA -work;TRUE;News reporter;Jornalismo Júnior;NA;2018;2018;Wrote articles about sciences, sports, culture and politics;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA -education;TRUE;Bachelor of Biomedicine;Federal University of São Paulo - UNIFESP;NA;2014;2017;"Participated in the development of the ""Patógenos em Jogo"", an extension project of Unifesp (2016)";Coordinated the XV Biomedicine Winter Course, at Unifesp (2017);NA;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA -talks;TRUE;Lectures and workshops;NA;NA;NA;NA;"Lecture: ""Step by step of a data driven story with the Map of Conflict"" (2023, ""Coverage of Socio-Environmental Conflicts"" - Portal Assobiar)";"Lecture: ""Intro to Data Journalism"" at the ""Communication Course: Practice and Reflection"" (2023, Colabora)";Climate Data Laboratory - Alma Preta Jornalismo (2023, Escola de Dados);"Lecture ""Dataviz at the Agência Pública"" (2023, Unisinos)";Introduction to R and Tidyverse (2023, R-Ladies São Paulo);Workshop “Python + R together with Quarto: your reports will never be the same” (2023, CODA Amazônia);Workshop “Spreadsheet: a great ally of data analysis” (2023, CODA Amazônia);"Lecture: ""Data visualization reveal social problems"" (2023, Domingo de Dados - 18th Abraji Congress)";"Workshop ""Open Data Analysis with R - Open Data Day"" (2023, R -Ladies São Paulo)";Workshop “Data journalism with spreadsheet editors” (2022, CODA.Br);Webinar “the public in data and the map of conflicts” (2022, data school);Data journalism to change realities (2022, Data Sunday - 17th Abraji Congress);"Lecture: ""Business Chain - Data in all sectors of the economy: Data journalism"" (2021, BIX Technology)" -awards;TRUE;Scholarships and Fellowships;NA;NA;NA;NA;Entrepreneurship and Innovation Scholarship (Agência USP de Inovação - AUSPIN, 2022). With this scholarship, I developed a research project for four months at University of Texas at Austin about what data journalism teaching is like in the USA;Amazon Rainforest Journalism Fund, from Pulitzer Center (2021);NA;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA \ No newline at end of file diff --git a/data/position_data_pt.csv b/data/position_data_pt.csv deleted file mode 100644 index 704abf1..0000000 --- a/data/position_data_pt.csv +++ /dev/null @@ -1,21 +0,0 @@ -section;include;title;institution;loc;start;end;description_1;description_2;description_3;description_4;description_5;description_6;description_7;description_8;description_9;description_10;description_11;description_12;description_13 -work;TRUE;Analista de Dados;Agência Pública de Jornalismo Investigativo;São Paulo, SP;2022;Present;Crio soluções em automação para a rotina com dados e auxilio em reportagens que possuem demandas em bases de dados;. 
Participei do projeto Mapa dos Conflitos, parceria da Agência Pública com a Comissão Pastoral da Terra (CPT);NA;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA -awards;TRUE;Prêmio ANA;Associado à Agência Pública de Jornalismo Investigativo;NA;2023;2023;A reportagem “Os privilegiados com a água do cerrado baiano” foi finalista da 18ª edição do Prêmio ANA na categoria Comunicação – Mídia Impressa ou Sonora.;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA -education;TRUE;Bacharelado em Jornalismo;Universidade de São Paulo - SP;São Paulo, SP;2018;2023;Fui repórter e editora de diferentes jornais-laboratório: o Notícias do Jardim São Remo, a Agência Universitária de Notícias, o Jornal do Campus e o suplemento claro!.;Membro da Jornalismo Júnior, empresa júnior de Jornalismo da ECA-USP (2018 - 2019);"Fui monitora dos cursos de verão do Instituto de Matemática e Estatística (IME) da USP ""Python para análise de dados"" (2022) e ""R para análise de dados"" (2023)";NA;NA;NA;NA;NA;NA;NA;NA;NA;NA -awards;TRUE;39º Prêmio Direitos Humanos de Jornalismo (Menção honrosa);Associado à Agência Pública de Jornalismo Investigativo;NA;2022;2022;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA -awards;TRUE;Design for a Better World Award 2022;Associado à Agência Pública de Jornalismo Investigativo;NA;2022;2022;O Mapa Dos Conflitos foi um dos vencedores da categoria Design Gráfico, Design Digital, UX e UI;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA -awards;TRUE;The Sigma Awards 2022 - Shortlist;Associado à Agência Pública de Jornalismo Investigativo;NA;2022;2022;A reportagem “Brasil registra duas vezes mais pessoas brancas vacinadas que negras” foi finalista do The Sigma Awards de 2022.;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA -awards;TRUE;Prêmio Cláudio Weber Abramo de Jornalismo de Dados - Shortlist (2022);Associado à Agência Pública de Jornalismo Investigativo;NA;2022;2022;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA -education;TRUE;Master em Jornalismo de Dados, Automação e Data Storytelling;Insper;São Paulo, SP;2021;2022;Lato-sensu specialization with double certification.;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA -work;TRUE;Estagiária de Jornalismo de Dados;Agência Pública de Jornalismo Investigativo;São Paulo, SP;2020;2022;Como estagiária, participei de coberturas reconhecidas pelo Prêmio Roche de Jornalismo em Saúde e reportagens finalistas do Sigma Awards e prêmio Cláudio Weber Abramo de Jornalismo de Dados.;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA -education;TRUE;Mestrado em Ciências;Universidade Federal de São Paulo - Unifesp;São Paulo, SP;2018;2022;"Mestrado em Ciências pelo Programa de Pós-Graduação em Farmacologia (Conceito CAPES 6), com projeto intitulado ""Manipulações farmacológicas e não-farmacológicas para o reestabelecimento de um ""tônus hedônico"" em um modelo animal de esquizofrenia: a linhagem SHR""";Bolsista da Coordenação de Aperfeiçoamento de Pessoal de Nível Superior, CAPES (2018 - 2019);NA;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA -awards;TRUE;39º Prêmio Direitos Humanos de Jornalismo;Associado à Agência Pública de Jornalismo Investigativo;NA;2021;2021;O Mapa dos Conflitos ficou em 2º lugar na categoria Online;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA -awards;TRUE;The Sigma Awards 2021 - Shortlist;Associado à Agência Pública de Jornalismo Investigativo;NA;2021;2021;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA -awards;TRUE;Prêmio Cláudio Weber Abramo de Jornalismo de Dados - Shortlist (2021);Associado à Agência Pública de Jornalismo Investigativo;NA;2021;2021;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA -awards;TRUE;Prêmio Roche de Jornalismo em Saúde (Menção 
honrosa);Roche;NA;2019;2019;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA -work;TRUE;Diretora vice-presidente;Jornalismo Júnior;São Paulo, SP;2019;2019;Desempenhei atividades como planejamento financeiro, atualização do fluxo de caixa, orçamentos, redação de contratos e regulamentação jurídica, além de gerenciar projetos internos e externos com a presidente;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA -work;TRUE;Repórter;Jornalismo Júnior;São Paulo, SP;2018;2018;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA -education;TRUE;Bacharelado em Biomedicina;Universidade Federal de São Paulo - Unifesp;São Paulo, SP;2014;2017;"Participei da equipe executora do projeto de extensão universitária ""Patógenos em Jogo"" (2016)";Organizei o XV Curso de Inverno da Biomedicina - Unifesp (2017);NA;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA -education;TRUE;Bolsa de Empreendedorismo e Inovação;Agência USP de Inovação - AUSPIN;Austin, TX, EUA;2022;2022;Desenvolvo um projeto de pesquisa que estuda instituições de ensino e ambientes profissionais de jornalismo de dados nos Estados Unidos;O projeto foi selecionado para receber bolsa de Empreendedorismo e Inovação da Agência USP de Inovação - AUSPIN.;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA -talks;TRUE;NA;NA;NA;NA;NA;Workshop “Python + R juntos com Quarto: seus relatórios nunca mais serão os mesmos” (2023, CODA Amazônia);Workshop “Planilha: uma grande aliada da análise de dados” (2023, CODA Amazônia);Workshop “Jornalismo de dados com editores de planilhas” (2022, CODA.Br);Webinar “A Pública em dados e o Mapa dos Conflitos” (2022);Laboratório de Dados Climáticos - Alma Preta Jornalismo;Introdução ao R e ao Tidyverse;Open Data Day;Jornalismo de dados para mudar realidades;Visualizações de dados revelam problemas sociais;Treinamento Portal Assobiar;Curso Comunicação: prática e reflexão;Dataviz na Agência Pública;Cadeia de Negócios - Dados em todos os setores da economia - Jornalismo de dados -awards;TRUE;Bolsista do Amazon Rainforest Journalism Fund, do Pulitzer Center;Associado à Agência Pública de Jornalismo Investigativo;NA;2020;2020;d;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA \ No newline at end of file diff --git a/data/position_data_pt2.csv b/data/position_data_pt2.csv deleted file mode 100644 index 2c33723..0000000 --- a/data/position_data_pt2.csv +++ /dev/null @@ -1,12 +0,0 @@ -section;include;title;institution;loc;start;end;description_1;description_2;description_3;description_4;description_5;description_6;description_7;description_8;description_9;description_10;description_11;description_12;description_13 -work;TRUE;Analista de Dados;Agência Pública de Jornalismo Investigativo;São Paulo, SP;2022;Present;Crio soluções em automação para a rotina com dados e auxilio em reportagens que possuem demandas em bases de dados;. 
Participei do projeto Mapa dos Conflitos, parceria da Agência Pública com a Comissão Pastoral da Terra (CPT);NA;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA -education;TRUE;Bacharelado em Jornalismo;Universidade de São Paulo - SP;São Paulo, SP;2018;2023;Fui repórter e editora de diferentes jornais-laboratório: o Notícias do Jardim São Remo, a Agência Universitária de Notícias, o Jornal do Campus e o suplemento claro!.;Membro da Jornalismo Júnior, empresa júnior de Jornalismo da ECA-USP (2018 - 2019);"Fui monitora dos cursos de verão do Instituto de Matemática e Estatística (IME) da USP ""Python para análise de dados"" (2022) e ""R para análise de dados"" (2023)";NA;NA;NA;NA;NA;NA;NA;NA;NA;NA -education;TRUE;Master em Jornalismo de Dados, Automação e Data Storytelling;Insper;São Paulo, SP;2021;2022;Lato-sensu specialization with double certification.;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA -work;TRUE;Estagiária de Jornalismo de Dados;Agência Pública de Jornalismo Investigativo;São Paulo, SP;2020;2022;Como estagiária, participei de coberturas reconhecidas pelo Prêmio Roche de Jornalismo em Saúde e reportagens finalistas do Sigma Awards e prêmio Cláudio Weber Abramo de Jornalismo de Dados.;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA -education;TRUE;Mestrado em Ciências;Universidade Federal de São Paulo - Unifesp;São Paulo, SP;2018;2022;"Mestrado em Ciências pelo Programa de Pós-Graduação em Farmacologia (Conceito CAPES 6), com projeto intitulado ""Manipulações farmacológicas e não-farmacológicas para o reestabelecimento de um ""tônus hedônico"" em um modelo animal de esquizofrenia: a linhagem SHR""";Bolsista da Coordenação de Aperfeiçoamento de Pessoal de Nível Superior, CAPES (2018 - 2019);NA;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA -awards;TRUE;Prêmios e menções honrosas;Associado à Agência Pública de Jornalismo Investigativo;NA;2021;2021;Prêmio Cláudio Weber Abramo de Jornalismo de Dados - Shortlist (2021);The Sigma Awards 2021 - Shortlist;Prêmio Roche de Jornalismo em Saúde (Menção honrosa);39º Prêmio Direitos Humanos de Jornalismo;39º Prêmio Direitos Humanos de Jornalismo;Prêmio Cláudio Weber Abramo de Jornalismo de Dados - Shortlist (2022);The Sigma Awards 2022 - Shortlist;Prêmio ANA;39º Prêmio Direitos Humanos de Jornalismo (Menção honrosa);Design for a Better World Award 2022;NA;NA;NA -work;TRUE;Diretora vice-presidente;Jornalismo Júnior;São Paulo, SP;2019;2019;Desempenhei atividades como planejamento financeiro, atualização do fluxo de caixa, orçamentos, redação de contratos e regulamentação jurídica, além de gerenciar projetos internos e externos com a presidente;NA;NA;NA;NA;NA;NA;;NA;NA;NA;NA;NA -work;TRUE;Repórter;Jornalismo Júnior;São Paulo, SP;2018;2018;NA;NA;NA;NA;NA;NA;NA;;NA;NA;NA;NA;NA -education;TRUE;Bacharelado em Biomedicina;Universidade Federal de São Paulo - Unifesp;São Paulo, SP;2014;2017;"Participei da equipe executora do projeto de extensão universitária ""Patógenos em Jogo"" (2016)";Organizei o XV Curso de Inverno da Biomedicina - Unifesp (2017);NA;NA;NA;NA;NA;;NA;NA;NA;NA;NA -talks;TRUE;Talks e workshops;NA;NA;NA;NA;Workshop “Python + R juntos com Quarto: seus relatórios nunca mais serão os mesmos” (2023, CODA Amazônia);Workshop “Planilha: uma grande aliada da análise de dados” (2023, CODA Amazônia);Workshop “Jornalismo de dados com editores de planilhas” (2022, CODA.Br);Webinar “A Pública em dados e o Mapa dos Conflitos” (2022);Laboratório de Dados Climáticos - Alma Preta Jornalismo;Introdução ao R e ao Tidyverse;Open Data Day;Jornalismo de dados para mudar realidades;Visualizações de dados revelam 
problemas sociais;Treinamento Portal Assobiar;Curso Comunicação: prática e reflexão;Dataviz na Agência Pública;Cadeia de Negócios - Dados em todos os setores da economia - Jornalismo de dados -awards;TRUE;Bolsas e fellowships;NA;NA;2020;2020;Bolsista do Amazon Rainforest Journalism Fund, do Pulitzer Center;Bolsa de Empreendedorismo e Inovação;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA;NA \ No newline at end of file diff --git a/data/skill_data.csv b/data/skill_data.csv deleted file mode 100644 index 2ea04a3..0000000 --- a/data/skill_data.csv +++ /dev/null @@ -1,16 +0,0 @@ -section,skill,level -Tech,R,4.25 -Tech,SQL,3 -Tech,Python,3.75 -Tech,Git,3 -Tech,Azure / AWS,3.5 -Interests,Advanced Statistics,4.25 -Interests,Software Engineering,4 -Interests,Databricks,4 -Interests,ML Ops,5 -Interests,Cloud infrastructure,4.75 -R Packages,tidyverse,4.25 -R Packages,rmarkdown,4.25 -R Packages,tidymodels,3.5 -R Packages,purrr,2.5 -R Packages,usethis | devtools,3.5 \ No newline at end of file diff --git a/data/text_blocks.csv b/data/text_blocks.csv new file mode 100644 index 0000000..3c59328 --- /dev/null +++ b/data/text_blocks.csv @@ -0,0 +1,8 @@ +Id used for finding text block,Contents of text block. Supports markdown formatting. +loc,text +intro,"I have made [visualizations viewed by hundreds of thousands of people](https://www.nytimes.com/interactive/2016/08/26/us/college-student-migration.html), [sped up query times for 25 terabytes of data by an average of 4,800 times](https://livefreeordichotomize.com/2019/06/04/using_awk_and_r_to_parse_25tb/), and built [packages for R](https://github.com/nstrayer/shinysense) that let you [do magic](http://nickstrayer.me/dataDayTexas/). + +Currently searching for a position that allows me to build tools leveraging a combination of visualization, machine learning, and software engineering to help people explore and understand their data in new and useful ways." +industy_experience_aside,I have worked in a variety of roles ranging from journalist to software engineer to data scientist. I like collaborative environments where I can learn from my peers. +teaching_experience_aside,I am passionate about education. I believe that no topic is too complex if the teacher is empathetic and willing to think about new methods of approaching task. 
+data_science_writing_aside,I regularly blog about data science and visualization on my blog [LiveFreeOrDichotomize.](https://livefreeordichotomize.com/) \ No newline at end of file diff --git a/resume.Rproj b/datadrivencv.Rproj similarity index 57% rename from resume.Rproj rename to datadrivencv.Rproj index 8e3c2eb..497f8bf 100644 --- a/resume.Rproj +++ b/datadrivencv.Rproj @@ -11,3 +11,10 @@ Encoding: UTF-8 RnwWeave: Sweave LaTeX: pdfLaTeX + +AutoAppendNewline: Yes +StripTrailingWhitespace: Yes + +BuildType: Package +PackageUseDevtools: Yes +PackageInstallArgs: --no-multiarch --with-keep.source diff --git a/css/styles_html.css b/dd_cv.css similarity index 56% rename from css/styles_html.css rename to dd_cv.css index 5d47821..fec3877 100644 --- a/css/styles_html.css +++ b/dd_cv.css @@ -1,18 +1,18 @@ -@import url('https://fonts.googleapis.com/css?family=Arimo|Open+Sans&display=swap'); +@import url("https://fonts.googleapis.com/css?family=Montserrat|Playfair+Display&display=swap"); -/* Main text is Opens Sans font*/ +/* Main text is monserrat*/ body { - font-family: "Open Sans", sans-serif; + font-family: "Montserrat", sans-serif; font-weight: 300; line-height: 1.3; color: #444; } -/* Give headers Arimo font */ +/* Give headers playfair font */ h1, h2, h3 { - font-family: "Arimo", serif; + font-family: "Playfair Display", serif; color: #000; } @@ -29,17 +29,18 @@ sup { * { /* Override default right margin for sidebar*/ - --pagedjs-margin-right: 0.5in; + --pagedjs-margin-right: 0.2in; --pagedjs-margin-left: 0.2in; + --pagedjs-margin-top: 0.2in; } /* Customize some of the sizing variables */ :root { --sidebar-width: 12rem; /* Shrink sidebar width */ - --sidebar-background-color: #a99384; /* Make sidebar #edeffc */ + --sidebar-background-color: #f7fbff; /* Make sidebar blue */ --sidebar-horizontal-padding: 0.01in; /* Reduce sidebar padding */ --decorator-outer-dim: 10px; /* Make position deliniating circles larger */ - /* --decorator-border: 2px solid #edeffc; /* Make timeline a blue as well*/ + --decorator-border: 2px solid #bdd7e7; /* Make timeline a blue as well*/ } .details .place { @@ -55,6 +56,11 @@ sup { margin-left: -3px; } +/* When we have links at bottom in a list make sure they actually are numbered */ +#links li { + list-style-type: decimal; +} + /* Dont put the little fake list point in front of links */ .aside li::before { display: none; @@ -78,16 +84,42 @@ sup { font-size: 0.75rem; } +/* Make little circle outline be a light blue */ +.decorator::after { + background-color: #08306b; +} + /* Remove the fake bullets from lists */ .aside li::before { content: auto; } .skill-bar { - color: white; + color: black; padding: 0.1rem 0.25rem; margin-top: 3px; position: relative; width: 100%; } + +/* When the class no-timeline is added we remove the after psuedo element from the header... */ + +/* Removes the psuedo element on h2 tags for this section */ +.section.no-timeline h2::after { + content: none; +} + +/* Without adding padding the content is all up on the title */ +.section.no-timeline h2 { + padding-bottom: 1rem; +} + +/* Add styles for little cards */ +.info-card{ + width: 220px; + float: left; + padding: 0.5rem; + margin: 0.5rem; + box-shadow: 1px 1px 4px black; +} diff --git a/docs/404.html b/docs/404.html new file mode 100644 index 0000000..16ca807 --- /dev/null +++ b/docs/404.html @@ -0,0 +1,144 @@ + + + + + + + + +Page not found (404) • datadrivencv + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
diff --git a/docs/LICENSE-text.html b/docs/LICENSE-text.html new file mode 100644 index 0000000..0b01b0a --- /dev/null +++ b/docs/LICENSE-text.html @@ -0,0 +1,146 @@ License • datadrivencv
diff --git a/docs/LICENSE.html b/docs/LICENSE.html new file mode 100644 index 0000000..b2296f7 --- /dev/null +++ b/docs/LICENSE.html @@ -0,0 +1,150 @@ MIT License • datadrivencv
diff --git a/docs/apple-touch-icon-120x120.png b/docs/apple-touch-icon-120x120.png new file mode 100644 index 0000000..bee86ed Binary files /dev/null and b/docs/apple-touch-icon-120x120.png differ diff --git a/docs/apple-touch-icon-152x152.png b/docs/apple-touch-icon-152x152.png new file mode 100644 index 0000000..4da65cc Binary files /dev/null and b/docs/apple-touch-icon-152x152.png differ diff --git a/docs/apple-touch-icon-180x180.png b/docs/apple-touch-icon-180x180.png new file mode 100644 index 0000000..1eca8e6 Binary files /dev/null and b/docs/apple-touch-icon-180x180.png differ diff --git a/docs/apple-touch-icon-60x60.png b/docs/apple-touch-icon-60x60.png new file mode 100644 index 0000000..1925f2b Binary files /dev/null and b/docs/apple-touch-icon-60x60.png differ diff --git a/docs/apple-touch-icon-76x76.png b/docs/apple-touch-icon-76x76.png new file mode 100644 index 0000000..6074fa7 Binary files /dev/null and b/docs/apple-touch-icon-76x76.png differ diff --git a/docs/apple-touch-icon.png b/docs/apple-touch-icon.png new file mode 100644 index 0000000..abfc0b9 Binary files /dev/null and b/docs/apple-touch-icon.png differ diff --git a/docs/articles/csv_to_cv.png b/docs/articles/csv_to_cv.png new file mode 100644 index 0000000..a0e6369 Binary files /dev/null and b/docs/articles/csv_to_cv.png differ diff --git a/docs/articles/how_to_copy_data.png b/docs/articles/how_to_copy_data.png new file mode 100644 index 0000000..742466c Binary files /dev/null and b/docs/articles/how_to_copy_data.png differ diff --git a/docs/articles/html_vs_pdf_output.png b/docs/articles/html_vs_pdf_output.png new file mode 100644 index 0000000..eacc1ed Binary files /dev/null and b/docs/articles/html_vs_pdf_output.png differ diff --git a/docs/articles/index.html b/docs/articles/index.html new file mode 100644 index 0000000..86c2bed --- /dev/null +++ b/docs/articles/index.html @@ -0,0 +1,146 @@ Articles • datadrivencv
diff --git a/docs/articles/setting_up_your_cv.html b/docs/articles/setting_up_your_cv.html new file mode 100644 index 0000000..cbc72c3 --- /dev/null +++ b/docs/articles/setting_up_your_cv.html @@ -0,0 +1,407 @@ Setting up your CV with datadrivencv • datadrivencv
    +
    + + + + +
    +
    + + + + +
    +

    +Goal

    +

This document provides a small bit of context for the creation of the datadrivencv package and a step-by-step walkthrough of going from zero to an HTML and PDF CV built from your data.

    +
    +
    +

    +Motivation

    +
    +

    +Updating a CV is not fun

    +

Anytime I went to add something to my CV I ended up wanting to change the format a tiny bit. This usually meant the entire Word document falling apart and each entry having to be copied and pasted into a new version.

    +

Ultimately this process felt formulaic and repetitive: prime indicators that it could be done better with code. Using a spreadsheet to store each entry of the CV and R to write the markdown seemed like the way to go, and Pagedown made it even easier, meaning the same CV could be rendered as interactive HTML and as PDF without changing the underlying rendering engine, as used to be required when knitting to PDF versus knitting to HTML.

    +
    + +
    +
    +
    +

    +No lock-in

    +

Inspired heavily by the usethis package, datadrivencv strives to make itself unnecessary. The main function is use_datadriven_cv(), which sets up the files you need to build your CV. These files are self-contained, meaning that if you uninstall datadrivencv your CV will still knit fine. All of the R code logic is contained in a sourced script, so if you want to change it you can do so.

    +

The package aims to bootstrap you to a working data-driven CV pipeline, serving as a jumping-off point for building your own custom CV. You may at first want to leave it as is and then slowly tweak things to keep it fresh. You have all the code, so you can!

    +
    +
    +
    +

    +Using it

    +

The first step to using the package is the use_datadriven_cv() function. This function takes a few input parameters and, when run, sets up a series of files in your current working directory. E.g.

    +
    # run ?datadrivencv::use_datadriven_cv to see more details
    +datadrivencv::use_datadriven_cv(
    +  full_name = "Nick Strayer",
    +  data_location = "https://docs.google.com/spreadsheets/d/14MQICF2F8-vf8CKPF1m4lyGKO6_thG-4aSwat1e2TWc",
    +  pdf_location = "https://github.com/nstrayer/cv/raw/master/strayer_cv.pdf",
    +  html_location = "nickstrayer.me/cv/",
    +  source_location = "https://github.com/nstrayer/cv"
    +)
    +

    The available arguments are:

    + ++++ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    ArgumentDescription
    full_nameYour full name, used in title of document and header
data_locationPath of the spreadsheets holding all your data. This can be either a URL to a Google Sheet with multiple sheets containing the four data types or a path to a folder containing four .csvs with the necessary data.
    pdf_locationWhat location will the PDF of this CV be hosted at?
    html_locationWhat location will the HTML version of this CV be hosted at?
    source_locationWhere is the code to build your CV hosted?
    open_filesShould the added files be opened after creation?
use_network_logoShould the logo be an interactive network based on your CV data? Note that this uses the function build_network_logo(), so it will introduce a dependency on this package.
    +

This code is all that’s needed to set up a full CV. It outputs four files:

    + ++++ + + + + + + + + + + + + + + + + + + + + + + +
    FileDescription
    cv.rmdAn RMarkdown file with various sections filled in. Edit this to fit your personal needs.
    dd_cv.cssA custom set of CSS styles that build on the default Pagedown “resume” template. Again, edit these as desired.
    render_cv.rUse this script to build your CV in both PDF and HTML at the same time.
cv_printing_functions.rA series of functions that perform the dirty work of turning your spreadsheet data into markdown/HTML and making that output work for PDF printing, e.g. replacing markdown links with superscripts and a links section, tweaking the CSS to account for Chrome printing quirks, etc.
    +
    +
    +

    +Storing your data in spreadsheets

    +

By default the googlesheets4 package is used to fetch a Google Sheet with all the necessary data. To build your own version I suggest simply copying my data, removing all the rows, and filling it in with your own data.
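If you want to sanity-check what will be pulled before knitting anything, a minimal sketch of that read looks like the following. This is a hedged sketch, not the package internals: it assumes your sheet is shared publicly, the URL is the example sheet from above, and skip = 1 jumps over the human-readable description row that sits above the real column names in the template sheets:

+
# Pull one sub-sheet of the CV data to inspect it (sketch only)
+googlesheets4::gs4_deauth()  # public, read-only sheet, so skip authentication
+entries <- googlesheets4::read_sheet(
+  ss = "https://docs.google.com/spreadsheets/d/14MQICF2F8-vf8CKPF1m4lyGKO6_thG-4aSwat1e2TWc",
+  sheet = "entries",
+  skip = 1  # row 1 holds column descriptions; row 2 holds the actual header
+)
+head(entries)
+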

    +
    + +
    +
    +

    +Format of spreadsheets:

    +

There are four spreadsheets of “data” that are used. These take the form of separate sub-sheets within a single Google Sheet.

    +
    + +
    +

    The four spreadsheets that are needed and their columns are:

    +
    +

    +entries +

    + ++++ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    ColumnDescription
    sectionWhere in your CV this entry belongs
    titleMain title of the entry
locLocation the entry occurred
    institutionPrimary institution affiliation for entry
    startStart date of entry (year). Can be left blank for single point events like a manuscript.
    endEnd year of entry. Set to “current” if entry is still ongoing.
    description_*Each description column is a separate bullet point for the entry. If you need more description bullet points simply add a new column with title “description_{4,5,..}”
    +
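To make the column layout concrete, here is a hypothetical single entries row built in R. Every value below is made up, and the extra description row that the shipped templates place above the header is omitted for brevity:

+
# One made-up CV entry matching the columns documented above
+one_entry <- tibble::tibble(
+  section       = "education",
+  title         = "B.S., Example Studies",
+  loc           = "Exampleville, USA",
+  institution   = "Example University",
+  start         = 2011,
+  end           = 2015,
+  description_1 = "Thesis: an example thesis title",
+  in_resume     = TRUE  # filter flag used to build the shorter resume
+)
+readr::write_csv(one_entry, "data/entries.csv")  # only needed for the .csv workflow
+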
    +
    +

    +language_skills +

    + + + + + + + + + + + + + + + +
    ColumnDescription
    skillName of language
    levelRelative numeric level of skill
    +
    +
    +

    +text_blocks +

    + + + + + + + + + + + + + + + +
    ColumnDescription
    locId used for finding text block
    textContents of text block. Supports markdown formatting.
    +
    +
    +

    +contact info +

    + + + + + + + + + + + + + + + + + + + +
    ColumnDescription
    locId of contact section
    iconIcon used from font-awesome 4 to label this contact section
    contactThe actual value written for the contact entry
    +
    +
    +
    +

+Using .csvs instead of Google Sheets

    +

Don’t want to use Google Sheets to store your data? Not a problem. Just make four .csvs (entries.csv, language_skills.csv, text_blocks.csv, contact_info.csv) that match the format described above and pass the folder containing them as your data_location when initializing with use_datadriven_cv().

    +

    The function use_csv_data_storage() will set these up for you.
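For example, a minimal call looks like this (folder_name is up to you; create_output_dir creates the folder if it doesn’t exist yet):

+
# Copy the four template .csvs into a local data/ folder
+datadrivencv::use_csv_data_storage(
+  folder_name = "data",
+  create_output_dir = TRUE
+)
+

Afterwards, point data_location at that folder (e.g. data_location = "data/") when you call use_datadriven_cv().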

    +
    +
    +
    +

    +Rendering your CV

    +

Now that you have the templates set up and your data configured, the last thing to do is render. The easiest way to do this is to open cv.rmd in RStudio and click the “Knit” button, which renders an HTML version of your CV. However, you most likely want a PDF version to go along with the HTML one. The easiest way to get both is to run the included script render_cv.r:

    +
    +

    +render_cv.r +

    +
    # Knit the HTML version
    +rmarkdown::render("cv.rmd",
    +                  params = list(pdf_mode = FALSE),
    +                  output_file = "cv.html")
    +
    +# Knit the PDF version to temporary html location
    +tmp_html_cv_loc <- fs::file_temp(ext = ".html")
    +rmarkdown::render("cv.rmd",
    +                  params = list(pdf_mode = TRUE),
    +                  output_file = tmp_html_cv_loc)
    +
    +# Convert to PDF using Pagedown
    +pagedown::chrome_print(input = tmp_html_cv_loc,
    +                       output = "cv.pdf")
    +
    + +
    +

This script renders your CV to HTML and outputs it as cv.html. It also turns on the pdf_mode parameter in cv.rmd, which strips the links out and places them at the end, referenced by inline superscripts. Once the PDF version is rendered to HTML, that HTML is turned into a PDF using pagedown::chrome_print(). By using this script you can easily make sure you get both versions rendered at the same time, without having to manually toggle the pdf_mode parameter in the YAML header and then use the print dialog in your browser.

    +
    +
    +
    +

    +Questions?

    +

Confused by anything (there’s a lot to be confused by)? Open an issue on GitHub and let me know. Not comfortable with GitHub issues? Tweet the question at me on Twitter: @nicholasstrayer.

    +
    +
    + + + + + + diff --git a/docs/articles/sub_sheets.png b/docs/articles/sub_sheets.png new file mode 100644 index 0000000..7d846bc Binary files /dev/null and b/docs/articles/sub_sheets.png differ diff --git a/docs/authors.html b/docs/authors.html new file mode 100644 index 0000000..9c13f4e --- /dev/null +++ b/docs/authors.html @@ -0,0 +1,149 @@ + + + + + + + + +Authors • datadrivencv + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    + +
    +
    + + +
      +
    • +

      Nick Strayer. Maintainer. +

      +
    • +
    + +
    + +
    + + + +
    + + +
    +


    +
    + +
    +
    + + + + + + + + diff --git a/docs/docsearch.css b/docs/docsearch.css new file mode 100644 index 0000000..e5f1fe1 --- /dev/null +++ b/docs/docsearch.css @@ -0,0 +1,148 @@ +/* Docsearch -------------------------------------------------------------- */ +/* + Source: https://github.com/algolia/docsearch/ + License: MIT +*/ + +.algolia-autocomplete { + display: block; + -webkit-box-flex: 1; + -ms-flex: 1; + flex: 1 +} + +.algolia-autocomplete .ds-dropdown-menu { + width: 100%; + min-width: none; + max-width: none; + padding: .75rem 0; + background-color: #fff; + background-clip: padding-box; + border: 1px solid rgba(0, 0, 0, .1); + box-shadow: 0 .5rem 1rem rgba(0, 0, 0, .175); +} + +@media (min-width:768px) { + .algolia-autocomplete .ds-dropdown-menu { + width: 175% + } +} + +.algolia-autocomplete .ds-dropdown-menu::before { + display: none +} + +.algolia-autocomplete .ds-dropdown-menu [class^=ds-dataset-] { + padding: 0; + background-color: rgb(255,255,255); + border: 0; + max-height: 80vh; +} + +.algolia-autocomplete .ds-dropdown-menu .ds-suggestions { + margin-top: 0 +} + +.algolia-autocomplete .algolia-docsearch-suggestion { + padding: 0; + overflow: visible +} + +.algolia-autocomplete .algolia-docsearch-suggestion--category-header { + padding: .125rem 1rem; + margin-top: 0; + font-size: 1.3em; + font-weight: 500; + color: #00008B; + border-bottom: 0 +} + +.algolia-autocomplete .algolia-docsearch-suggestion--wrapper { + float: none; + padding-top: 0 +} + +.algolia-autocomplete .algolia-docsearch-suggestion--subcategory-column { + float: none; + width: auto; + padding: 0; + text-align: left +} + +.algolia-autocomplete .algolia-docsearch-suggestion--content { + float: none; + width: auto; + padding: 0 +} + +.algolia-autocomplete .algolia-docsearch-suggestion--content::before { + display: none +} + +.algolia-autocomplete .ds-suggestion:not(:first-child) .algolia-docsearch-suggestion--category-header { + padding-top: .75rem; + margin-top: .75rem; + border-top: 1px solid rgba(0, 0, 0, .1) +} + +.algolia-autocomplete .ds-suggestion .algolia-docsearch-suggestion--subcategory-column { + display: block; + padding: .1rem 1rem; + margin-bottom: 0.1; + font-size: 1.0em; + font-weight: 400 + /* display: none */ +} + +.algolia-autocomplete .algolia-docsearch-suggestion--title { + display: block; + padding: .25rem 1rem; + margin-bottom: 0; + font-size: 0.9em; + font-weight: 400 +} + +.algolia-autocomplete .algolia-docsearch-suggestion--text { + padding: 0 1rem .5rem; + margin-top: -.25rem; + font-size: 0.8em; + font-weight: 400; + line-height: 1.25 +} + +.algolia-autocomplete .algolia-docsearch-footer { + width: 110px; + height: 20px; + z-index: 3; + margin-top: 10.66667px; + float: right; + font-size: 0; + line-height: 0; +} + +.algolia-autocomplete .algolia-docsearch-footer--logo { + background-image: url("data:image/svg+xml;utf8,"); + background-repeat: no-repeat; + background-position: 50%; + background-size: 100%; + overflow: hidden; + text-indent: -9000px; + width: 100%; + height: 100%; + display: block; + transform: translate(-8px); +} + +.algolia-autocomplete .algolia-docsearch-suggestion--highlight { + color: #FF8C00; + background: rgba(232, 189, 54, 0.1) +} + + +.algolia-autocomplete .algolia-docsearch-suggestion--text .algolia-docsearch-suggestion--highlight { + box-shadow: inset 0 -2px 0 0 rgba(105, 105, 105, .5) +} + +.algolia-autocomplete .ds-suggestion.ds-cursor .algolia-docsearch-suggestion--content { + background-color: rgba(192, 192, 192, .15) +} diff --git a/docs/docsearch.js 
b/docs/docsearch.js new file mode 100644 index 0000000..b35504c --- /dev/null +++ b/docs/docsearch.js @@ -0,0 +1,85 @@ +$(function() { + + // register a handler to move the focus to the search bar + // upon pressing shift + "/" (i.e. "?") + $(document).on('keydown', function(e) { + if (e.shiftKey && e.keyCode == 191) { + e.preventDefault(); + $("#search-input").focus(); + } + }); + + $(document).ready(function() { + // do keyword highlighting + /* modified from https://jsfiddle.net/julmot/bL6bb5oo/ */ + var mark = function() { + + var referrer = document.URL ; + var paramKey = "q" ; + + if (referrer.indexOf("?") !== -1) { + var qs = referrer.substr(referrer.indexOf('?') + 1); + var qs_noanchor = qs.split('#')[0]; + var qsa = qs_noanchor.split('&'); + var keyword = ""; + + for (var i = 0; i < qsa.length; i++) { + var currentParam = qsa[i].split('='); + + if (currentParam.length !== 2) { + continue; + } + + if (currentParam[0] == paramKey) { + keyword = decodeURIComponent(currentParam[1].replace(/\+/g, "%20")); + } + } + + if (keyword !== "") { + $(".contents").unmark({ + done: function() { + $(".contents").mark(keyword); + } + }); + } + } + }; + + mark(); + }); +}); + +/* Search term highlighting ------------------------------*/ + +function matchedWords(hit) { + var words = []; + + var hierarchy = hit._highlightResult.hierarchy; + // loop to fetch from lvl0, lvl1, etc. + for (var idx in hierarchy) { + words = words.concat(hierarchy[idx].matchedWords); + } + + var content = hit._highlightResult.content; + if (content) { + words = words.concat(content.matchedWords); + } + + // return unique words + var words_uniq = [...new Set(words)]; + return words_uniq; +} + +function updateHitURL(hit) { + + var words = matchedWords(hit); + var url = ""; + + if (hit.anchor) { + url = hit.url_without_anchor + '?q=' + escape(words.join(" ")) + '#' + hit.anchor; + } else { + url = hit.url + '?q=' + escape(words.join(" ")); + } + + return url; +} diff --git a/docs/favicon-16x16.png b/docs/favicon-16x16.png new file mode 100644 index 0000000..3e3b478 Binary files /dev/null and b/docs/favicon-16x16.png differ diff --git a/docs/favicon-32x32.png b/docs/favicon-32x32.png new file mode 100644 index 0000000..825ba81 Binary files /dev/null and b/docs/favicon-32x32.png differ diff --git a/docs/favicon.ico b/docs/favicon.ico new file mode 100644 index 0000000..619edf1 Binary files /dev/null and b/docs/favicon.ico differ diff --git a/docs/index.html b/docs/index.html new file mode 100644 index 0000000..efacf99 --- /dev/null +++ b/docs/index.html @@ -0,0 +1,417 @@ + + + + + + + +An R package for building your CV with data • datadrivencv + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    +
    + +
    + + + +

    The goal of datadrivencv is to ease the burden of maintaining a CV by separating the content from the output: entries are treated as data.

    +
    +

    +Installation

    +

    Install the development version from GitHub with:

    +
    # install.packages("devtools")
    +devtools::install_github("nstrayer/datadrivencv")
    +
    +
    +
    +

    +Motivation

    +
    +

    +Updating a CV is not fun

    +

    Any time I went to add something to my CV, I ended up wanting to change the format a tiny bit. This usually meant the entire Word document completely falling apart and each entry needing to be copied and pasted into a new version.

    +

    Ultimately this process felt formulaic and repetitive: prime indicators that it could be done better with code. Using a spreadsheet to store each entry in the CV and R to write the markdown seemed like the way to go, and Pagedown made this even easier, meaning the same CV could be rendered as interactive HTML and as PDF without changing the underlying rendering engine, the way knitting to PDF vs knitting to HTML used to require.

    +
    + + +
    +
    +
    +

    +No lock-in

    +

    Inspired heavily by the usethis package, datadrivencv strives to make itself unnecessary. The main function is use_datadriven_cv(), which sets up the files you need to build your CV. These files are self-contained, meaning that if you uninstall datadrivencv your CV will still knit fine. All the R code logic is contained in a sourced script, so if you want to change it you can.

    +

    The package aims to bootstrap you to a working data-driven CV pipeline, serving as a jumping-off point for building your own custom CV. You may at first want to leave it as is and then slowly tweak things to keep it fresh. You have all the code, so you can!

    +
    +
    +
    +

    +Using it

    +

    The first step to using the package is the use_datadriven_cv() function. This function takes a few input parameters and, when run, sets up a series of files in your current working directory. E.g.

    +
    # run ?datadrivencv::use_datadriven_cv to see more details
    +datadrivencv::use_datadriven_cv(
    +  full_name = "Nick Strayer",
    +  data_location = "https://docs.google.com/spreadsheets/d/14MQICF2F8-vf8CKPF1m4lyGKO6_thG-4aSwat1e2TWc",
    +  pdf_location = "https://github.com/nstrayer/cv/raw/master/strayer_cv.pdf",
    +  html_location = "nickstrayer.me/cv/",
    +  source_location = "https://github.com/nstrayer/cv"
    +)
    +

    The available arguments are:

    full_name: Your full name, used in title of document and header
    data_location: Path of the spreadsheets holding all your data. This can be either a URL to a google sheet with multiple sheets containing the four data types or a path to a folder containing four .csvs with the necessary data.
    pdf_location: What location will the PDF of this CV be hosted at?
    html_location: What location will the HTML version of this CV be hosted at?
    source_location: Where is the code to build your CV hosted?
    open_files: Should the added files be opened after creation?
    use_network_logo: Should the logo be an interactive network based on your CV data? Note that this uses the function build_network_logo(), so it will introduce a dependency on this package.
    +

    This code is all that’s needed to set up a full CV. It outputs four files:

    cv.rmd: An RMarkdown file with various sections filled in. Edit this to fit your personal needs.
    dd_cv.css: A custom set of CSS styles that build on the default Pagedown “resume” template. Again, edit these as desired.
    render_cv.r: Use this script to build your CV in both PDF and HTML at the same time.
    cv_printing_functions.r: A series of functions that perform the dirty work of turning your spreadsheet data into markdown/HTML and making that output work for PDF printing, e.g. replacing markdown links with superscripts and a links section, tweaking the CSS to account for Chrome printing quirks, etc.
    +
    +
    +

    +Storing your data in spreadsheets

    +

    By default, the googlesheets4 package is used to fetch a Google Sheet with all the necessary data. To build your own version, I suggest simply copying my data, removing all the rows, and filling it in with your own data.
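    Under the hood, fetching the data amounts to something like the following sketch; the sub-sheet name "entries" matches the spreadsheet format described below, and gs4_deauth() is appropriate only because the sheet is publicly readable:

    library(googlesheets4)

    # The example sheet is publicly readable, so no authorization is needed
    gs4_deauth()

    sheet_url <- "https://docs.google.com/spreadsheets/d/14MQICF2F8-vf8CKPF1m4lyGKO6_thG-4aSwat1e2TWc"

    # Each of the four data types lives in its own sub-sheet
    entries <- read_sheet(sheet_url, sheet = "entries")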

    +
    + + +
    +
    +

    +Format of spreadsheets:

    +

    There are four spreadsheets of “data” that are used. These take the form of separate sub-sheets within a google sheet.

    +
    + + +
    +

    The four spreadsheets that are needed and their columns are:

    +
    +

    +entries +

    section: Where in your CV this entry belongs
    title: Main title of the entry
    loc: Location where the entry occurred
    institution: Primary institution affiliation for the entry
    start: Start date of entry (year). Can be left blank for single point events like a manuscript.
    end: End year of entry. Set to “current” if entry is still ongoing.
    description_*: Each description column is a separate bullet point for the entry. If you need more description bullet points, simply add a new column titled “description_{4,5,...}”
    +
    +
    +

    +language_skills +

    skill: Name of language
    level: Relative numeric level of skill
    +
    +
    +

    +text_blocks +

    loc: Id used for finding text block
    text: Contents of text block. Supports markdown formatting.
    +
    +
    +

    +contact info +

    loc: Id of contact section
    icon: Icon used from font-awesome 4 to label this contact section
    contact: The actual value written for the contact entry
    +
    +
    +
    +

    +Using .csvs instead of google sheets

    +

    Don’t want to use google sheets to store your data? Not a problem. Just make four .csvs (entries.csv, language_skills.csv, text_blocks.csv, contact_info.csv) that match the format above and pass the folder containing them as your data_location when initializing with use_datadriven_cv().

    +

    The function use_csv_data_storage() will set these up for you.
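    A minimal sketch, assuming you want the CSVs in a local data/ folder:

    # Set up the four example CSVs in data/
    datadrivencv::use_csv_data_storage(folder_name = "data")

    # Then point the CV at that folder instead of a google sheet
    datadrivencv::use_datadriven_cv(
      full_name = "Nick Strayer",
      data_location = "data/"
    )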

    +
    +
    +
    +

    +Rendering your CV

    +

    Now that you have the templates set up and you’ve configured your data, the last thing to do is render. The easiest way to do this is by opening cv.rmd in RStudio and clicking the “Knit” button. This will render an HTML version of your CV. However, you most likely want a PDF version of your CV to go along with the HTML version. The easiest way to do this is to run the included script render_cv.r:

    +
    +

    +render_cv.r +

    +
    # Knit the HTML version
    +rmarkdown::render("cv.rmd",
    +                  params = list(pdf_mode = FALSE),
    +                  output_file = "cv.html")
    +
    +# Knit the PDF version to temporary html location
    +tmp_html_cv_loc <- fs::file_temp(ext = ".html")
    +rmarkdown::render("cv.rmd",
    +                  params = list(pdf_mode = TRUE),
    +                  output_file = tmp_html_cv_loc)
    +
    +# Convert to PDF using Pagedown
    +pagedown::chrome_print(input = tmp_html_cv_loc,
    +                       output = "cv.pdf")
    +
    + + +
    +

    This script renders your CV to HTML and outputs it as cv.html. It also turns on the pdf_mode parameter in cv.rmd, which strips links out of the text and places them at the end, referenced by inline superscripts. Once the PDF version is rendered to HTML, that HTML is turned into a PDF using pagedown::chrome_print(). Using this script, you can easily get both versions rendered at the same time without having to manually toggle the pdf_mode parameter in the YAML header and then use the print dialog in your browser.

    +
    +
    +
    +

    +Questions?

    +

    Confused by anything (there’s a lot to be confused by)? Open an issue on GitHub and let me know. Not comfortable with GitHub issues? Tweet the question at me on Twitter: @nicholasstrayer.

    + + + + + + + +
    + +
    + + +
    + + +
    + +
    +


    +
    + +
    +
    + + + + + + diff --git a/docs/link.svg b/docs/link.svg new file mode 100644 index 0000000..88ad827 --- /dev/null +++ b/docs/link.svg @@ -0,0 +1,12 @@ + + + + + + diff --git a/docs/logo.png b/docs/logo.png new file mode 100644 index 0000000..a0e6369 Binary files /dev/null and b/docs/logo.png differ diff --git a/docs/logo.svg b/docs/logo.svg new file mode 100644 index 0000000..3f7665c --- /dev/null +++ b/docs/logo.svg @@ -0,0 +1,34 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/docs/pkgdown.css b/docs/pkgdown.css new file mode 100644 index 0000000..9145958 --- /dev/null +++ b/docs/pkgdown.css @@ -0,0 +1,256 @@ +/* Sticky footer */ + +/** + * Basic idea: https://philipwalton.github.io/solved-by-flexbox/demos/sticky-footer/ + * Details: https://github.com/philipwalton/solved-by-flexbox/blob/master/assets/css/components/site.css + * + * .Site -> body > .container + * .Site-content -> body > .container .row + * .footer -> footer + * + * Key idea seems to be to ensure that .container and __all its parents__ + * have height set to 100% + * + */ + +html, body { + height: 100%; +} + +body > .container { + display: flex; + height: 100%; + flex-direction: column; +} + +body > .container .row { + flex: 1 0 auto; +} + +footer { + margin-top: 45px; + padding: 35px 0 36px; + border-top: 1px solid #e5e5e5; + color: #666; + display: flex; + flex-shrink: 0; +} +footer p { + margin-bottom: 0; +} +footer div { + flex: 1; +} +footer .pkgdown { + text-align: right; +} +footer p { + margin-bottom: 0; +} + +img.icon { + float: right; +} + +img { + max-width: 100%; +} + +/* Fix bug in bootstrap (only seen in firefox) */ +summary { + display: list-item; +} + +/* Typographic tweaking ---------------------------------*/ + +.contents .page-header { + margin-top: calc(-60px + 1em); +} + +/* Section anchors ---------------------------------*/ + +a.anchor { + margin-left: -30px; + display:inline-block; + width: 30px; + height: 30px; + visibility: hidden; + + background-image: url(./link.svg); + background-repeat: no-repeat; + background-size: 20px 20px; + background-position: center center; +} + +.hasAnchor:hover a.anchor { + visibility: visible; +} + +@media (max-width: 767px) { + .hasAnchor:hover a.anchor { + visibility: hidden; + } +} + + +/* Fixes for fixed navbar --------------------------*/ + +.contents h1, .contents h2, .contents h3, .contents h4 { + padding-top: 60px; + margin-top: -40px; +} + +/* Sidebar --------------------------*/ + +#sidebar { + margin-top: 30px; + position: -webkit-sticky; + position: sticky; + top: 70px; +} +#sidebar h2 { + font-size: 1.5em; + margin-top: 1em; +} + +#sidebar h2:first-child { + margin-top: 0; +} + +#sidebar .list-unstyled li { + margin-bottom: 0.5em; +} + +.orcid { + height: 16px; + /* margins are required by official ORCID trademark and display guidelines */ + margin-left:4px; + margin-right:4px; + vertical-align: middle; +} + +/* Reference index & topics ----------------------------------------------- */ + +.ref-index th {font-weight: normal;} + +.ref-index td {vertical-align: top;} +.ref-index .icon {width: 40px;} +.ref-index .alias {width: 40%;} +.ref-index-icons .alias {width: calc(40% - 40px);} +.ref-index .title {width: 60%;} + +.ref-arguments th {text-align: right; padding-right: 10px;} +.ref-arguments th, .ref-arguments td {vertical-align: top;} +.ref-arguments .name {width: 20%;} +.ref-arguments .desc {width: 80%;} + +/* Nice scrolling for wide elements --------------------------------------- */ + +table { + display: block; + 
overflow: auto; +} + +/* Syntax highlighting ---------------------------------------------------- */ + +pre { + word-wrap: normal; + word-break: normal; + border: 1px solid #eee; +} + +pre, code { + background-color: #f8f8f8; + color: #333; +} + +pre code { + overflow: auto; + word-wrap: normal; + white-space: pre; +} + +pre .img { + margin: 5px 0; +} + +pre .img img { + background-color: #fff; + display: block; + height: auto; +} + +code a, pre a { + color: #375f84; +} + +a.sourceLine:hover { + text-decoration: none; +} + +.fl {color: #1514b5;} +.fu {color: #000000;} /* function */ +.ch,.st {color: #036a07;} /* string */ +.kw {color: #264D66;} /* keyword */ +.co {color: #888888;} /* comment */ + +.message { color: black; font-weight: bolder;} +.error { color: orange; font-weight: bolder;} +.warning { color: #6A0366; font-weight: bolder;} + +/* Clipboard --------------------------*/ + +.hasCopyButton { + position: relative; +} + +.btn-copy-ex { + position: absolute; + right: 0; + top: 0; + visibility: hidden; +} + +.hasCopyButton:hover button.btn-copy-ex { + visibility: visible; +} + +/* headroom.js ------------------------ */ + +.headroom { + will-change: transform; + transition: transform 200ms linear; +} +.headroom--pinned { + transform: translateY(0%); +} +.headroom--unpinned { + transform: translateY(-100%); +} + +/* mark.js ----------------------------*/ + +mark { + background-color: rgba(255, 255, 51, 0.5); + border-bottom: 2px solid rgba(255, 153, 51, 0.3); + padding: 1px; +} + +/* vertical spacing after htmlwidgets */ +.html-widget { + margin-bottom: 10px; +} + +/* fontawesome ------------------------ */ + +.fab { + font-family: "Font Awesome 5 Brands" !important; +} + +/* don't display links in code chunks when printing */ +/* source: https://stackoverflow.com/a/10781533 */ +@media print { + code a:link:after, code a:visited:after { + content: ""; + } +} diff --git a/docs/pkgdown.js b/docs/pkgdown.js new file mode 100644 index 0000000..087a762 --- /dev/null +++ b/docs/pkgdown.js @@ -0,0 +1,113 @@ +/* http://gregfranko.com/blog/jquery-best-practices/ */ +(function($) { + $(function() { + + $('.navbar-fixed-top').headroom(); + + $('body').css('padding-top', $('.navbar').height() + 10); + $(window).resize(function(){ + $('body').css('padding-top', $('.navbar').height() + 10); + }); + + $('body').scrollspy({ + target: '#sidebar', + offset: 60 + }); + + $('[data-toggle="tooltip"]').tooltip(); + + var cur_path = paths(location.pathname); + var links = $("#navbar ul li a"); + var max_length = -1; + var pos = -1; + for (var i = 0; i < links.length; i++) { + if (links[i].getAttribute("href") === "#") + continue; + // Ignore external links + if (links[i].host !== location.host) + continue; + + var nav_path = paths(links[i].pathname); + + var length = prefix_length(nav_path, cur_path); + if (length > max_length) { + max_length = length; + pos = i; + } + } + + // Add class to parent
  • , and enclosing
  • if in dropdown + if (pos >= 0) { + var menu_anchor = $(links[pos]); + menu_anchor.parent().addClass("active"); + menu_anchor.closest("li.dropdown").addClass("active"); + } + }); + + function paths(pathname) { + var pieces = pathname.split("/"); + pieces.shift(); // always starts with / + + var end = pieces[pieces.length - 1]; + if (end === "index.html" || end === "") + pieces.pop(); + return(pieces); + } + + // Returns -1 if not found + function prefix_length(needle, haystack) { + if (needle.length > haystack.length) + return(-1); + + // Special case for length-0 haystack, since for loop won't run + if (haystack.length === 0) { + return(needle.length === 0 ? 0 : -1); + } + + for (var i = 0; i < haystack.length; i++) { + if (needle[i] != haystack[i]) + return(i); + } + + return(haystack.length); + } + + /* Clipboard --------------------------*/ + + function changeTooltipMessage(element, msg) { + var tooltipOriginalTitle=element.getAttribute('data-original-title'); + element.setAttribute('data-original-title', msg); + $(element).tooltip('show'); + element.setAttribute('data-original-title', tooltipOriginalTitle); + } + + if(ClipboardJS.isSupported()) { + $(document).ready(function() { + var copyButton = ""; + + $(".examples, div.sourceCode").addClass("hasCopyButton"); + + // Insert copy buttons: + $(copyButton).prependTo(".hasCopyButton"); + + // Initialize tooltips: + $('.btn-copy-ex').tooltip({container: 'body'}); + + // Initialize clipboard: + var clipboardBtnCopies = new ClipboardJS('[data-clipboard-copy]', { + text: function(trigger) { + return trigger.parentNode.textContent; + } + }); + + clipboardBtnCopies.on('success', function(e) { + changeTooltipMessage(e.trigger, 'Copied!'); + e.clearSelection(); + }); + + clipboardBtnCopies.on('error', function() { + changeTooltipMessage(e.trigger,'Press Ctrl+C or Command+C to copy'); + }); + }); + } +})(window.jQuery || window.$) diff --git a/docs/pkgdown.yml b/docs/pkgdown.yml new file mode 100644 index 0000000..a5e36a2 --- /dev/null +++ b/docs/pkgdown.yml @@ -0,0 +1,5 @@ +pandoc: 2.7.3 +pkgdown: 1.4.1 +pkgdown_sha: ~ +articles: [] + diff --git a/docs/reference/CV_Printer.html b/docs/reference/CV_Printer.html new file mode 100644 index 0000000..b10efaa --- /dev/null +++ b/docs/reference/CV_Printer.html @@ -0,0 +1,321 @@ + + + + + + + + +R6 Class to print components of CV from data — CV_Printer • datadrivencv + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    + +
    +
    + + +
    +

    R6 Class to print components of CV from data

    +

    R6 Class to print components of CV from data

    +
    + + + +

    Details

    + +

    This class is initialized at the head of your CV or resume RMarkdown file and then, through its various print_* methods, builds the document's components.

    +

    Public fields

    + +

    +
    position_data

    data frame of positions by row with columns: +

      +
    • section What type of position entry,

    • +
    • title Title of entry,

    • +
    • loc Where the position took place,

    • +
    • institution Institution the position was associated with,

    • +
    • start Start year of position

    • +
    • end End year of position,

    • +
    • in_resume Logical to filter what entries should be included in a resume (Not used for CV mode),

    • +
    • description_{1,2,...} Free-form text fields to be added as description bullets

    • +

    + +
    skills

    data frame with two columns: +

      +
    • skill ID of skill

    • +
    • level Relative numeric level for skill

    • +

    + +
    text_blocks

    data frame with two columns: +

      +
    • loc Where this text block is going in the CV

    • +
    • text Actual text to be placed.

    • +

    + +
    contact_info

    data frame with three columns: +

      +
    • loc What the contact point is for (e.g. email)

    • +
    • icon Font-awesome 4 icon id for this contact point (e.g. "envelope")

    • +
    • contact Actual contact info such as nick@test.com.

    • +

    + +
    pdf_mode

    Is the output being rendered into a PDF? That is, do links need to be stripped?

    + +
    html_location

    Where will the html version of your CV be hosted?

    + +
    pdf_location

    Where will the pdf version of your CV be hosted?

    + +

    +

    Methods

    + + +

    Public methods

    + + +


    +

    Method new()

    +

    Create a CV_Printer object.

    Usage

    +

    CV_Printer$new(
    +  data_location,
    +  pdf_mode = FALSE,
    +  html_location,
    +  pdf_location,
    +  position_entry_template = default_position_entry_template,
    +  sheet_is_publicly_readable = TRUE
    +)

    + +

    Arguments

    +

    +
    data_location

    Path of the spreadsheets holding all your data. This can be +either a URL to a google sheet with multiple sheets containing the four +data types or a path to a folder containing four .csvs with the neccesary +data.

    + +
    pdf_mode

    Is the output being rendered into a PDF? That is, do links need to be stripped?

    + +
    html_location

    What location will the HTML version of this CV be hosted at?

    + +
    pdf_location

    What location will the PDF of this CV be hosted at?

    + +
    position_entry_template

    A glue template for building position +entries.

    + +
    sheet_is_publicly_readable

    If you're using google sheets for data, +is the sheet publicly available? (Makes authorization easier.)

    + +
    source_location

    Where is the code to build your CV hosted?

    + +

    +

    Returns

    +

    A new CV_Printer object.
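    For orientation, a minimal sketch of how this is typically called at the head of cv.rmd (the values mirror the examples elsewhere in these docs; your locations will differ):

    # Near the top of cv.rmd, after sourcing cv_printing_functions.r
    CV <- CV_Printer$new(
      data_location = "https://docs.google.com/spreadsheets/d/14MQICF2F8-vf8CKPF1m4lyGKO6_thG-4aSwat1e2TWc",
      pdf_mode = params$pdf_mode,
      html_location = "nickstrayer.me/cv/",
      pdf_location = "https://github.com/nstrayer/cv/raw/master/strayer_cv.pdf"
    )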

    +


    +

    Method set_pdf_mode()

    +

    Turn on pdf mode for class. Useful for when the class is cached to avoid re-downloading data.

    Usage

    +

    CV_Printer$set_pdf_mode(pdf_mode = TRUE)

    + +

    Arguments

    +

    +
    pdf_mode

    Are we turning PDF mode on?

    + +

    +


    +

    Method print_section()

    +

    Takes the position data frame and the desired section id, and prints that section to markdown.

    Usage

    +

    CV_Printer$print_section(section_id)

    + +

    Arguments

    +

    +
    section_id

    ID of the positions section to be printed as encoded by the section column of the positions table

    + +
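    For example, a chunk in cv.rmd might print all education entries like this sketch (the 'education' id comes from the package's sample data; your section ids may differ):

    CV$print_section('education')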

    +


    +

    Method print_text_block()

    +

    Prints out text block identified by a given label.

    Usage

    +

    CV_Printer$print_text_block(label)

    + +

    Arguments

    +

    +
    label

    ID of the text block to print as encoded in label column of text_blocks table.

    + +

    +


    +

    Method print_skill_bars()

    +

    Construct a bar chart of skills

    Usage

    +

    CV_Printer$print_skill_bars(out_of = 5)

    + +

    Arguments

    +

    +
    out_of

    The relative maximum for skills. Used to set what a fully filled in skill bar is.

    + +
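    For example, if your skill levels are recorded on a 0-10 scale rather than the default 0-5, a sketch:

    # Treat 10 as a fully filled-in bar
    CV$print_skill_bars(out_of = 10)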

    +


    +

    Method print_links()

    +

    List of all links in document labeled by their superscript integer.

    Usage

    +

    CV_Printer$print_links()

    + +


    +

    Method print_contact_info()

    +

    Contact information section with icons

    Usage

    +

    CV_Printer$print_contact_info()

    + +


    +

    Method print_link_to_other_format()

    +

    Small addendum that links to the PDF version of the CV if currently in HTML, and to the HTML version if currently in PDF.

    Usage

    +

    CV_Printer$print_link_to_other_format()

    + +


    +

    Method set_style()

    +

    Appends some styles specific to PDF output.

    Usage

    +

    CV_Printer$set_style()

    + +


    +

    Method clone()

    +

    The objects of this class are cloneable with this method.

    Usage

    +

    CV_Printer$clone(deep = FALSE)

    + +

    Arguments

    +

    +
    deep

    Whether to make a deep clone.

    + +

    + + + +
    + +
    + + +
    + + +
    +


    +
    + +
    +
    + + + + + + + + diff --git a/docs/reference/build_network_logo.html b/docs/reference/build_network_logo.html new file mode 100644 index 0000000..0842ccb --- /dev/null +++ b/docs/reference/build_network_logo.html @@ -0,0 +1,174 @@ + + + + + + + + +Build interactive network logo — build_network_logo • datadrivencv + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    + +
    +
    + + +
    +

    Constructs a network based on your position data to be used as a logo. Interactive in the HTML version and static in the PDF version. Nodes are entries, colored by section and connected if they occurred in the same year.

    +
    + +
    build_network_logo(position_data)
    + +

    Arguments

    + + + + + + +
    position_data

    position data from your CV_Printer class.

    + +

    Value

    + +

    Interactive force-directed layout network of your CV data

    + +
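    In the generated cv.rmd, the logo chunk passes the printer object's position data straight in; a sketch, assuming a CV object built with CV_Printer$new():

    datadrivencv::build_network_logo(CV$position_data)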
    + +
    + + +
    + + +
    +


    +
    + +
    +
    + + + + + + + + diff --git a/docs/reference/figures/csv_to_cv.png b/docs/reference/figures/csv_to_cv.png new file mode 100644 index 0000000..a0e6369 Binary files /dev/null and b/docs/reference/figures/csv_to_cv.png differ diff --git a/docs/reference/figures/how_to_copy_data.png b/docs/reference/figures/how_to_copy_data.png new file mode 100644 index 0000000..742466c Binary files /dev/null and b/docs/reference/figures/how_to_copy_data.png differ diff --git a/docs/reference/figures/html_vs_pdf_output.png b/docs/reference/figures/html_vs_pdf_output.png new file mode 100644 index 0000000..eacc1ed Binary files /dev/null and b/docs/reference/figures/html_vs_pdf_output.png differ diff --git a/docs/reference/figures/logo.png b/docs/reference/figures/logo.png new file mode 100644 index 0000000..a0e6369 Binary files /dev/null and b/docs/reference/figures/logo.png differ diff --git a/docs/reference/figures/logo.svg b/docs/reference/figures/logo.svg new file mode 100644 index 0000000..3f7665c --- /dev/null +++ b/docs/reference/figures/logo.svg @@ -0,0 +1,34 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/docs/reference/figures/sub_sheets.png b/docs/reference/figures/sub_sheets.png new file mode 100644 index 0000000..7d846bc Binary files /dev/null and b/docs/reference/figures/sub_sheets.png differ diff --git a/docs/reference/index.html b/docs/reference/index.html new file mode 100644 index 0000000..6b7e073 --- /dev/null +++ b/docs/reference/index.html @@ -0,0 +1,189 @@ + + + + + + + + +Function reference • datadrivencv + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    + +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +

    All functions

    +

    +
    +

    build_network_logo()

    +

    Build interactive network logo

    +

    use_csv_data_storage()

    +

    Use CSVs for storing data

    +

    use_datadriven_cv()

    +

    Use Data Driven CV template

    +

    use_ddcv_template()

    +

    Use template file from package

    +
    + + +
    + + +
    + + +
    +


    +
    + +
    +
    + + + + + + + + diff --git a/docs/reference/pipe.html b/docs/reference/pipe.html new file mode 100644 index 0000000..039a4a7 --- /dev/null +++ b/docs/reference/pipe.html @@ -0,0 +1,157 @@ + + + + + + + + +Pipe operator — %>% • datadrivencv + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    + +
    +
    + + +
    +

    See magrittr::%>% for details.

    +
    + +
    lhs %>% rhs
    + + + +
    + +
    + + +
    + + +
    +


    +
    + +
    +
    + + + + + + + + diff --git a/docs/reference/use_csv_data_storage.html b/docs/reference/use_csv_data_storage.html new file mode 100644 index 0000000..0f2a008 --- /dev/null +++ b/docs/reference/use_csv_data_storage.html @@ -0,0 +1,189 @@ + + + + + + + + +Use CSVs for storing data — use_csv_data_storage • datadrivencv + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    + +
    +
    + + +
    +

    Sets up examples of the four CSVs needed for building a CV

    +
    + +
    use_csv_data_storage(folder_name = "data", create_output_dir = TRUE)
    + +

    Arguments

    + + + + + + + + + + +
    folder_name

    Name of the folder you want the CSVs stored in, relative to the current working directory

    create_output_dir

    If the requested output directory is missing should it be created?

    + +

    Value

    + +

    A new folder <folder_name>/ with entries.csv, text_blocks.csv, language_skills.csv, and contact_info.csv in it.

    + +

    Examples

    +
    # Make a temp directory for placing files
    # This would be a real location for a typical situation
    temp_dir <- fs::dir_create(fs::path(tempdir(), "cv_w_csvs"))

    datadrivencv::use_csv_data_storage(
      folder_name = fs::path(temp_dir, "csv_data"),
      create_output_dir = TRUE
    )
    #> [1] "Copied CSVs to /var/folders/d4/cbr0wtz50gb69vd8pqt65w0m0000gn/T/RtmpQEX7dq/cv_w_csvs/csv_data"

    list.files(fs::path(temp_dir, "csv_data"))
    #> [1] "contact_info.csv"     "entries.csv"          "language_skills.csv"
    #> [4] "text_blocks.csv"
    +
    +
    + +
    + + +
    + + +
    +


    +
    + +
    +
    + + + + + + + + diff --git a/docs/reference/use_datadriven_cv.html b/docs/reference/use_datadriven_cv.html new file mode 100644 index 0000000..a81c474 --- /dev/null +++ b/docs/reference/use_datadriven_cv.html @@ -0,0 +1,253 @@ + + + + + + + + +Use Data Driven CV template — use_datadriven_cv • datadrivencv + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    + +
    +
    + + +
    +

    Sets up the .Rmd file for a data-driven CV in the current working directory. Also adds a CSS file for the CV so the style can be customized.

    +
    + +
    use_datadriven_cv(
    +  full_name = "Sarah Arcos",
    +  data_location = system.file("sample_data/", package = "datadrivencv"),
    +  pdf_location = "https://github.com/nstrayer/cv/raw/master/strayer_cv.pdf",
    +  html_location = "nickstrayer.me/datadrivencv/",
    +  source_location = "https://github.com/nstrayer/datadrivencv",
    +  which_files = "all",
    +  output_dir = getwd(),
    +  create_output_dir = FALSE,
    +  use_network_logo = TRUE,
    +  open_files = TRUE
    +)
    + +

    Arguments

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    full_name

    Your full name, used in title of document and header

    data_location

    Path of the spreadsheets holding all your data. This can be either a URL to a google sheet with multiple sheets containing the four data types or a path to a folder containing four .csvs with the necessary data. See use_csv_data_storage() for help setting up these .csvs.

    pdf_location

    What location will the PDF of this CV be hosted at?

    html_location

    What location will the HTML version of this CV be hosted +at?

    source_location

    Where is the code to build your CV hosted?

    which_files

    What files should be placed? Takes a vector of possible values c("cv.rmd", "dd_cv.css", "render_cv.r", "cv_printing_functions.r") or "all" for everything. This can be used to incrementally update the printing functions or CSS without losing customizations you've made to other files.

    output_dir

    Where should the files be placed? Defaults to your current working directory

    create_output_dir

    If the requested output directory is missing should it be created?

    use_network_logo

    Should the logo be an interactive network based on your CV data? Note that this uses the function build_network_logo(), so it will introduce a dependency on this package.

    open_files

    Should the added files be opened after creation?

    + +

    Value

    + +

    cv.rmd, dd_cv.css, render_cv.r, and cv_printing_functions.r +written to the current working directory.

    + +

    Examples

    +
    # Make a temp directory for placing files
    # This would be a real location for a typical situation
    temp_dir <- fs::dir_create(fs::path(tempdir(), "my_cv"))

    use_datadriven_cv(
      full_name = "Nick Strayer",
      data_location = "https://docs.google.com/spreadsheets/d/14MQICF2F8-vf8CKPF1m4lyGKO6_thG-4aSwat1e2TWc",
      pdf_location = "https://github.com/nstrayer/cv/raw/master/strayer_cv.pdf",
      html_location = "nickstrayer.me/cv/",
      source_location = "https://github.com/nstrayer/cv",
      output_dir = temp_dir,
      open_files = FALSE
    )

    # Files should be where they were requested
    list.files(temp_dir)
    #> [1] "cv_printing_functions.r" "cv.rmd"
    #> [3] "dd_cv.css"               "render_cv.r"
    +
    +
    + +
    + + +
    + + +
    +


    +
    + +
    +
    + + + + + + + + diff --git a/docs/reference/use_ddcv_template.html b/docs/reference/use_ddcv_template.html new file mode 100644 index 0000000..63fd420 --- /dev/null +++ b/docs/reference/use_ddcv_template.html @@ -0,0 +1,198 @@ + + + + + + + + +Use template file from package — use_ddcv_template • datadrivencv + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    + +
    +
    + + +
    +

    Use template file from package

    +
    + +
    use_ddcv_template(
    +  file_name,
    +  params = NULL,
    +  output_file_name = file_name,
    +  output_dir = getwd(),
    +  create_output_dir = FALSE,
    +  warn_about_no_change = TRUE,
    +  open_after_making = FALSE
    +)
    + +

    Arguments

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    file_name

    Name of file from templates to use: e.g. cv.rmd.

    params

    Parameters used to fill in the whisker template

    output_file_name

    Name of file after being placed.

    output_dir

    Directory location for output to be placed in.

    create_output_dir

    If the requested output directory is missing should it be created?

    warn_about_no_change

    If there is no change between the new file and what was already there, should a warning be issued?

    open_after_making

    Should the file be opened after it has been written?

    + + +
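    As a hypothetical sketch of how the exported use_* helpers call this internally (the params list below is illustrative of whisker placeholder values, not a documented set):

    use_ddcv_template(
      file_name = "cv.rmd",
      params = list(full_name = "Nick Strayer"),  # illustrative whisker values
      output_dir = "my_cv",
      create_output_dir = TRUE
    )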
    + +
    + + +
    + + +
    +


    +
    + +
    +
    + + + + + + + + diff --git a/en/index.html b/en/index.html deleted file mode 100644 index 71a9665..0000000 --- a/en/index.html +++ /dev/null @@ -1,32758 +0,0 @@ - - - - - - - - - - - bmuniz_resume_en - - - - - - - - - - - - - - - -
    -

    Aside

    - -
    -

    Tech skills

    -

    -
    -
    -

    R skills

    -

    -
    -
    -

    Languages

    -
      -
    • Portuguese
    • -
    • English
    • -
    • Spanish
    • -
    -
    -
    -

    Disclaimer

    -

    Made w/ pagedown.
    -Source code: Github repo.

    -

    Last updated on 2023-12-02.

    -
    -
    -
    -

    Main

    -
    -

    Bianca Muniz

    -

    I enjoy working with data to find and deliver insights, solve business problems, and build tools that help people work more efficiently. I moved into data science after 10+ years working in public policy and public relations.

    -
    -
    -

    Professional Experience

    -
    -

    Data Analyst

    -

    Agência Pública de Jornalismo Investigativo

    -

    Sao Paulo-SP

    -

    Present - 2022

    -
      -
    • Create automation solutions for the data journalism workflow and assist with data-driven stories
    • -
    • Participated in the Map of Conflict, a project by the investigative journalism outlet Agência Pública, in partnership with the Pastoral Land Commission (CPT). Using original data analysis, it investigates instances of rural conflict in Brazil’s Legal Amazon region in the last decade (2011-2020)
    • -
    -
    -
    -

    Data Journalism Intern

    -

    Agência Pública de Jornalismo Investigativo

    -

    N/A

    -

    2022 - 2020

    -
      -
    • Participated in coverage recognized by the Roche Health Journalism Award and in reports that were finalists for the Sigma Awards and the Claudio Weber Abramo Data Journalism Award.
    • -
    • -
    -
    -
    -

    Vice-President Director

    -

    Jornalismo Júnior

    -

    N/A

    -

    2019

    -
      -
    • Headed the financial planning of Jornalismo Júnior, with activities like cash flow updates and legal compliance
    • -
    • Managed internal and external projects. One of them was “São Paulo sem Migué”, a fact-checking project about the city of São Paulo
    • -
    -
    -
    -

    News reporter

    -

    Jornalismo Júnior

    -

    N/A

    -

    2018

    -
      -
    • Wrote articles about science, sports, culture and politics
    • -
    -
    -
    -
    -

    Education

    -
    -

    Bachelor of Journalism

    -

    University of São Paulo (USP)

    -

    N/A

    -

    2023 - 2018

    -
      -
    • Member of the Junior Enterprise of Journalism at ECA-USP (2018 - 2019)
    • -
    • Teaching assistant for the USP Institute of Mathematics and Statistics (IME) summer courses “Python for Data Analysis” (2022) and “R for Data Analysis” (2023)
    • -
    -
    -
    -

    Master in Data Journalism, Automation and Data Storytelling

    -

    Insper

    -

    N/A

    -

    2022 - 2021

    -
      -
    • Specialization with double certification
    • -
    • -
    -
    -
    -

    MS in Pharmacology

    -

    Federal University of São Paulo - UNIFESP

    -

    N/A

    -

    2022 - 2018

    -
      -
    • Master in Sciences from the Postgraduate Program in Pharmacology (CAPES rating 6), with a project entitled “Pharmacological and non-pharmacological manipulations for the reestablishment of a ‘hedonic tone’ in an animal model of schizophrenia: the SHR strain”
    • -
    • CAPES scholarship (2018 - 2019)
    • -
    -
    -
    -

    Bachelor of Biomedicine

    -

    Federal University of São Paulo - UNIFESP

    -

    N/A

    -

    2017 - 2014

    -
      -
    • Participated in the development of the “Patógenos em Jogo”, an extension project of Unifesp (2016)
    • -
    • Coordinated the XV Biomedicine Winter Course, at Unifesp (2017)
    • -
    -
    -
    -
    -

    Awards

    -
    -

    Awards, shortlists and honorable mentions

    -

    Associated with the Agência Pública de Jornalismo Investigativo

    -

    N/A

    -

    2021

    -
      -
    • Claudio Weber Abramo Award for Data Journalism - Shortlist (2021)
    • -
    • The Sigma Awards 2021 - Shortlisted
    • -
    • Roche Health Journalism Award (Honorable Mention)
    • -
    • 39th Human Rights Journalism Award
    • -
    • Claudio Weber Abramo Award for Data Journalism - Shortlist (2022)
    • -
    • The Sigma Awards 2022 - Shortlisted with the work “Map of Conflict”
    • -
    • Agência Nacional de Águas e Saneamento Award 2023 - Shortlisted
    • -
    • 39th Human Rights Award for Journalism (Honorable Mention)
    • -
    • Design for Better World Award 2022 - Winner with the work “Map of Conflict”
    • -
    -
    -
    -

    Scholarships and Fellowships

    -

    N/A

    -

    N/A

    -

    N/A

    -
      -
    • Entrepreneurship and Innovation Scholarship (Agência USP de Inovação - AUSPIN, 2022). With this scholarship, I developed a four-month research project at the University of Texas at Austin on how data journalism is taught in the USA
    • -
    • Amazon Rainforest Journalism Fund, from Pulitzer Center (2021)
    • -
    -
    -
    -
    -

    Talks

    -
    -

    Lectures and workshops

    -

    N/A

    -

    N/A

    -

    N/A

    -
      -
    • Lecture: “Step by step of a data-driven story with the Map of Conflict” (2023, “Coverage of Socio-Environmental Conflicts” - Portal Assobiar)
    • -
    • Lecture: “Intro to Data Journalism” at the “Communication Course: Practice and Reflection” (2023, Colabora)
    • -
    • Climate Data Laboratory - Alma Preta Jornalismo (2023, Escola de Dados)
    • -
    • Lecture “Dataviz at the Agência Pública” (2023, Unisinos)
    • -
    • Introduction to R and Tidyverse (2023, R-Ladies São Paulo)
    • -
    • Workshop “Python + R together with Quarto: your reports will never be the same” (2023, CODA Amazônia)
    • -
    • Workshop “Spreadsheet: a great ally of data analysis” (2023, CODA Amazônia)
    • -
    • Lecture: “Data visualizations reveal social problems” (2023, Domingo de Dados - 18th Abraji Congress)
    • -
    • Workshop “Open Data Analysis with R - Open Data Day” (2023, R-Ladies São Paulo)
    • -
    • Workshop “Data journalism with spreadsheet editors” (2022, CODA.Br)
    • -
    • Webinar “A Pública in data and the Map of Conflict” (2022, Escola de Dados)
    • -
    • Data journalism to change realities (2022, Data Sunday - 17th Abraji Congress)
    • -
    • Lecture: “Business Chain - Data in all sectors of the economy: Data journalism” (2021, BIX Technology)
    • -
    -
    -
    -
    -

    Voluntary work

    -
      -
    • R-Ladies São Paulo
    • -
    • Cursinho pré-vestibular Jeannine Aboulafia
    • -
    -
    -
    -

    Disclaimer

    -

    Made w/ pagedown.
    -Source code: Github repo.

    -

    Last updated on 2023-12-02.

    -
    -
    - - - - - - diff --git a/helper_functions.R b/helper_functions.R deleted file mode 100644 index 216d829..0000000 --- a/helper_functions.R +++ /dev/null @@ -1,75 +0,0 @@ -#author: matt leary -#date:11/10/2019 -#intent: helper functions for resume built with R pagedown - -library(dplyr) -library(ggplot2) -library(tidyr) -library(purrr) - -# Construct a bar chart of skills -build_skill_bars <- function(skill_data, section_title) { - skill_data %>% - filter(section == section_title) %>% - ggplot(aes(x= reorder(skill, level), y = 5)) + - geom_col(fill = "lightgrey") + - geom_col(aes(x= reorder(skill, level), y = level), fill = "darkgrey") + - coord_flip() + - geom_text(aes(label = skill, y = 0.25), hjust = 0, size = 12, color = "white") + - expand_limits(y = c(0,5)) + - labs(x = NULL, - y = NULL) + - theme_void() + - theme(panel.background = element_rect(fill = "transparent", colour = NA), - plot.background = element_rect(fill = "transparent", colour = NA)) -} - - -# Take a position dataframe and the section id desired -# and prints the section to markdown. -print_section <- function(position_data, section_id){ - position_data %>% - filter(section == section_id & - include == TRUE) %>% - arrange(desc(end)) %>% - mutate(id = 1:n()) %>% - pivot_longer( - starts_with('description'), - names_to = 'description_num', - values_to = 'description' - ) %>% - filter(!is.na(description) | description_num == 'description_1') %>% ### work on this - group_by(id) %>% - mutate( - descriptions = list(description), - no_descriptions = is.na(first(description)) - ) %>% - ungroup() %>% - filter(description_num == 'description_1') %>% - mutate( - timeline = ifelse( - is.na(start) | start == end, - end, - glue('{end} - {start}') - ), - description_bullets = ifelse( - no_descriptions, - ' ', - map_chr(descriptions, ~paste('-', ., collapse = '\n')) - ) - ) %>% - # strip_links_from_cols(c('title', 'description_bullets')) %>% - mutate_all(~ifelse(is.na(.), 'N/A', .)) %>% - glue_data( - "### {title}", - "\n\n", - "{institution}", - "\n\n", - "{loc}", - "\n\n", - "{timeline}", - "\n\n", - "{description_bullets}", - "\n\n\n", - ) -} \ No newline at end of file diff --git a/index.html b/index.html deleted file mode 100644 index b20637a..0000000 --- a/index.html +++ /dev/null @@ -1,32756 +0,0 @@ - - - - - - - - - - - bmuniz_resume_pt - - - - - - - - - - - - - - - -
    -

    Aside

    - -
    -

    Tech skills

    -

    -
    -
    -

    R skills

    -

    -
    -
    -

    Languages

    -
      -
    • Portuguese
    • -
    • English
    • -
    • Spanish
    • -
    -
    -
    -

    Disclaimer

    -

    Made w/ pagedown.
    -Source code: Github repo.

    -

    Last updated on 2023-12-02.

    -
    -
    -
    -

    Main

    -
    -

    Bianca Muniz

    -

    I enjoy working with data to find and deliver insights, solve business problems, and build tools that help people work more efficiently. I moved into data science after 10+ years working in public policy and public relations.

    -
    -
    -

    Professional Experience

    -
    -

    Data Analyst

    -

    Agência Pública de Jornalismo Investigativo

    -

    São Paulo, SP

    -

    Present - 2022

    -
      -
    • Create automation solutions for the data routine and assist with stories that require database work
    • -
    • Participated in the Mapa dos Conflitos project, a partnership between Agência Pública and the Pastoral Land Commission (CPT)
    • -
    -
    -
    -

    Data Journalism Intern

    -

    Agência Pública de Jornalismo Investigativo

    -

    São Paulo, SP

    -

    2022 - 2020

    -
      -
    • As an intern, I took part in coverage recognized by the Roche Health Journalism Award and in reports that were finalists for the Sigma Awards and the Cláudio Weber Abramo Data Journalism Award.
    • -
    -
    -
    -

    Vice-President Director

    -

    Jornalismo Júnior

    -

    São Paulo, SP

    -

    2019

    -
      -
    • Carried out activities such as financial planning, cash flow updates, budgets, contract drafting and legal compliance, in addition to managing internal and external projects with the president
    • -
    • -
    -
    -
    -

    News Reporter

    -

    Jornalismo Júnior

    -

    São Paulo, SP

    -

    2018

    -
    -
    -
    -

    Education

    -
    -

    Bachelor of Journalism

    -

    Universidade de São Paulo - SP

    -

    São Paulo, SP

    -

    2023 - 2018

    -
      -
    • Was a reporter and editor at several laboratory newspapers: Notícias do Jardim São Remo, Agência Universitária de Notícias, Jornal do Campus and the claro! supplement.
    • -
    • Member of Jornalismo Júnior, the junior journalism enterprise of ECA-USP (2018 - 2019)
    • -
    • Teaching assistant for the USP Institute of Mathematics and Statistics (IME) summer courses “Python for data analysis” (2022) and “R for data analysis” (2023)
    • -
    -
    -
    -

    Master in Data Journalism, Automation and Data Storytelling

    -

    Insper

    -

    São Paulo, SP

    -

    2022 - 2021

    -
      -
    • Lato-sensu specialization with double certification.
    • -
    -
    -
    -

    Master in Sciences

    -

    Universidade Federal de São Paulo - Unifesp

    -

    São Paulo, SP

    -

    2022 - 2018

    -
      -
    • Master in Sciences from the Postgraduate Program in Pharmacology (CAPES rating 6), with a project entitled “Pharmacological and non-pharmacological manipulations for the reestablishment of a ‘hedonic tone’ in an animal model of schizophrenia: the SHR strain”
    • -
    • Scholarship holder of the Coordenação de Aperfeiçoamento de Pessoal de Nível Superior, CAPES (2018 - 2019)
    • -
    -
    -
    -

    Bachelor of Biomedicine

    -

    Universidade Federal de São Paulo - Unifesp

    -

    São Paulo, SP

    -

    2017 - 2014

    -
      -
    • Took part in the executing team of the university extension project “Patógenos em Jogo” (2016)
    • -
    • Organized the XV Biomedicine Winter Course - Unifesp (2017)
    • -
    • -
    -
    -
    -
    -

    Awards

    -
    -

    Awards and honorable mentions

    -

    Associated with the Agência Pública de Jornalismo Investigativo

    -

    N/A

    -

    2021

    -
      -
    • Cláudio Weber Abramo Data Journalism Award - Shortlist (2021)
    • -
    • The Sigma Awards 2021 - Shortlist
    • -
    • Roche Health Journalism Award (Honorable mention)
    • -
    • 39th Human Rights Journalism Award
    • -
    • Cláudio Weber Abramo Data Journalism Award - Shortlist (2022)
    • -
    • The Sigma Awards 2022 - Shortlist
    • -
    • ANA Award (Agência Nacional de Águas e Saneamento)
    • -
    • 39th Human Rights Journalism Award (Honorable mention)
    • -
    • Design for a Better World Award 2022
    • -
    -
    -
    -

    Scholarships and fellowships

    -

    N/A

    -

    N/A

    -

    2020

    -
      -
    • Amazon Rainforest Journalism Fund fellow, from the Pulitzer Center
    • -
    • Entrepreneurship and Innovation Scholarship
    • -
    -
    -
    -
    -

    Talks

    -
    -

    Talks and workshops

    -

    N/A

    -

    N/A

    -

    N/A

    -
      -
    • Workshop “Python + R together with Quarto: your reports will never be the same” (2023, CODA Amazônia)
    • -
    • Workshop “Spreadsheet: a great ally of data analysis” (2023, CODA Amazônia)
    • -
    • Workshop “Data journalism with spreadsheet editors” (2022, CODA.Br)
    • -
    • Webinar “A Pública in data and the Mapa dos Conflitos” (2022)
    • -
    • Climate Data Laboratory - Alma Preta Jornalismo
    • -
    • Introduction to R and the Tidyverse
    • -
    • Open Data Day
    • -
    • Data journalism to change realities
    • -
    • Data visualizations reveal social problems
    • -
    • Portal Assobiar training
    • -
    • Course “Communication: practice and reflection”
    • -
    • Dataviz at Agência Pública
    • -
    • Business Chain - Data in all sectors of the economy - Data journalism
    • -
    -
    -
    -
    -

    Voluntary Work

    -
      -
    • R-Ladies São Paulo
    • -
    • Cursinho pré-vestibular Jeannine Aboulafia
    • -
    -
    -
    -

    Disclaimer

    -

    Made w/ pagedown.
    -Source code: Github repo.

    -

    Last updated on 2023-12-02.

    -
    -
    - - - - - - diff --git a/inst/js/cv_network.js b/inst/js/cv_network.js new file mode 100644 index 0000000..45e12d1 --- /dev/null +++ b/inst/js/cv_network.js @@ -0,0 +1,104 @@ +const data_json = document.querySelector("script[type='application/json']").textContent; +const {edges, nodes} = JSON.parse(data_json); + +class MyHandler extends Paged.Handler { + constructor(chunker, polisher, caller) { + super(chunker, polisher, caller); + } + + afterRendered (){ + plot_network(); + } +} +Paged.registerHandlers(MyHandler); + +function plot_network(){ + const {width, height} = document.querySelector("svg#cv_network_viz").getBoundingClientRect(); + + const svg = d3.select("svg#cv_network_viz") + .attr("width", width) + .attr("height", height); + + const unique_sections = [...new Set(nodes.map(d => d.section))]; + const color_scale = d3.scaleOrdinal() + .domain(unique_sections) + .range(d3.schemeSet2); + + const edge_color = d3.scaleLinear() + .domain(d3.extent(edges, d => d.year)); + + const simulation = d3.forceSimulation(nodes) + .force("link", d3.forceLink(edges).id(d => d.id)) + .force("charge", d3.forceManyBody()) + .force("center", d3.forceCenter(width / 2, height / 2)) + .on("tick", ticked); + + const g = svg.append("g"); + + const link = g + .selectAll("line") + .data(edges) + .enter().append("line") + .attr("stroke", d => d3.interpolateGreys(edge_color(d.year))) + .attr("stroke-width", 0.5); + + const node = g + .attr("stroke", "#fff") + .attr("stroke-width", 1.5) + .selectAll("circle") + .data(nodes) + .enter().append("circle") + .attr("r", 5) + .attr("fill", d => color_scale(d.section)) + .call(drag(simulation)); + + node.append("title") + .text(d => `${d.section}\n${d.title}`); + + svg.call(d3.zoom() + .extent([[0, 0], [width, height]]) + .scaleExtent([1, 8]) + .on("zoom", zoomed)); + + function ticked() { + link + .attr("x1", d => d.source.x) + .attr("y1", d => d.source.y) + .attr("x2", d => d.target.x) + .attr("y2", d => d.target.y); + + node + .attr("cx", d => d.x) + .attr("cy", d => d.y); + } + + function zoomed() { + g.attr("transform", d3.event.transform); + } + + function drag(simulation){ + + function dragstarted(d) { + if (!d3.event.active) simulation.alphaTarget(0.3).restart(); + d.fx = d.x; + d.fy = d.y; + } + + function dragged(d) { + d.fx = d3.event.x; + d.fy = d3.event.y; + } + + function dragended(d) { + if (!d3.event.active) simulation.alphaTarget(0); + d.fx = null; + d.fy = null; + } + + return d3.drag() + .on("start", dragstarted) + .on("drag", dragged) + .on("end", dragended); + } + +} diff --git a/inst/sample_data/contact_info.csv b/inst/sample_data/contact_info.csv new file mode 100644 index 0000000..a16c2ca --- /dev/null +++ b/inst/sample_data/contact_info.csv @@ -0,0 +1,7 @@ +,Icon used from font-awesome 4 to label this contact section,The actual value written for the contact entry +loc,icon,contact +email,envelope,nick.strayer@gmail.com +twitter,twitter,NicholasStrayer +github,github,github.com/nstrayer +website,link,nickstrayer.me +linkedin,linkedin,linkedin.com/in/nickstrayer \ No newline at end of file diff --git a/inst/sample_data/language_skills.csv b/inst/sample_data/language_skills.csv new file mode 100644 index 0000000..819b93e --- /dev/null +++ b/inst/sample_data/language_skills.csv @@ -0,0 +1,8 @@ +skill,level +R,5 +Javascript (d3.js),4.5 +C++,4 +Python,4 +Bash,3.5 +SQL,3 +AWK,3 \ No newline at end of file diff --git a/inst/sample_data/positions.csv b/inst/sample_data/positions.csv new file mode 100644 index 0000000..59a22e4 --- /dev/null +++ 
b/inst/sample_data/positions.csv @@ -0,0 +1,46 @@ +section,title,loc,institution,start,end,description_1,description_2,description_3,in_resume +education,"PhD. Candidate, Biostatistics",Vanderbilt University,"Nashville, TN",2015,,Working on Bayesian network models & interactive visualization platforms,University Graduate Fellow,,TRUE +education,"B.S., Mathematics, Statistics (minor C.S.)",University of Vermont,"Burlington, VT",2011,2015,Thesis: An agent based model of Diel Vertical Migration patterns of Mysis diluviana,,,TRUE +research_positions,Research Assistant,Adair Laboratory,University of Vermont,2012,2013,Independently analyzed and constructed statistical models for large data sets pertaining to carbon decomposition rates.,,,FALSE +research_positions,Undergraduate Researcher,Bentil Laboratory,University of Vermont,2013,2014,Developed mathematical model to predict the transport of sulfur through the environment with applications in waste cleanup.,,,FALSE +research_positions,Undergraduate Researcher,Rubenstein Ecosystems Science Laboratory,University of Vermont,2013,2015,Analyzed and visualized data for CATOS fish tracking project.,Head of data mining project to establish temporal trends in population densities of Mysis diluviana (Mysis).,Ran project to mathematically model the migration patterns of Mysis (honors thesis project.),FALSE +research_positions,Human Computer Interaction Researcher,LabInTheWild (Reineke Lab),University of Michigan,2015,2015,Led development and implementation of interactive data visualizations to help users compare themselves to other demographics.,,,FALSE +research_positions,Graduate Research Assistant,TBILab (Yaomin Xu's Lab),Vanderbilt University,2015,,Primarily working with large EHR and Biobank datasets.,Developing network-based methods to investigate and visualize clinically relevant patterns in data.,,TRUE +research_positions,Data Science Researcher,Data Science Lab,Johns Hopkins University,2017,2018,Building R Shiny applications in the contexts of wearables and statistics education.,Work primarily done in R Shiny and Javascript (node and d3js).,,TRUE +industry_positions,Software Engineering Intern,Conduce,"Carpinteria, CA",2014,2014,Incorporated d3.js to the company's main software platform.,,,FALSE +industry_positions,Engineering Intern - User Experience,Dealer.com,"Burlington, VT",2015,2015,Built internal tool to help analyze and visualize user interaction with back-end products.,,,FALSE +industry_positions,Data Science Intern,Dealer.com,"Burlington, VT",2015,2015,Worked with the product analytics team to help parse and visualize large stores of data to drive business decisions.,,,FALSE +industry_positions,Data Artist In Residence,Conduce,"Carpinteria, CA",2014,2015,"Envisioned, prototyped and implemented visualization framework in the course of one month.",Constructed training protocol for bringing third parties up to speed with new protocol.,,FALSE +industry_positions,Data Journalist - Graphics Department,New York Times,"New York, New York",2016,2016,"Reporter with the graphics desk covering topics in science, politics, and sport.","Work primarily done in R, Javascript, and Adobe Illustrator.",,TRUE +teaching_positions,Javascript for Shiny Users,RStudio::conf 2020,NA,,2020,Served as TA for two day workshop on how to leverage Javascript in Shiny applications,Lectured on [using R2D3 package to build interactive visualizations.](http://nickstrayer.me/js4shiny_r2d3/slides),,FALSE +teaching_positions,Statistical Computing in R,Vanderbilt Biostatistics 
Department,"Nashville, TN",2017,2017,TA and lectured,Covered introduction to R language for statistics applications,Graduate level class,FALSE +teaching_positions,Advanced Statistical Learning and Inference,Vanderbilt Biostatistics Department,"Nashville, TN",2017,2018,TA and lectured,Topics covered from penalized regression to boosted trees and neural networks,Highest level course offered in department,FALSE +teaching_positions,Advanced Statistical Computing,Vanderbilt Biostatistics Department,"Nashville, TN",2018,2018,TA and lectured,Covered modern statistical computing algorithms,4th year PhD level class,FALSE +teaching_positions,Data Visualization Best Practices,DataCamp,,2019,2019,Designed from bottom up course to teach best practices for scientific visualizations.,Uses R and ggplot2.,In top 10% on platform by popularity.,FALSE +teaching_positions,Improving your visualization in Python,DataCamp,,2019,2019,Designed from bottom up course to teach advanced methods for enhancing visualization.,"Uses python, matplotlib, and seaborn.",,FALSE +data_science_writings,[Classifying physical activity from smartphone data](https://blogs.rstudio.com/tensorflow/posts/2018-07-17-activity-detection/),RStudio Tensorflow Blog,,,2018,Walk through of training a convolutional neural network to achieve state of the art recognition of activities from accelerometer data.,Contracted article.,,FALSE +data_science_writings,[Using AWK and R to Parse 25tb](https://livefreeordichotomize.com/2019/06/04/using_awk_and_r_to_parse_25tb/),LiveFreeOrDichotomize.com,,,2019,Story of parsing large amounts of genomics data.,Provided advice for dealing with data much larger than disk.,Reached top of HackerNews.,TRUE +data_science_writings,[The United States of Seasons](https://livefreeordichotomize.com/2018/02/12/the-united-states-of-seasons/),LiveFreeOrDichotomize.com,,,2018,GIS analysis of weather data to find the most 'seasonal' locations in United States,Used Bayesian regression methods for smoothing sparse geospatial data.,,FALSE +data_science_writings,[A year as told by fitbit](https://livefreeordichotomize.com/2017/12/27/a-year-as-told-by-fitbit/),LiveFreeOrDichotomize.com,,,2017,Analyzing a full years worth of second-level heart rate data from wearable device.,Demonstrated visualization-based inference for large data.,,FALSE +data_science_writings,[MCMC and the case of the spilled seeds](https://livefreeordichotomize.com/2017/10/14/mcmc-and-the-case-of-the-spilled-seeds/),LiveFreeOrDichotomize.com,,,2017,Full Bayesian MCMC sampler running in your browser.,Coded from scratch in vanilla Javascript.,,FALSE +data_science_writings,[The Traveling Metallurgist](https://livefreeordichotomize.com/2017/09/25/the-traveling-metallurgist/),LiveFreeOrDichotomize.com,,,2017,Pure javascript implementation of traveling salesman solution using simulated annealing.,Allows reader to customize the number and location of cities to attempt to trick the algorithm.,,TRUE +about_me_press,[Great paper? 
Swipe right on the new ‘Tinder for preprints’ app](https://www.sciencemag.org/news/2017/06/great-paper-swipe-right-new-tinder-preprints-app),Science,,2017,2017,Story of the app [Papr](https://jhubiostatistics.shinyapps.io/papr/) made with Jeff Leek and Lucy D’Agostino McGowan.,,,FALSE +about_me_press,[Swipe right for science: Papr app is ‘Tinder for preprints’](https://www.nature.com/news/swipe-right-for-science-papr-app-is-tinder-for-preprints-1.22163),Nature News,,2017,2017,Second press article for app Papr.,,,FALSE +about_me_press,[The Deeper Story in the Data](https://www.uvm.edu/uvmnews/news/deeper-story-data),University of Vermont Quarterly,,2016,2016,Story on my path post graduation and the power of narrative.,,,FALSE +by_me_press,[The Great Student Migration](https://www.nytimes.com/interactive/2016/08/26/us/college-student-migration.html?smid=pl-share),The New York Times,,2016,2016,Most shared and discussed article from the New York Times for August 2016.,,,TRUE +by_me_press,"[Wildfires are Getting Worse, The New York Times](https://www.nytimes.com/interactive/2016/07/25/us/wildfire-seasons-los-angeles.html)",The New York Times,,2016,2016,GIS analysis and modeling of fire patterns and trends,Data in collaboration with NASA and USGS,,FALSE +by_me_press,[Who’s Speaking at the Democratic National Convention?](https://www.nytimes.com/2016/07/26/upshot/democrats-may-not-be-unified-but-their-convention-speakers-are.html),The New York Times,,2016,2016,Data scraped from CSPAN records to figure out who talked and past conventions.,,,FALSE +by_me_press,[Who’s Speaking at the Republican National Convention?](https://www.nytimes.com/2016/07/19/upshot/whos-not-speaking-how-this-republican-convention-differs.html?smid=pl-share),The New York Times,,2016,2016,Used same data scraping techniques as Who’s Speaking at the Democratic National Convention?,,,FALSE +by_me_press,"[A Trail of Terror in Nice, Block by Block](https://www.nytimes.com/interactive/2016/07/14/world/europe/trail-of-terror-france.html)",The New York Times,,2016,2016,"Led research effort to put together story of 2016 terrorist attack in Nice, France in less than 12 hours.","Work won Silver medal at Malofiej 2017, and gold at Society of News and Design.",,FALSE +academic_articles,Asymmetric Linkage Disequilibrium: Tools for Dissecting Multiallelic LD,Journal of Human Immunology,,2015,2015,"Authored with Richard Single, Vanja Paunic, Mark Albrecht, and Martin Maiers.",,,TRUE +academic_articles,[An Agent Based Model of Mysis Migration](https://www.semanticscholar.org/paper/An-Agent-Based-Model-of-the-Diel-Vertical-Migration-Strayer-Stockwell/40493c78e8ecf22bd882d17ec99fd913ec4b9820),International Association of Great Lakes Research Conference,,2015,2015,"Authored with Brian O'Malley, Sture Hansson, and Jason Stockwell.",,,FALSE +academic_articles,Declines of Mysis diluviana in the Great Lakes,Journal of Great Lakes Research,,2015,2015,Authored with Peter Euclide and Jason Stockwell.,,,FALSE +academic_articles,[Continuous Classification using Deep Neural Networks](http://nickstrayer.me/qualifying_exam/),Vanderbilt Biostatistics Qualification Exam,,2017,2017,Review of methods for classifying continuous data streams using neural networks,Successfully met qualifying examination standards,,FALSE +academic_articles,[Charge Reductions Associated with Shortening Time to Recovery in Septic Shock](https://www.ncbi.nlm.nih.gov/pubmed/30419234),Chest,,2019,2019,"Authored with Wesley H. 
Self, MD MPH; Dandan Liu, PhD; Stephan Russ, MD, MPH; Michael J. Ward, MD, PhD, MBA; Nathan I. Shapiro, MD, MPH; Todd W. Rice, MD, MSc; Matthew W. Semler, MD, MSc.",,,TRUE +academic_articles,R timelineViz: Visualizing the distribution of study events in longitudinal studies,Under-Review (copy available upon request.),,2018,2018,Authored with Alex Sunderman of the Vanderbilt Department of Epidemiology.,,,FALSE +academic_articles,[Multimorbidity Explorer | A shiny app for exploring EHR and biobank data](http://nickstrayer.me/rstudioconf19_me-poster/),RStudio::conf 2019,,2019,2019,Contributed Poster. Authored with Yaomin Xu.,,,TRUE +academic_articles,[Taking a network view of EHR and Biobank data to find explainable multivariate patterns](http://nickstrayer.me/biostat_seminar/),Vanderbilt Biostatistics Seminar Series,,2019,2019,University wide seminar series.,,,FALSE +academic_articles,Patient-specific risk factors independently influence survival in Myelodysplastic Syndromes in an unbiased review of EHR records,Under-Review (copy available upon request.),,,2019,Bayesian network analysis used to find novel subgroups of patients with Myelodysplastic Syndromes (MDS).,Analysis done using method built for my dissertation.,,FALSE +academic_articles,Building a software package in tandem with machine learning methods research can result in both more rigorous code and more rigorous research,ENAR 2020,,,2020,Invited talk in Human Data Interaction section.,How and why building an R package can benefit methodological research,,FALSE +academic_articles,"[Stochastic Block Modeling in R, Statistically rigorous clustering with rigorous code](http://nickstrayer.me/rstudioconf_sbm)",RStudio::conf 2020,,,2020,Invited talk about new [sbmR package](https://tbilab.github.io/sbmR/).,Focus on how software development and methodological research can improve both benefit when done in tandem.,,TRUE +academic_articles,Patient specific comorbidities impact overall survival in myelofibrosis,Under-Review (copy available upon request.),,,2019,Bayesian network analysis used to find robust novel subgroups of patients with given genetic mutations.,Analysis done using method built for my dissertation.,,FALSE \ No newline at end of file diff --git a/inst/sample_data/text_blocks.csv b/inst/sample_data/text_blocks.csv new file mode 100644 index 0000000..77f3592 --- /dev/null +++ b/inst/sample_data/text_blocks.csv @@ -0,0 +1,7 @@ +loc,text +intro,"I have made [visualizations viewed by hundreds of thousands of people](https://www.nytimes.com/interactive/2016/08/26/us/college-student-migration.html), [sped up query times for 25 terabytes of data by an average of 4,800 times](https://livefreeordichotomize.com/2019/06/04/using_awk_and_r_to_parse_25tb/), and built [packages for R](https://github.com/nstrayer/shinysense) that let you [do magic](http://nickstrayer.me/dataDayTexas/). + +Currently searching for a position that allows me to build tools leveraging a combination of visualization, machine learning, and software engineering to help people explore and understand their data in new and useful ways." +industy_experience_aside,I have worked in a variety of roles ranging from journalist to software engineer to data scientist. I like collaborative environments where I can learn from my peers. +teaching_experience_aside,I am passionate about education. I believe that no topic is too complex if the teacher is empathetic and willing to think about new methods of approaching task. 
+data_science_writing_aside,I regularly blog about data science and visualization on my blog [LiveFreeOrDichotomize.](https://livefreeordichotomize.com/) \ No newline at end of file diff --git a/inst/templates/CV_printing_functions.R b/inst/templates/CV_printing_functions.R new file mode 100644 index 0000000..1d752ca --- /dev/null +++ b/inst/templates/CV_printing_functions.R @@ -0,0 +1,240 @@ +# This file contains all the code needed to parse and print various sections of your CV +# from data. Feel free to tweak it as you desire! + + +#' Create a CV_Printer object. +#' +#' @param data_location Path of the spreadsheets holding all your data. This can be +#' either a URL to a google sheet with multiple sheets containing the four +#' data types or a path to a folder containing four `.csv`s with the neccesary +#' data. +#' @param source_location Where is the code to build your CV hosted? +#' @param pdf_mode Is the output being rendered into a pdf? Aka do links need +#' to be stripped? +#' @param sheet_is_publicly_readable If you're using google sheets for data, +#' is the sheet publicly available? (Makes authorization easier.) +#' @return A new `CV_Printer` object. +create_CV_object <- function(data_location, + pdf_mode = FALSE, + sheet_is_publicly_readable = TRUE) { + + cv <- list( + pdf_mode = pdf_mode, + links = c() + ) + + is_google_sheets_location <- stringr::str_detect(data_location, "docs\\.google\\.com") + + if(is_google_sheets_location){ + if(sheet_is_publicly_readable){ + # This tells google sheets to not try and authenticate. Note that this will only + # work if your sheet has sharing set to "anyone with link can view" + googlesheets4::sheets_deauth() + } else { + # My info is in a public sheet so there's no need to do authentication but if you want + # to use a private sheet, then this is the way you need to do it. + # designate project-specific cache so we can render Rmd without problems + options(gargle_oauth_cache = ".secrets") + } + + read_gsheet <- function(sheet_id){ + googlesheets4::read_sheet(data_location, sheet = sheet_id, skip = 1, col_types = "c") + } + cv$entries_data <- read_gsheet(sheet_id = "entries") + cv$skills <- read_gsheet(sheet_id = "language_skills") + cv$text_blocks <- read_gsheet(sheet_id = "text_blocks") + cv$contact_info <- read_gsheet(sheet_id = "contact_info") + } else { + # Want to go old-school with csvs? 
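+    # Note: each template csv keeps a human-readable description row above the
+    # real header row, which is why the reads below (and the googlesheets
+    # reads above) use skip = 1.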
+    cv$entries_data <- readr::read_csv(paste0(data_location, "entries.csv"), skip = 1)
+    cv$skills <- readr::read_csv(paste0(data_location, "language_skills.csv"), skip = 1)
+    cv$text_blocks <- readr::read_csv(paste0(data_location, "text_blocks.csv"), skip = 1)
+    cv$contact_info <- readr::read_csv(paste0(data_location, "contact_info.csv"), skip = 1)
+  }
+
+
+  extract_year <- function(dates){
+    date_year <- stringr::str_extract(dates, "(20|19)[0-9]{2}")
+    date_year[is.na(date_year)] <- lubridate::year(lubridate::ymd(Sys.Date())) + 10
+
+    date_year
+  }
+
+  parse_dates <- function(dates){
+
+    date_month <- stringr::str_extract(dates, "(\\w+|\\d+)(?=(\\s|\\/|-)(20|19)[0-9]{2})")
+    date_month[is.na(date_month)] <- "1"
+
+    paste("1", date_month, extract_year(dates), sep = "-") %>%
+      lubridate::dmy()
+  }
+
+  # Clean up entries dataframe to format we need it for printing
+  cv$entries_data %<>%
+    tidyr::unite(
+      tidyr::starts_with('description'),
+      col = "description_bullets",
+      sep = "\n- ",
+      na.rm = TRUE
+    ) %>%
+    dplyr::mutate(
+      description_bullets = ifelse(description_bullets != "", paste0("- ", description_bullets), ""),
+      start = ifelse(start == "NULL", NA, start),
+      end = ifelse(end == "NULL", NA, end),
+      start_year = extract_year(start),
+      end_year = extract_year(end),
+      no_start = is.na(start),
+      has_start = !no_start,
+      no_end = is.na(end),
+      has_end = !no_end,
+      timeline = dplyr::case_when(
+        no_start & no_end ~ "N/A",
+        no_start & has_end ~ as.character(end),
+        has_start & no_end ~ paste("Current", "-", start),
+        TRUE ~ paste(end, "-", start)
+      )
+    ) %>%
+    dplyr::arrange(desc(parse_dates(end))) %>%
+    dplyr::mutate_all(~ ifelse(is.na(.), 'N/A', .))
+
+  cv
+}
+
+
+# Remove links from a text block and add to internal list
+sanitize_links <- function(cv, text){
+  if(cv$pdf_mode){
+    link_titles <- stringr::str_extract_all(text, '(?<=\\[).+?(?=\\])')[[1]]
+    link_destinations <- stringr::str_extract_all(text, '(?<=\\().+?(?=\\))')[[1]]
+
+    n_links <- length(cv$links)
+    n_new_links <- length(link_titles)
+
+    if(n_new_links > 0){
+      # add links to links array
+      cv$links <- c(cv$links, link_destinations)
+
+      # Build map of link destination to superscript
+      link_superscript_mappings <- purrr::set_names(
+        paste0("<sup>", (1:n_new_links) + n_links, "</sup>"),
+        paste0("(", link_destinations, ")")
+      )
+
+      # Replace the link destination and remove square brackets for title
+      text <- text %>%
+        stringr::str_replace_all(stringr::fixed(link_superscript_mappings)) %>%
+        stringr::str_replace_all('\\[(.+?)\\]', "\\1")
+    }
+  }
+
+  list(cv = cv, text = text)
+}
+
+
+#' @description Takes a position data frame and the desired section id and prints the section to markdown.
+#' @param section_id ID of the entries section to be printed as encoded by the `section` column of the `entries` table
+print_section <- function(cv, section_id, glue_template = "default"){
+
+  if(glue_template == "default"){
+    glue_template <- "
+### {title}
+
+{loc}
+
+{institution}
+
+{timeline}
+
+{description_bullets}
+\n\n\n"
+  }
+
+  section_data <- dplyr::filter(cv$entries_data, section == section_id)
+
+  # Takes the entire entries data frame and removes the links in descending order
+  # so links for the same position are right next to each other in number.
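+  # (sanitize_links() returns both the cleaned text and an updated cv, so the
+  # loop reassigns cv on every pass to keep the running list of links intact.)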
+  for(i in 1:nrow(section_data)){
+    for(col in c('title', 'description_bullets')){
+      strip_res <- sanitize_links(cv, section_data[i, col])
+      section_data[i, col] <- strip_res$text
+      cv <- strip_res$cv
+    }
+  }
+
+  print(glue::glue_data(section_data, glue_template))
+
+  invisible(strip_res$cv)
+}
+
+
+
+#' @description Prints out text block identified by a given label.
+#' @param label ID of the text block to print as encoded in `label` column of `text_blocks` table.
+print_text_block <- function(cv, label){
+  text_block <- dplyr::filter(cv$text_blocks, loc == label) %>%
+    dplyr::pull(text)
+
+  strip_res <- sanitize_links(cv, text_block)
+
+  cat(strip_res$text)
+
+  invisible(strip_res$cv)
+}
+
+
+
+#' @description Construct a bar chart of skills
+#' @param out_of The relative maximum for skills. Used to set what a fully filled in skill bar is.
+print_skill_bars <- function(cv, out_of = 5, bar_color = "#969696", bar_background = "#d9d9d9", glue_template = "default"){
+
+  if(glue_template == "default"){
+    glue_template <- "
+<div
+  class = 'skill-bar'
+  style = \"background:linear-gradient(to right,
+          {bar_color} {width_percent}%,
+          {bar_background} {width_percent}% 100%)\"
+>{skill}</div>"
    " + } + cv$skills %>% + dplyr::mutate(width_percent = round(100*as.numeric(level)/out_of)) %>% + glue::glue_data(glue_template) %>% + print() + + invisible(cv) +} + + + +#' @description List of all links in document labeled by their superscript integer. +print_links <- function(cv) { + n_links <- length(cv$links) + if (n_links > 0) { + cat(" +Links {data-icon=link} +-------------------------------------------------------------------------------- + +
    + + +") + + purrr::walk2(cv$links, 1:n_links, function(link, index) { + print(glue::glue('{index}. {link}')) + }) + } + + invisible(cv) +} + + + +#' @description Contact information section with icons +print_contact_info <- function(cv){ + glue::glue_data( + cv$contact_info, + "- {contact}" + ) %>% print() + + invisible(cv) +} diff --git a/inst/templates/contact_info.csv b/inst/templates/contact_info.csv new file mode 100644 index 0000000..7bb6a1d --- /dev/null +++ b/inst/templates/contact_info.csv @@ -0,0 +1,7 @@ +Id of contact section,Icon used from font-awesome 4 to label this contact section,The actual value written for the contact entry +loc,icon,contact +email,envelope,nick.strayer@gmail.com +twitter,twitter,NicholasStrayer +github,github,github.com/nstrayer +website,link,nickstrayer.me +linkedin,linkedin,linkedin.com/in/nickstrayer \ No newline at end of file diff --git a/inst/templates/csv_rendered.png b/inst/templates/csv_rendered.png new file mode 100644 index 0000000..8cc5297 Binary files /dev/null and b/inst/templates/csv_rendered.png differ diff --git a/inst/templates/cv.Rmd b/inst/templates/cv.Rmd new file mode 100644 index 0000000..7f7ba53 --- /dev/null +++ b/inst/templates/cv.Rmd @@ -0,0 +1,210 @@ +--- +title: "{{{full_name}}}'s CV" +author: {{{full_name}}} +date: "`r Sys.Date()`" +params: + pdf_mode: + value: true +output: + pagedown::html_resume: + css: ['dd_cv.css', 'resume'] + self_contained: true +--- + +```{r, include=FALSE} +knitr::opts_chunk$set( + results='asis', + echo = FALSE +) + +library(magrittr) # For the pipe +source("cv_printing_functions.r") + +# Read in all data and initialize a CV printer object +CV <- create_CV_object( + data_location = "{{{data_location}}}", + pdf_mode = params$pdf_mode +) + +``` + + +```{r} +# When in pdf export mode the little dots are unaligned, so fix that with some conditional CSS. +if(params$pdf_mode) { + cat(" +") +} +``` + + +Aside +================================================================================ + +{{#use_network_logo}} +```{r} +# Build interactive network of positions colored by section +# and connected if they occurred in the same year +datadrivencv::build_network_logo(CV$entries_data) +``` +{{/use_network_logo}} +{{^use_network_logo}} + +![logo](https://cran.r-project.org/Rlogo.svg){width=100%} +{{/use_network_logo}} + + +```{r} +if(params$pdf_mode){ + cat("View this CV online with links at _{{{html_location}}}_") +} else { + cat("[ Download a PDF of this CV]({{pdf_location}})") +} +``` + +Contact {#contact} +-------------------------------------------------------------------------------- + +```{r} +CV %>% print_contact_info() +``` + + + +Language Skills {#skills} +-------------------------------------------------------------------------------- + +```{r} +CV %>% print_skill_bars() +``` + + + +Disclaimer {#disclaimer} +-------------------------------------------------------------------------------- + +Made with the R package [**pagedown**](https://github.com/rstudio/pagedown). + +The source code is available [on github.com/nstrayer/cv]({{{source_location}}}). + +Last updated on `r Sys.Date()`. 
+ + + +Main +================================================================================ + +{{{full_name}}} {#title} +-------------------------------------------------------------------------------- + +```{r} +# Note the special double pipe so we modify the CV object in place +CV %<>% print_text_block("intro") +``` + + + +Education {data-icon=graduation-cap data-concise=true} +-------------------------------------------------------------------------------- + +```{r} +CV %<>% print_section('education') +``` + + + +Research Experience {data-icon=laptop} +-------------------------------------------------------------------------------- + +```{r} +CV %<>% print_section('research_positions') +``` + + + +Industry Experience {data-icon=suitcase} +-------------------------------------------------------------------------------- + +::: aside +```{r} +CV %<>% print_text_block('industy_experience_aside') +``` +::: + +```{r} +CV %<>% print_section('industry_positions') +``` + + +
+<br>
+<br>
    + +Teaching Experience {data-icon=chalkboard-teacher} +-------------------------------------------------------------------------------- + +::: aside +```{r} +CV %<>% print_text_block('teaching_experience_aside') +``` +::: + +```{r} +CV %<>% print_section('teaching_positions') +``` + + + +Selected Data Science Writing {data-icon=chart-line} +-------------------------------------------------------------------------------- + +::: aside +```{r} +CV %<>% print_text_block('data_science_writing_aside') +``` +::: + +```{r} +CV %<>% print_section('data_science_writings') +``` + + + +Selected Press (About) {data-icon=newspaper} +-------------------------------------------------------------------------------- + +```{r} +CV %<>% print_section('about_me_press') +``` + + +
+<br>
    + + +Selected Press (By) {data-icon=newspaper} +-------------------------------------------------------------------------------- + +```{r} +CV %<>% print_section('by_me_press') +``` + + + +Selected Publications, Posters, and Talks {data-icon=book} +-------------------------------------------------------------------------------- + +```{r} +CV %<>% print_section('academic_articles') +``` + + + +```{r} +CV %<>% print_links() +``` + diff --git a/css/styles_pdf.css b/inst/templates/dd_cv.css similarity index 62% rename from css/styles_pdf.css rename to inst/templates/dd_cv.css index faaefc6..ace386f 100644 --- a/css/styles_pdf.css +++ b/inst/templates/dd_cv.css @@ -1,18 +1,18 @@ -@import url('https://fonts.googleapis.com/css?family=Arimo|Open+Sans&display=swap'); +@import url("https://fonts.googleapis.com/css?family=Montserrat|Playfair+Display&display=swap"); -/* Main text is Opens Sans font*/ +/* Main text is monserrat*/ body { - font-family: "Open Sans", sans-serif; + font-family: "Montserrat", sans-serif; font-weight: 300; line-height: 1.3; color: #444; } -/* Give headers Arimo font */ +/* Give headers playfair font */ h1, h2, h3 { - font-family: "Arimo", serif; + font-family: "Playfair Display", serif; color: #000; } @@ -29,17 +29,17 @@ sup { * { /* Override default right margin for sidebar*/ - --pagedjs-margin-right: 0.5in; + --pagedjs-margin-right: 0.2in; --pagedjs-margin-left: 0.2in; } /* Customize some of the sizing variables */ :root { --sidebar-width: 12rem; /* Shrink sidebar width */ - --sidebar-background-color: #ffffff; /* Make sidebar white */ + --sidebar-background-color: #f7fbff; /* Make sidebar blue */ --sidebar-horizontal-padding: 0.01in; /* Reduce sidebar padding */ --decorator-outer-dim: 10px; /* Make position deliniating circles larger */ - --decorator-border: 2px solid #a9a9a9; /* Change timeline color*/ + --decorator-border: 2px solid #bdd7e7; /* Make timeline a blue as well*/ } .details .place { @@ -83,11 +83,10 @@ sup { font-size: 0.75rem; } -/* Make little circle outline be a light blue +/* Make little circle outline be a light blue */ .decorator::after { - background-color: #a9a9a9; - color: #a9a9a9; -} */ + background-color: #08306b; +} /* Remove the fake bullets from lists */ .aside li::before { @@ -102,3 +101,24 @@ sup { width: 100%; } + +/* When the class no-timeline is added we remove the after psuedo element from the header... 
*/ + +/* Removes the psuedo element on h2 tags for this section */ +.section.no-timeline h2::after { + content: none; +} + +/* Without adding padding the content is all up on the title */ +.section.no-timeline h2 { + padding-bottom: 1rem; +} + +/* Add styles for little cards */ +.info-card{ + width: 220px; + float: left; + padding: 0.5rem; + margin: 0.5rem; + box-shadow: 1px 1px 4px black; +} diff --git a/css/custom_resume.css b/inst/templates/dd_resume.css similarity index 84% rename from css/custom_resume.css rename to inst/templates/dd_resume.css index 1bccc00..725a570 100644 --- a/css/custom_resume.css +++ b/inst/templates/dd_resume.css @@ -3,7 +3,7 @@ /* Override default right margin for sidebar*/ --pagedjs-margin-right: 0.2in !important; --pagedjs-margin-left: 0; - --pagedjs-margin-top: 0.5in; + --pagedjs-margin-top: 0.2in; --pagedjs-margin-bottom: 0.2in; } @@ -12,7 +12,7 @@ } [data-id="title"] { - margin: 0.25 0.5in 0.8in -0.5in; + margin: 0 0.5in 0.08in -0.5in; } .main-block { @@ -51,14 +51,6 @@ margin: 0.05in 0 0.05in; } -[data-id="interests"] ul { - margin: 0.05in 0 0.05in; -} - -[data-id="rpackages"] ul { - margin: 0.05in 0 0.05in; -} - [data-id="contact"] ul { padding-left: 0 !important; margin-top: 0.75rem; diff --git a/inst/templates/entries.csv b/inst/templates/entries.csv new file mode 100644 index 0000000..ccb2d9e --- /dev/null +++ b/inst/templates/entries.csv @@ -0,0 +1,47 @@ +Where in your CV this entry belongs,Main title of the entry,Location the entry occured,Primary institution affiliation for entry,Start date of entry (year),"End year of entry. Set to ""current"" if entry is still ongoing.","Each description column is a separate bullet point for the entry. If you need more description bullet points simply add a new column with title ""description_{4,5,..}""",,,A filter variable that is used to decide if entry is in the smaller resume. +section,title,loc,institution,start,end,description_1,description_2,description_3,in_resume +education,"PhD. 
Candidate, Biostatistics",Vanderbilt University,"Nashville, TN",2015,,Working on Bayesian network models & interactive visualization platforms,University Graduate Fellow,,TRUE +education,"B.S., Mathematics, Statistics (minor C.S.)",University of Vermont,"Burlington, VT",2011,2015,Thesis: An agent based model of Diel Vertical Migration patterns of Mysis diluviana,,,TRUE +research_positions,Research Assistant,Adair Laboratory,University of Vermont,2012,2013,Independently analyzed and constructed statistical models for large data sets pertaining to carbon decomposition rates.,,,FALSE +research_positions,Undergraduate Researcher,Bentil Laboratory,University of Vermont,2013,2014,Developed mathematical model to predict the transport of sulfur through the environment with applications in waste cleanup.,,,FALSE +research_positions,Undergraduate Researcher,Rubenstein Ecosystems Science Laboratory,University of Vermont,2013,2015,Analyzed and visualized data for CATOS fish tracking project.,Head of data mining project to establish temporal trends in population densities of Mysis diluviana (Mysis).,Ran project to mathematically model the migration patterns of Mysis (honors thesis project.),FALSE +research_positions,Human Computer Interaction Researcher,LabInTheWild (Reineke Lab),University of Michigan,2015,2015,Led development and implementation of interactive data visualizations to help users compare themselves to other demographics.,,,FALSE +research_positions,Graduate Research Assistant,TBILab (Yaomin Xu's Lab),Vanderbilt University,2015,,Primarily working with large EHR and Biobank datasets.,Developing network-based methods to investigate and visualize clinically relevant patterns in data.,,TRUE +research_positions,Data Science Researcher,Data Science Lab,Johns Hopkins University,2017,2018,Building R Shiny applications in the contexts of wearables and statistics education.,Work primarily done in R Shiny and Javascript (node and d3js).,,TRUE +industry_positions,Software Engineering Intern,Conduce,"Carpinteria, CA",2014,2014,Incorporated d3.js to the company's main software platform.,,,FALSE +industry_positions,Engineering Intern - User Experience,Dealer.com,"Burlington, VT",2015,2015,Built internal tool to help analyze and visualize user interaction with back-end products.,,,FALSE +industry_positions,Data Science Intern,Dealer.com,"Burlington, VT",2015,2015,Worked with the product analytics team to help parse and visualize large stores of data to drive business decisions.,,,FALSE +industry_positions,Data Artist In Residence,Conduce,"Carpinteria, CA",2014,2015,"Envisioned, prototyped and implemented visualization framework in the course of one month.",Constructed training protocol for bringing third parties up to speed with new protocol.,,FALSE +industry_positions,Data Journalist - Graphics Department,New York Times,"New York, New York",2016,2016,"Reporter with the graphics desk covering topics in science, politics, and sport.","Work primarily done in R, Javascript, and Adobe Illustrator.",,TRUE +teaching_positions,Javascript for Shiny Users,RStudio::conf 2020,N/A,,2020,Served as TA for two day workshop on how to leverage Javascript in Shiny applications,Lectured on [using R2D3 package to build interactive visualizations.](http://nickstrayer.me/js4shiny_r2d3/slides),,FALSE +teaching_positions,Statistical Computing in R,Vanderbilt Biostatistics Department,"Nashville, TN",2017,2017,TA and lectured,Covered introduction to R language for statistics applications,Graduate level class,FALSE 
+teaching_positions,Advanced Statistical Learning and Inference,Vanderbilt Biostatistics Department,"Nashville, TN",2017,2018,TA and lectured,Topics covered from penalized regression to boosted trees and neural networks,Highest level course offered in department,FALSE +teaching_positions,Advanced Statistical Computing,Vanderbilt Biostatistics Department,"Nashville, TN",2018,2018,TA and lectured,Covered modern statistical computing algorithms,4th year PhD level class,FALSE +teaching_positions,Data Visualization Best Practices,DataCamp,,2019,2019,Designed from bottom up course to teach best practices for scientific visualizations.,Uses R and ggplot2.,In top 10% on platform by popularity.,FALSE +teaching_positions,Improving your visualization in Python,DataCamp,,2019,2019,Designed from bottom up course to teach advanced methods for enhancing visualization.,"Uses python, matplotlib, and seaborn.",,FALSE +data_science_writings,[Classifying physical activity from smartphone data](https://blogs.rstudio.com/tensorflow/posts/2018-07-17-activity-detection/),RStudio Tensorflow Blog,,,2018,Walk through of training a convolutional neural network to achieve state of the art recognition of activities from accelerometer data.,Contracted article.,,FALSE +data_science_writings,[Using AWK and R to Parse 25tb](https://livefreeordichotomize.com/2019/06/04/using_awk_and_r_to_parse_25tb/),LiveFreeOrDichotomize.com,,,2019,Story of parsing large amounts of genomics data.,Provided advice for dealing with data much larger than disk.,Reached top of HackerNews.,TRUE +data_science_writings,[The United States of Seasons](https://livefreeordichotomize.com/2018/02/12/the-united-states-of-seasons/),LiveFreeOrDichotomize.com,,,2018,GIS analysis of weather data to find the most 'seasonal' locations in United States,Used Bayesian regression methods for smoothing sparse geospatial data.,,FALSE +data_science_writings,[A year as told by fitbit](https://livefreeordichotomize.com/2017/12/27/a-year-as-told-by-fitbit/),LiveFreeOrDichotomize.com,,,2017,Analyzing a full years worth of second-level heart rate data from wearable device.,Demonstrated visualization-based inference for large data.,,FALSE +data_science_writings,[MCMC and the case of the spilled seeds](https://livefreeordichotomize.com/2017/10/14/mcmc-and-the-case-of-the-spilled-seeds/),LiveFreeOrDichotomize.com,,,2017,Full Bayesian MCMC sampler running in your browser.,Coded from scratch in vanilla Javascript.,,FALSE +data_science_writings,[The Traveling Metallurgist](https://livefreeordichotomize.com/2017/09/25/the-traveling-metallurgist/),LiveFreeOrDichotomize.com,,,2017,Pure javascript implementation of traveling salesman solution using simulated annealing.,Allows reader to customize the number and location of cities to attempt to trick the algorithm.,,TRUE +about_me_press,[Great paper? 
Swipe right on the new ‘Tinder for preprints’ app](https://www.sciencemag.org/news/2017/06/great-paper-swipe-right-new-tinder-preprints-app),Science,,2017,2017,Story of the app [Papr](https://jhubiostatistics.shinyapps.io/papr/) made with Jeff Leek and Lucy D’Agostino McGowan.,,,FALSE +about_me_press,[Swipe right for science: Papr app is ‘Tinder for preprints’](https://www.nature.com/news/swipe-right-for-science-papr-app-is-tinder-for-preprints-1.22163),Nature News,,2017,2017,Second press article for app Papr.,,,FALSE +about_me_press,[The Deeper Story in the Data](https://www.uvm.edu/uvmnews/news/deeper-story-data),University of Vermont Quarterly,,2016,2016,Story on my path post graduation and the power of narrative.,,,FALSE +by_me_press,[The Great Student Migration](https://www.nytimes.com/interactive/2016/08/26/us/college-student-migration.html?smid=pl-share),The New York Times,,2016,2016,Most shared and discussed article from the New York Times for August 2016.,,,TRUE +by_me_press,"[Wildfires are Getting Worse, The New York Times](https://www.nytimes.com/interactive/2016/07/25/us/wildfire-seasons-los-angeles.html)",The New York Times,,2016,2016,GIS analysis and modeling of fire patterns and trends,Data in collaboration with NASA and USGS,,FALSE +by_me_press,[Who’s Speaking at the Democratic National Convention?](https://www.nytimes.com/2016/07/26/upshot/democrats-may-not-be-unified-but-their-convention-speakers-are.html),The New York Times,,2016,2016,Data scraped from CSPAN records to figure out who talked and past conventions.,,,FALSE +by_me_press,[Who’s Speaking at the Republican National Convention?](https://www.nytimes.com/2016/07/19/upshot/whos-not-speaking-how-this-republican-convention-differs.html?smid=pl-share),The New York Times,,2016,2016,Used same data scraping techniques as Who’s Speaking at the Democratic National Convention?,,,FALSE +by_me_press,"[A Trail of Terror in Nice, Block by Block](https://www.nytimes.com/interactive/2016/07/14/world/europe/trail-of-terror-france.html)",The New York Times,,2016,2016,"Led research effort to put together story of 2016 terrorist attack in Nice, France in less than 12 hours.","Work won Silver medal at Malofiej 2017, and gold at Society of News and Design.",,FALSE +academic_articles,Asymmetric Linkage Disequilibrium: Tools for Dissecting Multiallelic LD,Journal of Human Immunology,,2015,2015,"Authored with Richard Single, Vanja Paunic, Mark Albrecht, and Martin Maiers.",,,TRUE +academic_articles,[An Agent Based Model of Mysis Migration](https://www.semanticscholar.org/paper/An-Agent-Based-Model-of-the-Diel-Vertical-Migration-Strayer-Stockwell/40493c78e8ecf22bd882d17ec99fd913ec4b9820),International Association of Great Lakes Research Conference,,2015,2015,"Authored with Brian O'Malley, Sture Hansson, and Jason Stockwell.",,,FALSE +academic_articles,Declines of Mysis diluviana in the Great Lakes,Journal of Great Lakes Research,,2015,2015,Authored with Peter Euclide and Jason Stockwell.,,,FALSE +academic_articles,[Continuous Classification using Deep Neural Networks](http://nickstrayer.me/qualifying_exam/),Vanderbilt Biostatistics Qualification Exam,,2017,2017,Review of methods for classifying continuous data streams using neural networks,Successfully met qualifying examination standards,,FALSE +academic_articles,[Charge Reductions Associated with Shortening Time to Recovery in Septic Shock](https://www.ncbi.nlm.nih.gov/pubmed/30419234),Chest,,2019,2019,"Authored with Wesley H. 
Self, MD MPH; Dandan Liu, PhD; Stephan Russ, MD, MPH; Michael J. Ward, MD, PhD, MBA; Nathan I. Shapiro, MD, MPH; Todd W. Rice, MD, MSc; Matthew W. Semler, MD, MSc.",,,TRUE +academic_articles,R timelineViz: Visualizing the distribution of study events in longitudinal studies,Under-Review (copy available upon request.),,2018,2018,Authored with Alex Sunderman of the Vanderbilt Department of Epidemiology.,,,FALSE +academic_articles,[Multimorbidity Explorer | A shiny app for exploring EHR and biobank data](http://nickstrayer.me/rstudioconf19_me-poster/),RStudio::conf 2019,,2019,2019,Contributed Poster. Authored with Yaomin Xu.,,,TRUE +academic_articles,[Taking a network view of EHR and Biobank data to find explainable multivariate patterns](http://nickstrayer.me/biostat_seminar/),Vanderbilt Biostatistics Seminar Series,,2019,2019,University wide seminar series.,,,FALSE +academic_articles,Patient-specific risk factors independently influence survival in Myelodysplastic Syndromes in an unbiased review of EHR records,Under-Review (copy available upon request.),,,2019,Bayesian network analysis used to find novel subgroups of patients with Myelodysplastic Syndromes (MDS).,Analysis done using method built for my dissertation.,,FALSE +academic_articles,Building a software package in tandem with machine learning methods research can result in both more rigorous code and more rigorous research,ENAR 2020,,,2020,Invited talk in Human Data Interaction section.,How and why building an R package can benefit methodological research,,FALSE +academic_articles,"[Stochastic Block Modeling in R, Statistically rigorous clustering with rigorous code](http://nickstrayer.me/rstudioconf_sbm)",RStudio::conf 2020,,,2020,Invited talk about new [sbmR package](https://tbilab.github.io/sbmR/).,Focus on how software development and methodological research can improve both benefit when done in tandem.,,TRUE +academic_articles,Patient specific comorbidities impact overall survival in myelofibrosis,Under-Review (copy available upon request.),,,2019,Bayesian network analysis used to find robust novel subgroups of patients with given genetic mutations.,Analysis done using method built for my dissertation.,,FALSE \ No newline at end of file diff --git a/inst/templates/language_skills.csv b/inst/templates/language_skills.csv new file mode 100644 index 0000000..7db141e --- /dev/null +++ b/inst/templates/language_skills.csv @@ -0,0 +1,9 @@ +Name of language,Relative numeric level of skill +skill,level +R,5 +Javascript (d3.js),4.5 +C++,4 +Python,4 +Bash,3.5 +SQL,3 +AWK,3 \ No newline at end of file diff --git a/inst/templates/render_cv.R b/inst/templates/render_cv.R new file mode 100644 index 0000000..8f31926 --- /dev/null +++ b/inst/templates/render_cv.R @@ -0,0 +1,21 @@ +# This script builds both the HTML and PDF versions of your CV + +# If you wanted to speed up rendering for googlesheets driven CVs you could use +# this script to cache a version of the CV_Printer class with data already +# loaded and load the cached version in the .Rmd instead of re-fetching it twice +# for the HTML and PDF rendering. This exercise is left to the reader. 
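+#
+# A minimal sketch of that caching idea (file names here are hypothetical, and
+# it assumes cv.rmd is adapted to run `CV <- readRDS("cached_cv.rds")` instead
+# of calling create_CV_object() itself):
+#
+#   source("cv_printing_functions.r")
+#   cached_cv <- create_CV_object(
+#     data_location = "https://docs.google.com/spreadsheets/d/<your-sheet-id>",
+#     pdf_mode = FALSE
+#   )
+#   saveRDS(cached_cv, "cached_cv.rds")  # sheets fetched once, reused by both renders
+#
+# Since pdf_mode is stored on the object, you would still set cv$pdf_mode
+# before each render so links are handled correctly.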
+ +# Knit the HTML version +rmarkdown::render("cv.rmd", + params = list(pdf_mode = FALSE), + output_file = "cv.html") + +# Knit the PDF version to temporary html location +tmp_html_cv_loc <- fs::file_temp(ext = ".html") +rmarkdown::render("cv.rmd", + params = list(pdf_mode = TRUE), + output_file = tmp_html_cv_loc) + +# Convert to PDF using Pagedown +pagedown::chrome_print(input = tmp_html_cv_loc, + output = "cv.pdf") diff --git a/inst/templates/resume.Rmd b/inst/templates/resume.Rmd new file mode 100644 index 0000000..1195a26 --- /dev/null +++ b/inst/templates/resume.Rmd @@ -0,0 +1,163 @@ +--- +title: Nick Strayer's Resume" +author: Nick Strayer +date: "`r Sys.Date()`" +output: + pagedown::html_resume: + css: ['css/custom_resume.css', 'css/styles.css', 'resume'] + # set it to true for a self-contained HTML page but it'll take longer to render + self_contained: true +--- + + +```{r, include=FALSE} +knitr::opts_chunk$set( + results='asis', + echo = FALSE +) +library(tidyverse) +library(glue) + +# ====================================================================== +# These variables determine how the the data is loaded and how the exports are +# done. + +# Is data stored in google sheets? If no data will be gather from the csvs/ +# folder in project +using_googlesheets <- TRUE + +# Just the copied URL from the sheet +positions_sheet_location <- "https://docs.google.com/spreadsheets/d/14MQICF2F8-vf8CKPF1m4lyGKO6_thG-4aSwat1e2TWc" + +# Is this sheet available for anyone to read? If you're using a private sheet +# set this to false and go to gather_data.R and run the data loading manually +# once to cache authentication +sheet_is_publicly_readable <- TRUE + +# Is the goal of this knit to build a document that is exported to PDF? If so +# set this to true to have links turned into footnotes at the end of the +# document +PDF_EXPORT <- FALSE + + +# A global (gasp) variable that holds all the links that were inserted for +# placement at the end +links <- c() + +# ====================================================================== +# Now we source two external scripts. 
One contains functions for building the +# text output and the other loads up our data from either googlesheets or csvs + +# Functions for building sections from CSV data +source('parsing_functions.R') + +# Load data for CV/Resume +source('gather_data.R') + +# Now we just need to filter down the position data to include less verbose +# categories and only the entries we have designated for the resume +position_data <- position_data %>% + filter(in_resume) %>% + mutate( + # Build some custom sections by collapsing others + section = case_when( + section %in% c('research_positions', 'industry_positions') ~ 'positions', + section %in% c('data_science_writings', 'by_me_press') ~ 'writings', + TRUE ~ section + ) + ) +``` + + + +Aside +================================================================================ + + +![logo](logo.png){width=100%} + +Contact {#contact} +-------------------------------------------------------------------------------- + +```{r} +contact_info %>% + glue_data("- {contact}") +``` + + + +Language Skills {#skills} +-------------------------------------------------------------------------------- + +```{r} +build_skill_bars(skills) +``` + + + +Open Source Contributions {#open-source} +-------------------------------------------------------------------------------- + +All projects available at `github.com/nstrayer/` + + +- `shinysense`: R package to use sensor data in Shiny apps +- `tuftesque`: Hugo theme (behind LiveFreeOrDichotomize.com) +- `sbmR`: R package for fitting stochasitic block models + + +More info {#more-info} +-------------------------------------------------------------------------------- + +See full CV at nickstrayer.me/cv for more complete list of positions and publications. + + +Disclaimer {#disclaimer} +-------------------------------------------------------------------------------- + +Made w/ [**pagedown**](https://github.com/rstudio/pagedown). + +Source code: [github.com/nstrayer/cv](https://github.com/nstrayer/cv). + +Last updated on `r Sys.Date()`. + + + +Main +================================================================================ + +Nick Strayer {#title} +-------------------------------------------------------------------------------- + +```{r} +print_text_block(text_blocks, 'intro') +``` + + + +Education {data-icon=graduation-cap data-concise=true} +-------------------------------------------------------------------------------- + +```{r} +position_data %>% print_section('education') +``` + + + +Selected Positions {data-icon=suitcase} +-------------------------------------------------------------------------------- + +```{r} +position_data %>% print_section('positions') +``` + + + +Selected Writing {data-icon=newspaper} +-------------------------------------------------------------------------------- + +```{r} +position_data %>% print_section('writings') +``` + + diff --git a/inst/templates/text_blocks.csv b/inst/templates/text_blocks.csv new file mode 100644 index 0000000..3c59328 --- /dev/null +++ b/inst/templates/text_blocks.csv @@ -0,0 +1,8 @@ +Id used for finding text block,Contents of text block. Supports markdown formatting. 
+loc,text +intro,"I have made [visualizations viewed by hundreds of thousands of people](https://www.nytimes.com/interactive/2016/08/26/us/college-student-migration.html), [sped up query times for 25 terabytes of data by an average of 4,800 times](https://livefreeordichotomize.com/2019/06/04/using_awk_and_r_to_parse_25tb/), and built [packages for R](https://github.com/nstrayer/shinysense) that let you [do magic](http://nickstrayer.me/dataDayTexas/). + +Currently searching for a position that allows me to build tools leveraging a combination of visualization, machine learning, and software engineering to help people explore and understand their data in new and useful ways." +industy_experience_aside,I have worked in a variety of roles ranging from journalist to software engineer to data scientist. I like collaborative environments where I can learn from my peers. +teaching_experience_aside,I am passionate about education. I believe that no topic is too complex if the teacher is empathetic and willing to think about new methods of approaching task. +data_science_writing_aside,I regularly blog about data science and visualization on my blog [LiveFreeOrDichotomize.](https://livefreeordichotomize.com/) \ No newline at end of file diff --git a/man/build_network_logo.Rd b/man/build_network_logo.Rd new file mode 100644 index 0000000..8ff4348 --- /dev/null +++ b/man/build_network_logo.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/build_network_logo.R +\name{build_network_logo} +\alias{build_network_logo} +\title{Build interactive network logo} +\usage{ +build_network_logo(position_data) +} +\arguments{ +\item{position_data}{position data from your \code{CV_Printer} class.} +} +\value{ +Interactive force-directed layout network of your CV data +} +\description{ +Constructs a network based on your position data to be used as a logo. +Interactive in HTML version and static in the PDF version. Notes are entries, +colored by section and connected if they occurred in the same year +} diff --git a/man/figures/csv_to_cv.png b/man/figures/csv_to_cv.png new file mode 100644 index 0000000..a0e6369 Binary files /dev/null and b/man/figures/csv_to_cv.png differ diff --git a/man/figures/how_to_copy_data.png b/man/figures/how_to_copy_data.png new file mode 100644 index 0000000..742466c Binary files /dev/null and b/man/figures/how_to_copy_data.png differ diff --git a/man/figures/html_vs_pdf_output.png b/man/figures/html_vs_pdf_output.png new file mode 100644 index 0000000..eacc1ed Binary files /dev/null and b/man/figures/html_vs_pdf_output.png differ diff --git a/man/figures/logo.svg b/man/figures/logo.svg new file mode 100644 index 0000000..3f7665c --- /dev/null +++ b/man/figures/logo.svg @@ -0,0 +1,34 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/man/figures/sub_sheets.png b/man/figures/sub_sheets.png new file mode 100644 index 0000000..7d846bc Binary files /dev/null and b/man/figures/sub_sheets.png differ diff --git a/man/pipe.Rd b/man/pipe.Rd new file mode 100644 index 0000000..0eec752 --- /dev/null +++ b/man/pipe.Rd @@ -0,0 +1,12 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/utils-pipe.R +\name{\%>\%} +\alias{\%>\%} +\title{Pipe operator} +\usage{ +lhs \%>\% rhs +} +\description{ +See \code{magrittr::\link[magrittr:pipe]{\%>\%}} for details. 
+} +\keyword{internal} diff --git a/man/use_csv_data_storage.Rd b/man/use_csv_data_storage.Rd new file mode 100644 index 0000000..0ef8ee1 --- /dev/null +++ b/man/use_csv_data_storage.Rd @@ -0,0 +1,34 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/use_csv_data_storage.R +\name{use_csv_data_storage} +\alias{use_csv_data_storage} +\title{Use CSVs for storing data} +\usage{ +use_csv_data_storage(folder_name = "data", create_output_dir = TRUE) +} +\arguments{ +\item{folder_name}{Name of the folder you want csvs stored in relative to current working directory} + +\item{create_output_dir}{If the requested output directory is missing should it be created?} +} +\value{ +A new folder \verb{/} with \code{entries.csv}, \code{text_blocks.csv}, \code{language_skills.csv}, and \code{contact_info.csv} in it. +working directory. +} +\description{ +Sets up examples of the four CSVs needed for building CV +} +\examples{ + +# Make a temp directory for placing files +# This would be a real location for a typical situation +temp_dir <- fs::dir_create(fs::path(tempdir(), "cv_w_csvs")) + +datadrivencv::use_csv_data_storage( + folder_name = fs::path(temp_dir, "csv_data"), + create_output_dir = TRUE +) + +list.files(fs::path(temp_dir, "csv_data")) + +} diff --git a/man/use_datadriven_cv.Rd b/man/use_datadriven_cv.Rd new file mode 100644 index 0000000..dda64ec --- /dev/null +++ b/man/use_datadriven_cv.Rd @@ -0,0 +1,80 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/use_datadriven_cv.R +\name{use_datadriven_cv} +\alias{use_datadriven_cv} +\title{Use Data Driven CV template} +\usage{ +use_datadriven_cv( + full_name = "Sarah Arcos", + data_location = system.file("sample_data/", package = "datadrivencv"), + pdf_location = "https://github.com/nstrayer/cv/raw/master/strayer_cv.pdf", + html_location = "nickstrayer.me/datadrivencv/", + source_location = "https://github.com/nstrayer/datadrivencv", + which_files = "all", + output_dir = getwd(), + create_output_dir = FALSE, + use_network_logo = TRUE, + open_files = TRUE +) +} +\arguments{ +\item{full_name}{Your full name, used in title of document and header} + +\item{data_location}{Path of the spreadsheets holding all your data. This can +be either a URL to a google sheet with multiple sheets containing the four +data types or a path to a folder containing four \code{.csv}s with the neccesary +data. See \code{\link{use_csv_data_storage()}} for help setting up these +\code{.csv}s.} + +\item{pdf_location}{What location will the PDF of this CV be hosted at?} + +\item{html_location}{What location will the HTML version of this CV be hosted +at?} + +\item{source_location}{Where is the code to build your CV hosted?} + +\item{which_files}{What files should be placed? Takes a vector of possible +values \code{c("cv.rmd", "dd_cv.css", "render_cv.r", "cv_printing_functions.r")} +or \code{"all"} for everything. This can be used to incrementally update the +printing functions or CSS without loosing customizations you've made to +other files.} + +\item{output_dir}{Where should the files be placed? Defaults to your current working directory} + +\item{create_output_dir}{If the requested output directory is missing should it be created?} + +\item{use_network_logo}{Should logo be an interactive network based on your +CV data? 
Note that this uses the function +\code{\link{build_network_logo()}} so will introduce a dependency on this +package.} + +\item{open_files}{Should the added files be opened after creation?} +} +\value{ +\code{cv.rmd}, \code{dd_cv.css}, \code{render_cv.r}, and \code{cv_printing_functions.r} +written to the current working directory. +} +\description{ +Sets up the \code{.Rmd} file for a data-driven cv in current working directory. +Also adds css file for current CV so style can be custommized. +} +\examples{ + +# Make a temp directory for placing files +# This would be a real location for a typical situation +temp_dir <- fs::dir_create(fs::path(tempdir(), "my_cv")) + +use_datadriven_cv( + full_name = "Nick Strayer", + data_location = "https://docs.google.com/spreadsheets/d/14MQICF2F8-vf8CKPF1m4lyGKO6_thG-4aSwat1e2TWc", + pdf_location = "https://github.com/nstrayer/cv/raw/master/strayer_cv.pdf", + html_location = "nickstrayer.me/cv/", + source_location = "https://github.com/nstrayer/cv", + output_dir = temp_dir, + open_files = FALSE +) + +# Files should be where they were requested +list.files(temp_dir) + +} diff --git a/man/use_ddcv_template.Rd b/man/use_ddcv_template.Rd new file mode 100644 index 0000000..825155e --- /dev/null +++ b/man/use_ddcv_template.Rd @@ -0,0 +1,34 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/use_ddcv_template.R +\name{use_ddcv_template} +\alias{use_ddcv_template} +\title{Use template file from package} +\usage{ +use_ddcv_template( + file_name, + params = NULL, + output_file_name = file_name, + output_dir = getwd(), + create_output_dir = FALSE, + warn_about_no_change = TRUE, + open_after_making = FALSE +) +} +\arguments{ +\item{file_name}{Name of file from templates to use: e.g. \code{cv.rmd}.} + +\item{params}{Parameters used to fill in \code{whisker} template} + +\item{output_file_name}{Name of file after being placed.} + +\item{output_dir}{Directory location for output to be placed in.} + +\item{create_output_dir}{If the requested output directory is missing should it be created?} + +\item{warn_about_no_change}{If there is no change between the new file and what was already there, should a warning be issued?} + +\item{open_after_making}{Should the file be opened after it has been written?} +} +\description{ +Use template file from package +} diff --git a/pkgdown/favicon/apple-touch-icon-120x120.png b/pkgdown/favicon/apple-touch-icon-120x120.png new file mode 100644 index 0000000..1deaf8f Binary files /dev/null and b/pkgdown/favicon/apple-touch-icon-120x120.png differ diff --git a/pkgdown/favicon/apple-touch-icon-152x152.png b/pkgdown/favicon/apple-touch-icon-152x152.png new file mode 100644 index 0000000..d2196f5 Binary files /dev/null and b/pkgdown/favicon/apple-touch-icon-152x152.png differ diff --git a/pkgdown/favicon/apple-touch-icon-180x180.png b/pkgdown/favicon/apple-touch-icon-180x180.png new file mode 100644 index 0000000..8c3ae86 Binary files /dev/null and b/pkgdown/favicon/apple-touch-icon-180x180.png differ diff --git a/pkgdown/favicon/apple-touch-icon-60x60.png b/pkgdown/favicon/apple-touch-icon-60x60.png new file mode 100644 index 0000000..23b74e2 Binary files /dev/null and b/pkgdown/favicon/apple-touch-icon-60x60.png differ diff --git a/pkgdown/favicon/apple-touch-icon-76x76.png b/pkgdown/favicon/apple-touch-icon-76x76.png new file mode 100644 index 0000000..f2854d1 Binary files /dev/null and b/pkgdown/favicon/apple-touch-icon-76x76.png differ diff --git a/pkgdown/favicon/apple-touch-icon.png 
b/pkgdown/favicon/apple-touch-icon.png new file mode 100644 index 0000000..8df72f6 Binary files /dev/null and b/pkgdown/favicon/apple-touch-icon.png differ diff --git a/pkgdown/favicon/favicon-16x16.png b/pkgdown/favicon/favicon-16x16.png new file mode 100644 index 0000000..86cbe16 Binary files /dev/null and b/pkgdown/favicon/favicon-16x16.png differ diff --git a/pkgdown/favicon/favicon-32x32.png b/pkgdown/favicon/favicon-32x32.png new file mode 100644 index 0000000..15bf03c Binary files /dev/null and b/pkgdown/favicon/favicon-32x32.png differ diff --git a/pkgdown/favicon/favicon.ico b/pkgdown/favicon/favicon.ico new file mode 100644 index 0000000..2e51fb5 Binary files /dev/null and b/pkgdown/favicon/favicon.ico differ diff --git a/pt/biancamuniz_resume_to_pdf_pt.html b/pt/biancamuniz_resume_to_pdf_pt.html deleted file mode 100644 index 9e05dee..0000000 --- a/pt/biancamuniz_resume_to_pdf_pt.html +++ /dev/null @@ -1,32760 +0,0 @@
diff --git a/pt/index.html b/pt/index.html deleted file mode 100644 index aefab42..0000000 --- a/pt/index.html +++ /dev/null @@ -1,32756 +0,0 @@
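A note on the `which_files` argument documented in man/use_datadriven_cv.Rd above: because files can be placed individually, the package-provided pieces can be refreshed without touching ones you have customized. A hedged sketch — the output_dir value is just an example, and the sheet URL is the sample one used elsewhere in this diff:

# Sketch: refresh only the printing functions in an existing CV project,
# leaving a customized cv.rmd and dd_cv.css in place.
datadrivencv::use_datadriven_cv(
  full_name = "Nick Strayer",
  data_location = "https://docs.google.com/spreadsheets/d/14MQICF2F8-vf8CKPF1m4lyGKO6_thG-4aSwat1e2TWc",
  output_dir = getwd(),
  which_files = c("cv_printing_functions.r"),
  open_files = FALSE
)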
diff --git a/render_cv.r b/render_cv.r new file mode 100644 index 0000000..8f31926 --- /dev/null +++ b/render_cv.r @@ -0,0 +1,21 @@ +# This script builds both the HTML and PDF versions of your CV + +# If you wanted to speed up rendering for googlesheets-driven CVs you could use +# this script to cache a version of the CV_Printer class with data already +# loaded and load the cached version in the .Rmd instead of re-fetching it twice +# for the HTML and PDF rendering. This exercise is left to the reader (a rough sketch appears below, after the deleted resume files). + +# Knit the HTML version +rmarkdown::render("cv.rmd", + params = list(pdf_mode = FALSE), + output_file = "cv.html") + +# Knit the PDF version to temporary html location +tmp_html_cv_loc <- fs::file_temp(ext = ".html") +rmarkdown::render("cv.rmd", + params = list(pdf_mode = TRUE), + output_file = tmp_html_cv_loc) + +# Convert to PDF using Pagedown +pagedown::chrome_print(input = tmp_html_cv_loc, + output = "cv.pdf") diff --git a/resume-en.Rmd b/resume-en.Rmd deleted file mode 100644 index 0a18451..0000000 --- a/resume-en.Rmd +++ /dev/null @@ -1,169 +0,0 @@ ---- -title: bmuniz_resume_en -author: Bianca Muniz -date: "`r Sys.Date()`" -output: - pagedown::html_resume: - css: ['css/custom_resume.css', 'css/styles_html.css', 'resume'] - # css: ['css/custom_resume.css', 'css/styles_pdf.css', 'resume'] - # set it to true for a self-contained HTML page but it'll take longer to render - self_contained: true -params: - doctype: "HTML" # "PDF" or "HTML" is automatically updated in update_resume.R --- - -```{r setup, include=FALSE} -knitr::opts_chunk$set( - results='asis', - echo = FALSE -) - -#load packages -library(glue) - -#source helper functions -source('helper_functions.R') - -position_data <- read.csv2('./data/position_data_en.csv', stringsAsFactors = F) -skill_data <- read.csv('./data/skill_data.csv', stringsAsFactors = F) -``` - -# Aside - -## Contact {#contact} - -```{r contactinfo} - -# Adjust contact info based on document type - html has link while pdf does not -if(params$doctype == "HTML") { - glue::glue(' - - Cajamar, São Paulo - - bnc.muniz@gmail.com - - biancamuniz@apublica.com - - [github.com/biamuniz](https://github.com/biamuniz) - - [linkedin.com/bmuniz](https://www.linkedin.com/in/bmuniz/)') -} - -if(params$doctype == "PDF") { - glue::glue(' - - Cajamar, São Paulo - - bnc.muniz@gmail.com - - biancamuniz@apublica.com - - [github.com/biamuniz](https://github.com/biamuniz) - - [linkedin.com/bmuniz](https://www.linkedin.com/in/bmuniz/)') -} - -``` - -```{r download} -# if doctype is html add link to download a pdf version -if(params$doctype == "HTML"){ - cat("[ Download as a PDF](https://github.com/biamuniz/resume/raw/main/bianca_muniz_resume_english.pdf)") -} - -``` - -
    - -## Tech skills {#skills} - -```{r codingskills, dev.args = list(bg = 'transparent')} - -# function from helper_functions.R -build_skill_bars(skill_data, "Tech") - -``` - -## R skills {#rpackages} - -```{r rskills, dev.args = list(bg = 'transparent')} - -# function from helper_functions.R -build_skill_bars(skill_data, "R Packages") - -``` - - -## Languages {#rpackages} - -- Portuguese -- English -- Spanish - -## Disclaimer {#disclaimer} - -```{r} - -# set disclaimer in bottom right corner based on doctype -if(params$doctype == "HTML") { - glue::glue(" - Made w/ [**pagedown**](https://github.com/rstudio/pagedown). - Source code: [**Github repo**](https://github.com/biamuniz/resume). - ") -} -if(params$doctype == "PDF") { - glue::glue(" - Made w/ **pagedown::html_resume** - Code: **github.com/biamuniz/resume** - ") -} -``` - -Last updated on `r Sys.Date()`.

    - -# Main - -## Bianca Muniz {#title} - -I enjoy working with data to find and deliver insights, solve business problems, and build tools that help people work more efficiently. I moved into data science after 10+ years working in public policy and public relations. - -## Professional Experience {data-icon="suitcase"} - -```{r jobs} -print_section(position_data, 'work') -``` - -## Education {data-icon="graduation-cap" data-concise="true"} - -```{r education} -print_section(position_data, 'education') -``` - -## Awards {data-icon="award" data-concise="true"} - -```{r awards} -print_section(position_data, 'awards') -``` - -## Talks {data-icon="person-chalkboard" data-concise="true"} - -```{r talks} -print_section(position_data, 'talks') -``` - -## Voluntary work {data-icon="people-group" data-concise="true"} - -- R-Ladies São Paulo -- Cursinho pré-vestibular Jeannine Aboulafia - - -## Disclaimer {#disclaimer} - -```{r} - -# set disclaimer in bottom right corner based on doctype -if(params$doctype == "HTML") { - glue::glue(" - Made w/ [**pagedown**](https://github.com/rstudio/pagedown). - Source code: [**Github repo**](https://github.com/biamuniz/resume). - ") -} -if(params$doctype == "PDF") { - glue::glue(" - Made w/ **pagedown::html_resume** - Code: **github.com/biamuniz/resume** - ") -} -``` - -Last updated on `r Sys.Date()`.

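The caching that render_cv.r above leaves as an exercise, sketched out. This is only a rough illustration, not package code: the cv_cache.rds file name is made up, and cv.rmd would need a matching readRDS() call (and would still have to set cv$pdf_mode per render) in place of calling create_CV_object() itself.

# Fetch the spreadsheet once, cache the object, then render both versions.
source("cv_printing_functions.r")

cv <- create_CV_object(
  data_location = "https://docs.google.com/spreadsheets/d/14MQICF2F8-vf8CKPF1m4lyGKO6_thG-4aSwat1e2TWc",
  pdf_mode = FALSE
)
saveRDS(cv, "cv_cache.rds")  # hypothetical cache file, read back inside cv.rmd

rmarkdown::render("cv.rmd", params = list(pdf_mode = FALSE), output_file = "cv.html")

tmp_html_cv_loc <- fs::file_temp(ext = ".html")
rmarkdown::render("cv.rmd", params = list(pdf_mode = TRUE), output_file = tmp_html_cv_loc)
pagedown::chrome_print(input = tmp_html_cv_loc, output = "cv.pdf")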
    diff --git a/resume-pt.Rmd b/resume-pt.Rmd deleted file mode 100644 index c6125ab..0000000 --- a/resume-pt.Rmd +++ /dev/null @@ -1,171 +0,0 @@ ---- -title: bmuniz_resume_pt -author: Bianca Muniz -date: "`r Sys.Date()`" -output: - pagedown::html_resume: - css: ['css/custom_resume.css', 'css/styles_html.css', 'resume'] - # css: ['css/custom_resume.css', 'css/styles_pdf.css', 'resume'] - # set it to true for a self-contained HTML page but it'll take longer to render - self_contained: true -params: - doctype: "HTML" # "PDF" or "HTML" is automatically updated in update_resume.R ---- - -```{r setup, include=FALSE} -knitr::opts_chunk$set( - results='asis', - echo = FALSE -) - -#load packages -library(glue) - -#source helper functions -source('helper_functions.R') - -position_data <- read.csv2('./data/position_data_pt2.csv', stringsAsFactors = F) -skill_data <- read.csv('./data/skill_data.csv', stringsAsFactors = F) -``` - -# Aside - -## Contact {#contact} - -```{r contactinfo} - -# Adjust contact info based on document type - html has link while pdf does not -if(params$doctype == "HTML") { - glue::glue(' - - Cajamar, São Paulo - - (11) 972562470 - - bnc.muniz@gmail.com - - biancamuniz@apublica.com - - [github.com/biamuniz](https://github.com/biamuniz) - - [linkedin.com/bmuniz](https://www.linkedin.com/in/bmuniz/)') -} - -if(params$doctype == "PDF") { - glue::glue(' - - Cajamar, São Paulo - - (11) 972562470 - - bnc.muniz@gmail.com - - biancamuniz@apublica.com - - [github.com/biamuniz](https://github.com/biamuniz) - - [linkedin.com/bmuniz](https://www.linkedin.com/in/bmuniz/)') -} - -``` - -```{r download} -# if doctype is html add link to download a pdf version -if(params$doctype == "HTML"){ - cat("[ Download as a PDF](https://github.com/biamuniz/resume/raw/main/bianca_muniz_resume.pdf)") -} - -``` - -
    - -## Tech skills {#skills} - -```{r codingskills, dev.args = list(bg = 'transparent')} - -# function from helper_functions.R -build_skill_bars(skill_data, "Tech") - -``` - -## R skills {#rpackages} - -```{r rskills, dev.args = list(bg = 'transparent')} - -# function from helper_functions.R -build_skill_bars(skill_data, "R Packages") - -``` - - -## Languages - -- Portuguese -- English -- Spanish - -## Disclaimer {#disclaimer} - -```{r} - -# set disclaimer in bottom right corner based on doctype -if(params$doctype == "HTML") { - glue::glue(" - Made w/ [**pagedown**](https://github.com/rstudio/pagedown). - Source code: [**Github repo**](https://github.com/biamuniz/resume). - ") -} -if(params$doctype == "PDF") { - glue::glue(" - Made w/ **pagedown::html_resume** - Code: **github.com/biamuniz/resume** - ") -} -``` - -Last updated on `r Sys.Date()`.

    - -# Main - -## Bianca Muniz {#title} - -I enjoy working with data to find and deliver insights, solve business problems, and build tools that help people work more efficiently. I moved into data science after 10+ years working in public policy and public relations. - -## Professional Experience {data-icon="suitcase"} - -```{r jobs} -print_section(position_data, 'work') -``` - -## Education {data-icon="graduation-cap" data-concise="true"} - -```{r education} -print_section(position_data, 'education') -``` - -## Awards {data-icon="award" data-concise="true"} - -```{r awards} -print_section(position_data, 'awards') -``` - -## Talks {data-icon="award" data-concise="true"} - -```{r talks} -print_section(position_data, 'talks') -``` - -## Trabalho Voluntário - -- R-Ladies São Paulo -- Cursinho pré-vestibular Jeannine Aboulafia - - -## Disclaimer {#disclaimer} - -```{r} - -# set disclaimer in bottom right corner based on doctype -if(params$doctype == "HTML") { - glue::glue(" - Made w/ [**pagedown**](https://github.com/rstudio/pagedown). - Source code: [**Github repo**](https://github.com/biamuniz/resume). - ") -} -if(params$doctype == "PDF") { - glue::glue(" - Made w/ **pagedown::html_resume** - Code: **github.com/biamuniz/resume** - ") -} -``` - -Last updated on `r Sys.Date()`.

diff --git a/tests/CV_printing_functions.R b/tests/CV_printing_functions.R new file mode 100644 index 0000000..a730003 --- /dev/null +++ b/tests/CV_printing_functions.R @@ -0,0 +1,226 @@ +# This file contains all the code needed to parse and print various sections of your CV +# from data. Feel free to tweak it as you desire! + + +#' Create a CV_Printer object. +#' +#' @param data_location Path of the spreadsheets holding all your data. This can be +#' either a URL to a google sheet with multiple sheets containing the four +#' data types or a path to a folder containing four `.csv`s with the necessary +#' data. +#' @param pdf_mode Is the output being rendered into a pdf? Aka do links need +#' to be stripped? +#' @param sheet_is_publicly_readable If you're using google sheets for data, +#' is the sheet publicly available? (Makes authorization easier.) +#' @return A new `CV_Printer` object. +create_CV_object <- function(data_location, + pdf_mode = FALSE, + sheet_is_publicly_readable = TRUE) { + + cv <- list( + pdf_mode = pdf_mode, + links = c() + ) + + is_google_sheets_location <- stringr::str_detect(data_location, "docs\\.google\\.com") + + if(is_google_sheets_location){ + if(sheet_is_publicly_readable){ + # This tells google sheets to not try and authenticate. Note that this will only + # work if your sheet has sharing set to "anyone with link can view" + googlesheets4::sheets_deauth() + } else { + # My info is in a public sheet so there's no need to do authentication but if you want + # to use a private sheet, then this is the way you need to do it. + # designate project-specific cache so we can render Rmd without problems + options(gargle_oauth_cache = ".secrets") + } + + cv$entries_data <- googlesheets4::read_sheet(data_location, sheet = "entries", skip = 1) %>% + # Google sheets loves to turn columns into list ones if there are different types + dplyr::mutate_if(is.list, purrr::map_chr, as.character) + + cv$skills <- googlesheets4::read_sheet(data_location, sheet = "language_skills", skip = 1) + cv$text_blocks <- googlesheets4::read_sheet(data_location, sheet = "text_blocks", skip = 1) + cv$contact_info <- googlesheets4::read_sheet(data_location, sheet = "contact_info", skip = 1) + } else { + # Want to go old-school with csvs? + cv$entries_data <- readr::read_csv(paste0(data_location, "entries.csv"), skip = 1) + cv$skills <- readr::read_csv(paste0(data_location, "language_skills.csv"), skip = 1) + cv$text_blocks <- readr::read_csv(paste0(data_location, "text_blocks.csv"), skip = 1) + cv$contact_info <- readr::read_csv(paste0(data_location, "contact_info.csv"), skip = 1) + } + + + # This year is assigned to the end date of "current" events to make sure they get sorted later. 
+ future_year <- lubridate::year(lubridate::ymd(Sys.Date())) + 10 + + # Clean up entries dataframe to format we need it for printing + cv$entries_data %<>% + tidyr::unite( + tidyr::starts_with('description'), + col = "description_bullets", + sep = "\n- ", + na.rm = TRUE + ) %>% + dplyr::mutate( + description_bullets = paste0("- ", description_bullets), + no_start = is.na(start), + has_start = !no_start, + no_end = is.na(end), + has_end = !no_end, + cur_end = tolower(end) %in% c("current", "now", ""), + end_num = ifelse (cur_end | no_end, future_year, end), + timeline = dplyr::case_when( + no_start & no_end ~ "N/A", + no_start & has_end ~ as.character(end), + has_start & no_end ~ paste(start, "-", "Current"), + TRUE ~ paste(end, "-", start) + ) + ) %>% + dplyr::select(-no_start, -has_start, -no_end, -has_end, -cur_end) %>% + dplyr::arrange(desc(end_num)) %>% + dplyr::mutate_all(~ ifelse(is.na(.), 'N/A', .)) + + cv +} + + +# Remove links from a text block and add to internal list +sanitize_links <- function(cv, text){ + if(cv$pdf_mode){ + link_titles <- stringr::str_extract_all(text, '(?<=\\[).+?(?=\\])')[[1]] + link_destinations <- stringr::str_extract_all(text, '(?<=\\().+?(?=\\))')[[1]] + + n_links <- length(cv$links) + n_new_links <- length(link_titles) + + if(n_new_links > 0){ + # add links to links array + cv$links <- c(cv$links, link_destinations) + + # Build map of link destination to superscript + link_superscript_mappings <- purrr::set_names( + paste0("<sup>", (1:n_new_links) + n_links, "</sup>"), + paste0("(", link_destinations, ")") + ) + + # Replace the link destination and remove square brackets for title + text <- text %>% + stringr::str_replace_all(stringr::fixed(link_superscript_mappings)) %>% + stringr::str_replace_all('\\[(.+?)\\]', "\\1") + } + } + + list(cv = cv, text = text) +} + + +#' @description Take a position data frame and the section id desired and print the section to markdown. +#' @param section_id ID of the entries section to be printed as encoded by the `section` column of the `entries` table +print_section <- function(cv, section_id, glue_template = "default"){ + + if(glue_template == "default"){ + glue_template <- " +### {title} + +{loc} + +{institution} + +{timeline} + +{description_bullets} +\n\n\n" + } + + section_data <- dplyr::filter(cv$entries_data, section == section_id) + + # Take the entire entries data frame and remove the links in descending order + # so links for the same position are right next to each other in number. + for(i in 1:nrow(section_data)){ + for(col in c('title', 'description_bullets')){ + strip_res <- sanitize_links(cv, section_data[i, col]) + section_data[i, col] <- strip_res$text + cv <- strip_res$cv + } + } + + print(glue::glue_data(section_data, glue_template)) + + invisible(strip_res$cv) +} + + + +#' @description Prints out text block identified by a given label. +#' @param label ID of the text block to print as encoded in `label` column of `text_blocks` table. +print_text_block <- function(cv, label){ + text_block <- dplyr::filter(cv$text_blocks, loc == label) %>% + dplyr::pull(text) + + strip_res <- sanitize_links(cv, text_block) + + cat(strip_res$text) + + invisible(strip_res$cv) +} + + + +#' @description Construct a bar chart of skills +#' @param out_of The relative maximum for skills. Used to set what a fully filled in skill bar is. +print_skill_bars <- function(cv, out_of = 5, bar_color = "#969696", bar_background = "#d9d9d9", glue_template = "default"){ + + if(glue_template == "default"){ + glue_template <- " +
<div class = 'skill-bar' style = \"background:linear-gradient(to right, {bar_color} {width_percent}%, {bar_background} {width_percent}% 100%)\">{skill}</div>
    " + } + cv$skills %>% + dplyr::mutate(width_percent = round(100*level/out_of)) %>% + glue::glue_data(glue_template) %>% + print() + + invisible(cv) +} + + + +#' @description List of all links in document labeled by their superscript integer. +print_links <- function(cv) { + n_links <- length(cv$links) + if (n_links > 0) { + cat(" +Links {data-icon=link} +-------------------------------------------------------------------------------- + +
    + + +") + + purrr::walk2(cv$links, 1:n_links, function(link, index) { + print(glue::glue('{index}. {link}')) + }) + } + + invisible(cv) +} + + + +#' @description Contact information section with icons +print_contact_info <- function(cv){ + glue::glue_data( + cv$contact_info, + "- {contact}" + ) %>% print() + + invisible(cv) +} diff --git a/tests/date_options/setup_test.R b/tests/date_options/setup_test.R new file mode 100644 index 0000000..12350f4 --- /dev/null +++ b/tests/date_options/setup_test.R @@ -0,0 +1,12 @@ +library(here) + + +datadrivencv::use_datadriven_cv( + full_name = "Testing McTester", + # data_location = "https://docs.google.com/spreadsheets/d/1SC8dKPlPZDA1MECZr8xlJPitjWQ3AV4eXrPvnlNv7m8/", + data_location = "https://docs.google.com/spreadsheets/d/14MQICF2F8-vf8CKPF1m4lyGKO6_thG-4aSwat1e2TWc", + output_dir = here("tests/date_options"), + open_files = FALSE, + which_files = "all" + # which_files = c("cv_printing_functions.r") +) diff --git a/tests/testthat.R b/tests/testthat.R new file mode 100644 index 0000000..12c2b61 --- /dev/null +++ b/tests/testthat.R @@ -0,0 +1,4 @@ +library(testthat) +library(datadrivencv) + +test_check("datadrivencv") diff --git a/tests/testthat/test-rendering_cv.R b/tests/testthat/test-rendering_cv.R new file mode 100644 index 0000000..c75c123 --- /dev/null +++ b/tests/testthat/test-rendering_cv.R @@ -0,0 +1,52 @@ +test_that("Rendering to HTML works", { + # Make a temp directory for placing files + # and make sure it's empty + temp_dir <- fs::dir_create(fs::path(tempdir(), "test_dir")) + + + # Setup data + data_loc <- fs::path(temp_dir, "csv_data") + datadrivencv::use_csv_data_storage( + folder_name = data_loc, + create_output_dir = TRUE + ) + + # Setup files + datadrivencv::use_datadriven_cv( + full_name = "Testing McTester", + data_location = paste0(data_loc, "/"), + output_dir = temp_dir, + open_files = FALSE, + use_network_logo = TRUE + ) + + # Knit the HTML version + html_knit_res <- rmarkdown::render(fs::path(temp_dir, "cv.rmd"), + params = list(pdf_mode = FALSE), + output_file = fs::path(temp_dir, "cv.html"), + quiet = TRUE) + + expect_true(fs::file_exists(html_knit_res)) + + # Knit version for PDF + pdf_knit_res <-rmarkdown::render(fs::path(temp_dir, "cv.rmd"), + params = list(pdf_mode = TRUE), + output_file = fs::path(temp_dir, "cv_4_pdf.html"), + quiet = TRUE) + + has_link_section <- function(html_text){ + stringr::str_detect( + html_text, + stringr::fixed("
<div id=\"links\" class=\"section level2\">
<h2>Links</h2>
    ") + ) + } + # Make sure that the output has a links section at the end of it + expect_true(has_link_section(readr::read_file(pdf_knit_res))) + + # Also make sure the html output doesn't have the links section + expect_false(has_link_section(readr::read_file(html_knit_res))) + + # Clean up temp dir + fs::dir_walk(temp_dir, fs::file_delete) +}) diff --git a/tests/testthat/test-use_datadriven_cv.R b/tests/testthat/test-use_datadriven_cv.R new file mode 100644 index 0000000..107dd21 --- /dev/null +++ b/tests/testthat/test-use_datadriven_cv.R @@ -0,0 +1,121 @@ +test_that("Addition of all files works", { + + # Make a temp directory for placing files + temp_dir <- fs::dir_create(fs::path(tempdir(), "test_dir")) + + datadrivencv::use_datadriven_cv( + full_name = "Testing McTester", + data_location = "here/be/my/data/", + output_dir = temp_dir, + open_files = FALSE + ) + expect_true( + all(c("cv.rmd", "dd_cv.css", "render_cv.r", "cv_printing_functions.r") %in% list.files(temp_dir)) + ) + + # Clean up temp dir + fs::dir_walk(temp_dir, fs::file_delete) +}) + + +test_that("Addition of subset of files", { + + # Make a temp directory for placing files + temp_dir <- fs::dir_create(fs::path(tempdir(), "test_dir")) + + datadrivencv::use_datadriven_cv( + full_name = "Testing McTester", + data_location = "here/be/my/data/", + output_dir = temp_dir, + which_files = c("render_cv.r", "cv_printing_functions.r"), + open_files = FALSE + ) + + expect_true( + all(c("render_cv.r", "cv_printing_functions.r") %in% list.files(temp_dir)) + ) + + expect_false("cv.rmd" %in% list.files(temp_dir)) + expect_false("dd_cv.css" %in% list.files(temp_dir)) + # Clean up temp dir + fs::dir_walk(temp_dir, fs::file_delete) +}) + + +test_that("Warns when trying to update a file with no change", { + + # Make a temp directory for placing files + temp_dir <- fs::dir_create(fs::path(tempdir(), "test_dir")) + + + # First dump all files into directory + datadrivencv::use_datadriven_cv( + full_name = "Testing McTester", + data_location = "here/be/my/data/", + output_dir = temp_dir, + which_files = "all", + open_files = FALSE + ) + + # Then try an update of the rmd file that has no changes + expect_warning( + datadrivencv::use_datadriven_cv( + full_name = "Testing McTester", + data_location = "here/be/my/data/", + output_dir = temp_dir, + which_files = c("cv.rmd"), + open_files = FALSE + ), + "cv.rmd already exists and there are no differences with the current version.", + fixed = TRUE + ) + + # Finally, do an update with a different name which should not give a warning + testthat::expect_silent( + datadrivencv::use_datadriven_cv( + full_name = "Testing McTester the second", + data_location = "here/be/my/data/", + output_dir = temp_dir, + which_files = c("cv.rmd"), + open_files = FALSE + ) + ) + + # Clean up temp dir + fs::dir_walk(temp_dir, fs::file_delete) +}) + + +test_that("Addition of all data csvs works", { + + # Make a temp directory for placing files + temp_dir <- fs::dir_create(fs::path(tempdir(), "test_dir")) + + # Wont make a new directory for you if you dont want it to + expect_error( + datadrivencv::use_csv_data_storage( + folder_name = fs::path(temp_dir, "csv_data"), + create_output_dir = FALSE + ), + paste( + "The requested output directory:", + fs::path(temp_dir, "csv_data"), + "doesn't exist. Either set create_output_dir = TRUE or manually make directory." 
+ ), + fixed = TRUE + ) + + # Will make directory for you if you want it to + datadrivencv::use_csv_data_storage( + folder_name = fs::path(temp_dir, "csv_data"), + create_output_dir = TRUE + ) + + expect_true( + all(c("entries.csv", "text_blocks.csv", "language_skills.csv","contact_info.csv" ) %in% list.files(fs::path(temp_dir, "csv_data"))) + ) + + # Clean up temp dir + fs::dir_walk(temp_dir, fs::file_delete) +}) + diff --git a/update_resume.R b/update_resume.R deleted file mode 100644 index 3e2965e..0000000 --- a/update_resume.R +++ /dev/null @@ -1,47 +0,0 @@ -# author: matt leary -# date: November 19, 2019 -# intent: update the resume html and pdf file in one script - -rmarkdown::render(input = "resume-en.Rmd", - output_file = "en/index.html", - output_options = list( - css = c('css/custom_resume.css', 'css/styles_html.css', 'resume'), - self_contained = TRUE - ), - params = list( - doctype = "HTML" - ) - ) - -rmarkdown::render(input = "resume-pt.Rmd", - output_file = "pt/index.html", - output_options = list( - css = c('css/custom_resume.css', 'css/styles_html.css', 'resume'), - self_contained = TRUE - ), - params = list( - doctype = "HTML" - ) -) - -rmarkdown::render(input = "resume-en.Rmd", - output_file = "en/biancamuniz_resume_pdf_en.html", - output_options = list( - css = c('css/custom_resume.css', 'css/styles_pdf.css', 'resume') - ), - params = list( - doctype = "PDF" - ) -) - -rmarkdown::render(input = "resume-pt.Rmd", - output_file = "pt/biancamuniz_resume_to_pdf_pt.html", - output_options = list( - css = c('css/custom_resume.css', 'css/styles_pdf.css', 'resume') - ), - params = list( - doctype = "PDF" - ) -) - -
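As a closing orientation: the printers defined in tests/CV_printing_functions.R are meant to be chained from chunks inside cv.rmd. A minimal sketch under assumptions — the "intro" text-block label and "education" section id are illustrative (the deleted resumes above happened to print an 'education' section), and params$pdf_mode only exists inside the knitted Rmd:

# Sketch of a cv.rmd chunk wired to CV_printing_functions.R. Each printer
# returns the cv object invisibly, so the link superscripts collected by
# sanitize_links() accumulate across calls.
source("cv_printing_functions.r")

cv <- create_CV_object(
  data_location = "https://docs.google.com/spreadsheets/d/14MQICF2F8-vf8CKPF1m4lyGKO6_thG-4aSwat1e2TWc",
  pdf_mode = params$pdf_mode
)

cv <- print_text_block(cv, "intro")    # "intro" is an assumed text_blocks label
cv <- print_section(cv, "education")   # a section id from the entries table
cv <- print_skill_bars(cv)
print_contact_info(cv)
print_links(cv)  # numbered link list; only non-empty when pdf_mode stripped links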