fix(gatsby-transformer-remark): properly bubble up errors thrown in subplugins (#9972)

This allows `gatsby-remark-*` subplugins to skip special error handling: `gatsby` will properly panic on build, but the dev server won't crash anymore.
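
To see why the old shape lost errors, here is a minimal sketch (with a hypothetical `runSubplugins` helper, not Gatsby's actual internals) contrasting the `new Promise(async executor)` anti-pattern this commit removes with the plain `async` function it introduces:

```js
// `runSubplugins` is a hypothetical stand-in for the remark subplugin loop.
const runSubplugins = async content => {
  throw new Error(`subplugin failed`)
}

// Before: a throw inside the async executor rejects the executor's own
// implicit promise, which nothing handles. The outer promise never settles,
// so the error only surfaces as an unhandled rejection (crashing `gatsby develop`).
function getMarkdownASTBefore(content) {
  return new Promise(async (resolve, reject) => {
    const ast = await runSubplugins(content) // throws; `reject` is never reached
    resolve(ast)
  })
}

// After: the same throw rejects the promise the caller actually holds,
// so gatsby can catch it and panic cleanly during builds.
async function getMarkdownASTAfter(content) {
  return runSubplugins(content)
}

getMarkdownASTAfter(`# hello`).catch(err => console.error(err.message)) // "subplugin failed"
```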

tmfelwu authored and pieh committed Nov 28, 2018
1 parent 0c31478 commit b7d4656
Showing 1 changed file with 87 additions and 88 deletions.
packages/gatsby-transformer-remark/src/extend-node-type.js
@@ -128,7 +128,7 @@ module.exports = (
       ASTPromiseMap.delete(cacheKey)
     }).catch(err => {
       ASTPromiseMap.delete(cacheKey)
-      throw err
+      return err
     })
     // Save new AST to cache and return
     // We can now release promise, as we cached result
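
This first hunk addresses the cached-promise side chain. A rough sketch of the behavior difference, with hypothetical values rather than the transformer's real setup:

```js
const ASTPromiseMap = new Map()
const cacheKey = `demo`

// Stands in for the AST promise that is cached and also returned to callers.
const astPromise = Promise.reject(new Error(`boom`))
ASTPromiseMap.set(cacheKey, astPromise)

// Fire-and-forget side chain whose only job is cache eviction.
astPromise
  .then(() => ASTPromiseMap.delete(cacheKey))
  .catch(err => {
    ASTPromiseMap.delete(cacheKey)
    // The old code re-threw here, rejecting this otherwise-unobserved chain
    // and triggering an unhandled rejection that killed the dev server.
    // Returning instead lets the side chain settle quietly...
    return err
  })

// ...while consumers of the original promise still see the rejection.
astPromise.catch(err => console.error(`build error:`, err.message))
```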
@@ -137,99 +137,98 @@
     }
   }
 
-  function getMarkdownAST(markdownNode) {
-    return new Promise(async (resolve, reject) => {
-      if (process.env.NODE_ENV !== `production` || !fileNodes) {
-        fileNodes = getNodesByType(`File`)
-      }
-      // Use Bluebird's Promise function "each" to run remark plugins serially.
-      await Promise.each(pluginOptions.plugins, plugin => {
-        const requiredPlugin = require(plugin.resolve)
-        if (_.isFunction(requiredPlugin.mutateSource)) {
-          return requiredPlugin.mutateSource(
-            {
-              markdownNode,
-              files: fileNodes,
-              getNode,
-              reporter,
-              cache,
-            },
-            plugin.pluginOptions
-          )
-        } else {
-          return Promise.resolve()
-        }
-      })
-      const markdownAST = remark.parse(markdownNode.internal.content)
-
-      if (pathPrefix) {
-        // Ensure relative links include `pathPrefix`
-        visit(markdownAST, [`link`, `definition`], node => {
-          if (
-            node.url &&
-            node.url.startsWith(`/`) &&
-            !node.url.startsWith(`//`)
-          ) {
-            node.url = withPathPrefix(node.url, pathPrefix)
-          }
-        })
-      }
-
-      // source => parse (can order parsing for dependencies) => typegen
-      //
-      // source plugins identify nodes, provide id, initial parse, know
-      // when nodes are created/removed/deleted
-      // get passed cached DataTree and return list of clean and dirty nodes.
-      // Also get passed `dirtyNodes` function which they can call with an array
-      // of node ids which will then get re-parsed and the inferred schema
-      // recreated (if inferring schema gets too expensive, can also
-      // cache the schema until a query fails at which point recreate the
-      // schema).
-      //
-      // parse plugins take data from source nodes and extend it, never mutate
-      // it. Freeze all nodes once done so typegen plugins can't change it
-      // this lets us save off the DataTree at that point as well as create
-      // indexes.
-      //
-      // typegen plugins identify further types of data that should be lazily
-      // computed due to their expense, or are hard to infer graphql type
-      // (markdown ast), or are need user input in order to derive e.g.
-      // markdown headers or date fields.
-      //
-      // wrap all resolve functions to (a) auto-memoize and (b) cache to disk any
-      // resolve function that takes longer than ~10ms (do research on this
-      // e.g. how long reading/writing to cache takes), and (c) track which
-      // queries are based on which source nodes. Also if connection of what
-      // which are always rerun if their underlying nodes change..
-      //
-      // every node type in DataTree gets a schema type automatically.
-      // typegen plugins just modify the auto-generated types to add derived fields
-      // as well as computationally expensive fields.
-      if (process.env.NODE_ENV !== `production` || !fileNodes) {
-        fileNodes = getNodesByType(`File`)
-      }
-      // Use Bluebird's Promise function "each" to run remark plugins serially.
-      await Promise.each(pluginOptions.plugins, plugin => {
-        const requiredPlugin = require(plugin.resolve)
-        if (_.isFunction(requiredPlugin)) {
-          return requiredPlugin(
-            {
-              markdownAST,
-              markdownNode,
-              getNode,
-              files: fileNodes,
-              pathPrefix,
-              reporter,
-              cache,
-            },
-            plugin.pluginOptions
-          )
-        } else {
-          return Promise.resolve()
-        }
-      })
-      resolve(markdownAST)
-    })
-  }
+  async function getMarkdownAST(markdownNode) {
+    if (process.env.NODE_ENV !== `production` || !fileNodes) {
+      fileNodes = getNodesByType(`File`)
+    }
+    // Use Bluebird's Promise function "each" to run remark plugins serially.
+    await Promise.each(pluginOptions.plugins, plugin => {
+      const requiredPlugin = require(plugin.resolve)
+      if (_.isFunction(requiredPlugin.mutateSource)) {
+        return requiredPlugin.mutateSource(
+          {
+            markdownNode,
+            files: fileNodes,
+            getNode,
+            reporter,
+            cache,
+          },
+          plugin.pluginOptions
+        )
+      } else {
+        return Promise.resolve()
+      }
+    })
+    const markdownAST = remark.parse(markdownNode.internal.content)
+
+    if (pathPrefix) {
+      // Ensure relative links include `pathPrefix`
+      visit(markdownAST, [`link`, `definition`], node => {
+        if (
+          node.url &&
+          node.url.startsWith(`/`) &&
+          !node.url.startsWith(`//`)
+        ) {
+          node.url = withPathPrefix(node.url, pathPrefix)
+        }
+      })
+    }
+
+    // source => parse (can order parsing for dependencies) => typegen
+    //
+    // source plugins identify nodes, provide id, initial parse, know
+    // when nodes are created/removed/deleted
+    // get passed cached DataTree and return list of clean and dirty nodes.
+    // Also get passed `dirtyNodes` function which they can call with an array
+    // of node ids which will then get re-parsed and the inferred schema
+    // recreated (if inferring schema gets too expensive, can also
+    // cache the schema until a query fails at which point recreate the
+    // schema).
+    //
+    // parse plugins take data from source nodes and extend it, never mutate
+    // it. Freeze all nodes once done so typegen plugins can't change it
+    // this lets us save off the DataTree at that point as well as create
+    // indexes.
+    //
+    // typegen plugins identify further types of data that should be lazily
+    // computed due to their expense, or are hard to infer graphql type
+    // (markdown ast), or are need user input in order to derive e.g.
+    // markdown headers or date fields.
+    //
+    // wrap all resolve functions to (a) auto-memoize and (b) cache to disk any
+    // resolve function that takes longer than ~10ms (do research on this
+    // e.g. how long reading/writing to cache takes), and (c) track which
+    // queries are based on which source nodes. Also if connection of what
+    // which are always rerun if their underlying nodes change..
+    //
+    // every node type in DataTree gets a schema type automatically.
+    // typegen plugins just modify the auto-generated types to add derived fields
+    // as well as computationally expensive fields.
+    if (process.env.NODE_ENV !== `production` || !fileNodes) {
+      fileNodes = getNodesByType(`File`)
+    }
+    // Use Bluebird's Promise function "each" to run remark plugins serially.
+    await Promise.each(pluginOptions.plugins, plugin => {
+      const requiredPlugin = require(plugin.resolve)
+      if (_.isFunction(requiredPlugin)) {
+        return requiredPlugin(
+          {
+            markdownAST,
+            markdownNode,
+            getNode,
+            files: fileNodes,
+            pathPrefix,
+            reporter,
+            cache,
+          },
+          plugin.pluginOptions
+        )
+      } else {
+        return Promise.resolve()
+      }
+    })
+    return markdownAST
+  }
 
   async function getHeadings(markdownNode) {
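
The practical effect for subplugin authors, per the commit description: a `gatsby-remark-*` plugin can now simply throw. A hypothetical plugin sketch (the `requiredOption` name is invented for illustration):

```js
// Hypothetical gatsby-remark-* plugin; not an actual published package.
module.exports = async ({ markdownAST, reporter }, pluginOptions = {}) => {
  if (!pluginOptions.requiredOption) {
    // No bespoke error handling needed anymore: this rejection bubbles up
    // through getMarkdownAST, so `gatsby build` panics with the message
    // while `gatsby develop` reports it without crashing.
    throw new Error(`gatsby-remark-example: requiredOption is required`)
  }
  return markdownAST
}
```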
