Skip to content

Instantly share code, notes, and snippets.

@dariocravero
Created February 2, 2020 22:35
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save dariocravero/65d2530ff39947add902e771f02ed2db to your computer and use it in GitHub Desktop.
Save dariocravero/65d2530ff39947add902e771f02ed2db to your computer and use it in GitHub Desktop.

This is how we do metadata in Hasura instead of going through the migrations mechanism in v1. It's interesting that the Hasura team is following a similar approach https://youtu.be/ik3vd0Ju9sw?t=1636 with the next iteration of the metadata!

This process really helps us model our data and the relations way faster. In particular when it comes down to renaming things or removing many permissions at once.

I should've totally shared this earlier :/.

What we do is that we write our metadata in YAML files inside a metadata folder and split them by types. See metadata/tables/users.yaml at the end for an example.

One thing we're doing differently, though, is splitting the tables into separate YAML files, one per table. It helped us make sense of the larger files. What are your thoughts on this?

We also played with YAML's anchors, etc. We're still split on those. They helped a bit when writing things by hand and they added clarity to similar permissions, but I can see how they would add more complexity when trying to automate things. What do you think?

The script is written in node. Install the dependencies with npm install and run it like:

node metadata.js setup
node metadata.js clear
node metadata.js backup file.json
node metadata.js rollback file.json

I'd be interested to try out the Hasura version, and hopefully I can drop this script once the one the team built works through the Hasura console!

One thing I'm interested to know is what happens when there's a problem applying a migration that is linked with a change in metadata. We wrote a bash script (see it at the end of the gist) that backs up the metadata before applying migrations and rolls back if it didn't work, but it's not very robust — in particular because it happens without Hasura knowing, and there's a brief window of downtime.

Does Hasura's version apply it all in a transaction?

#!/usr/bin/env bash
# Apply database migrations with a metadata safety net:
#   1. back up the current Hasura metadata,
#   2. clear it,
#   3. apply migrations,
#   4. re-apply metadata from the YAML sources.
# If migrations or metadata setup fail, restore the backup and exit
# non-zero so CI/callers can detect the failure (the original script
# always exited 0, even after a rollback).

# Restore metadata from the backup taken at the start of this run.
rollback() {
  echo "!> metadata rollback"
  yarn metadata rollback backup.json
  echo "!> metadata rollback ok"
}

echo "> metadata backup"
yarn metadata backup backup.json
echo "> metadata backup ok"

echo "> metadata clear"
yarn metadata clear
echo "> metadata clear ok"

echo "> apply"
if yarn migrate apply; then
  echo "> migrate apply ok"
  echo "> metadata setup"
  if yarn metadata setup; then
    echo "> metadata setup ok"
  else
    rollback
    exit 1
  fi
else
  rollback
  exit 1
fi
let { promises: fs } = require('fs')
let delve = require('dlv')
let fetch = require('fetchu')
let path = require('path')
let YAML = require('yaml')
// Flatten nested arrays inside a permission's `columns` list. YAML anchors /
// merges can produce nested arrays; Hasura expects a flat list of column
// names. Mutates and returns `item`; callers only pass freshly-spread copies.
let flattenPermissionColumns = item => {
  if (item.permission && Array.isArray(item.permission.columns)) {
    item.permission.columns = item.permission.columns.flat(Infinity)
  }
  return item
}

// Apply `defaults` underneath each entry's `permission` object (explicit YAML
// values win) and flatten its column lists. Shared by all four permission
// kinds below, which previously repeated this stanza verbatim.
let withPermissionDefaults = (permissions, defaults) =>
  permissions
    .map(item => ({
      ...item,
      permission: {
        ...defaults,
        ...item.permission,
      },
    }))
    .map(flattenPermissionColumns)

// Fill in every field Hasura's replace_metadata expects for a table, so the
// per-table YAML files only need to declare what differs from the defaults.
// Returns a new object; `item` itself is not modified.
function fillGapsTables(item) {
  let res = {
    array_relationships: [],
    configuration: {
      custom_column_names: {},
      custom_root_fields: {
        delete: null,
        insert: null,
        select: null,
        select_aggregate: null,
        select_by_pk: null,
        update: null,
      },
    },
    computed_fields: [],
    delete_permissions: [],
    event_triggers: [],
    insert_permissions: [],
    is_enum: false,
    object_relationships: [],
    select_permissions: [],
    update_permissions: [],
    ...item,
  }
  res.select_permissions = withPermissionDefaults(res.select_permissions, {
    allow_aggregations: true,
    filter: {},
    columns: [],
    computed_fields: [],
  })
  res.delete_permissions = withPermissionDefaults(res.delete_permissions, {
    filter: {},
  })
  res.insert_permissions = withPermissionDefaults(res.insert_permissions, {
    check: {},
    set: {},
    columns: [],
  })
  res.update_permissions = withPermissionDefaults(res.update_permissions, {
    filter: {},
    set: {},
    columns: [],
  })
  return res
}
// Per-metadata-type normalisers applied to each parsed YAML document in
// getConfig: tables get their defaults filled in; functions and remote
// schemas pass through unchanged.
let FILL_GAPS = {
  functions: item => item,
  remote_schemas: item => item,
  tables: fillGapsTables,
}
// Read and parse every metadata/<type>/*.yaml file, normalising each
// document through the matching FILL_GAPS handler. A missing
// metadata/<type> directory is treated as "no config of this type" and
// yields an empty list; any other error is logged and also yields [].
async function getConfig(type) {
  let root = path.join(__dirname, 'metadata', type)
  try {
    let entries = await fs.readdir(root)
    let yamlFiles = entries.filter(name => name.endsWith('.yaml'))
    return await Promise.all(
      yamlFiles.map(async name => {
        let raw = await fs.readFile(path.join(root, name), 'utf8')
        return FILL_GAPS[type](YAML.parse(raw))
      })
    )
  } catch (error) {
    if (error.code !== 'ENOENT') {
      console.error(type, error)
    }
    return []
  }
}
// The Hasura metadata/query API endpoint, e.g. https://host/v1/query.
let METADATA_API = `${process.env.HASURA_GRAPHQL_ENDPOINT}/v1/query`

// POST a single metadata command { type, args } to Hasura, authenticated
// with the admin secret, and resolve with the parsed JSON response.
// With DEBUG set, the outgoing args are pretty-printed first.
async function query(type, args = {}) {
  if (process.env.DEBUG) {
    console.log(JSON.stringify(args, null, ' '))
  }
  let response = await fetch(METADATA_API, {
    method: 'POST',
    body: JSON.stringify({ type, args }),
    headers: {
      'content-type': 'application/json',
      'x-hasura-admin-secret': process.env.HASURA_GRAPHQL_ADMIN_SECRET,
    },
  })
  return await response.json()
}
// Restore a previously exported metadata snapshot via replace_metadata.
// Failures are logged but not rethrown, so a rollback attempt never masks
// the error that triggered it.
async function _rollback(backup) {
  console.log('> rollback')
  let succeeded = true
  try {
    await query('replace_metadata', backup)
  } catch (error) {
    succeeded = false
    console.error(' (fail)')
    console.error(error)
  }
  if (succeeded) {
    console.log(' (done)')
  }
}
// CLI entry: read a metadata backup JSON file from disk and push it back
// to Hasura.
async function rollback(file) {
  console.log(`> Rollback from ${file}`)
  let contents = await fs.readFile(file, 'utf8')
  await _rollback(JSON.parse(contents))
  console.log(' (done)')
}
// Export the current Hasura metadata as a plain object.
async function _backup() {
  return query('export_metadata')
}
// CLI entry: export the current metadata and write it, pretty-printed,
// to the given file.
async function backup(file) {
  console.log(`> Backup to ${file}`)
  let metadata = await _backup()
  await fs.writeFile(file, JSON.stringify(metadata, null, ' '))
  console.log(` (done)`)
}
// CLI entry: wipe all metadata from Hasura (tracked tables, permissions,
// relationships, event triggers, ...). The API response is echoed.
async function clear() {
  console.log('> clear metadata')
  let result = await query('clear_metadata')
  console.log(result)
  console.log(' (done)')
}
// Table metadata keys that get emptied by `setup --onlyTrackTables`,
// leaving only the bare table tracking (no relations, permissions or
// event triggers) — handy while iterating on migrations.
let ONLY_TRACK_TABLES_KEYS_TO_SKIP = [
  'array_relationships',
  'event_triggers',
  'delete_permissions',
  'insert_permissions',
  'select_permissions',
  'update_permissions',
  'object_relationships',
]
// Rebuild Hasura metadata from the YAML files on disk: snapshot the current
// metadata, clear it, then replace it with the parsed config. On any failure
// the snapshot is restored.
//
// Options:
//   dryRun          - with DEBUG set, write metadata-debug.json and exit
//                     before calling replace_metadata.
//   onlyTrackTables - track tables only; strip relations, permissions and
//                     event triggers.
//
// Fixes over the original: `_rollback(backup)` is now awaited (it was a
// floating promise, so the process could exit mid-rollback), and the
// Hasura-error decoding no longer crashes when the error message contains a
// `{` but isn't valid JSON, or when the JSON path doesn't match the regex.
async function setup({ dryRun = false, onlyTrackTables = false }) {
  let backup = await _backup()
  let next = null
  try {
    await clear()
    console.log('> replace metadata')
    next = {
      version: 2,
      tables: await getConfig('tables'),
    }
    if (onlyTrackTables) {
      console.log(
        '>> only tracking tables, skipping permissions, triggers, etc.'
      )
      next.tables.forEach(table => {
        ONLY_TRACK_TABLES_KEYS_TO_SKIP.forEach(key => {
          table[key] = []
        })
      })
    } else {
      next.functions = await getConfig('functions')
      next.remote_schemas = await getConfig('remote_schemas')
    }
    if (process.env.DEBUG) {
      await fs.writeFile(
        'metadata-debug.json',
        JSON.stringify(next, null, ' ')
      )
      // NOTE(review): --dryRun only takes effect when DEBUG is also set —
      // confirm this nesting is intentional.
      if (dryRun) {
        process.exit(0)
      }
    }
    console.log(await query('replace_metadata', next))
    console.log(' (done)')
  } catch (error) {
    console.error('! something went wrong')
    // Hasura errors arrive as JSON in the message, e.g.:
    // {"path":"$.args.tables[14].select_permissions[0].permission.filter","error":"\"subscriber_dependent\" does not exist","code":"not-exists"}
    // Try to decode that payload to point at the offending YAML entry.
    if (next && /{/.test(error.message)) {
      let parsedError = null
      try {
        parsedError = JSON.parse(error.message)
      } catch (_) {
        // The message merely contained a brace — print the raw message below.
      }
      if (parsedError && parsedError.path) {
        let match = parsedError.path.match(
          /(tables|functions|remote_schemas)\[(\d+)\]\.?(.+)?/
        )
        if (match) {
          let [, type, index, innerPath] = match
          console.log('type', type, 'index', index)
          let thing = next[type][parseInt(index, 10)]
          let name = thing.name || thing.table
          console.error(`Error ${parsedError.code} on "${name}" "${type}".`)
          console.error(parsedError.error)
          if (innerPath) {
            console.error(`JSON path "${innerPath}"`)
            console.error(
              'Content',
              JSON.stringify(delve(thing, innerPath), null, ' ')
            )
          }
        }
      }
      console.error(error.message)
    } else {
      console.error(error)
    }
    // Restore the snapshot taken before anything was cleared. Awaited so
    // the process can't exit mid-rollback.
    await _rollback(backup)
  }
}
// CLI dispatcher: node metadata.js <setup|clear|backup|rollback> [file]
async function run() {
  if (process.env.DEBUG) {
    console.log('> Metadata API: ', METADATA_API)
  }
  let command = process.argv[2]
  if (command === 'backup') {
    await backup(process.argv[3])
  } else if (command === 'rollback') {
    await rollback(process.argv[3])
  } else if (command === 'setup') {
    await setup({
      dryRun: process.argv.includes('--dryRun'),
      onlyTrackTables: process.argv.includes('--onlyTrackTables'),
    })
  } else if (command === 'clear') {
    await clear()
  } else {
    console.log(
      `Usage:\n node metadata.js setup\n node metadata.js clear\n node metadata.js backup file.json\n node metadata.js rollback file.json`
    )
  }
}
run().catch(console.error.bind(console))
--- metadata/tables/users.yaml
---
table: users
#############
# relations #
#############
array_relationships:
- name: company_users
using:
foreign_key_constraint_on:
column: user_id
table: company_users
object_relationships:
- name: profile
using:
foreign_key_constraint_on: profile_id
###############
# permissions #
###############
insert_permissions:
- role: company-admin
permission:
columns:
- cognito_id
- email
- has_accepted_terms
- phone_number
- profile_id
select_permissions:
- role: company-admin
permission: &select_permissions:company-admin:permission
allow_aggregations: true
columns:
- can_be_support
- cognito_id
- created_at
- email
- has_accepted_terms
- id
- phone_number
- profile_id
- updated_at
filter:
_and:
- &is-not-deleted
deleted_at:
_is_null: true
- _or:
# user of current user
- &my-user
id:
_eq: X-Hasura-User-Id
- company_users:
# user is (super)admin of a company I'm an admin of
&company-im-an-admin-of
company_id:
_in: X-Hasura-Companies
update_permissions:
- role: company-admin
permission:
columns:
- deleted_at
- email
- has_accepted_terms
- phone_number
filter:
_and:
- *is-not-deleted
- _or:
# user of current user
- *my-user
############
# events #
############
event_triggers:
- definition:
enable_manual: true
update:
columns:
- email
headers:
- name: x-api-key
value_from_env: HASURA_EVENTS_KEY
name: sync-user-cognito
retry_conf:
interval_sec: 10
num_retries: 0
timeout_sec: 60
webhook_from_env: HASURA_EVENTS_URL
{
"name": "data",
"version": "0.0.1",
"private": true,
"scripts": {
"hasura": "env-cmd -f .env hasura console",
"migrate": "env-cmd -f .env hasura migrate",
"setup": "yarn migrate apply && yarn metadata setup && yarn sample:data",
"metadata": "env-cmd -f .env node metadata.js",
"design": "yarn metadata clear && yarn migrate apply --down all && yarn migrate apply && yarn metadata setup --onlyTrackTables",
"start": "docker-compose pull && docker-compose up",
"down": "docker-compose down"
},
"dependencies": {
"dlv": "^1.1.3",
"env-cmd": "^9.0.3",
"fetchu": "^4.1.0",
"yaml": "^1.7.2"
}
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment