// API sketch: zone, cluster, table, and column-family administration.
// NOTE(review): `cluster`, `clusterOptions`, `metaData`, `tableOptions`,
// and `table` are assumed to be in scope — this is a design proposal,
// not runnable code.
var bigtable = gcloud.bigtable();
// List all zones available to the project.
bigtable.getZones(function(err, zones, apiResponse) {});
// Get a reference to a single zone by name.
var zone = bigtable.zone('my-zone');
// List clusters within the zone.
zone.getClusters(function(err, clusters, apiResponse) {});
// Create a cluster from an options object.
zone.createCluster(clusterOptions, function(err, cluster, apiResponse) {});
// Fetch the cluster's metadata.
cluster.getMetadata(function (err, metadata, apiResponse) {});
// Update the cluster's metadata.
cluster.setMetadata(metaData, function(err, metadata, apiResponse) {});
// Delete the cluster.
cluster.delete(function (err, apiResponse) {});
// Restore a previously deleted cluster (the proto calls this "undelete").
cluster.restore(function (err, apiResponse) {});
// Create a table by name...
cluster.createTable('my-table', function(err, table, apiResponse) {});
// ...or from an options object.
cluster.createTable(tableOptions, function(err, table, apiResponse) {});
// Get a reference to an existing table by name.
var myTable = cluster.table('my-table');
// List tables in the cluster.
cluster.getTables(function (err, tables, apiResponse) {});
// Fetch the table's metadata (its schema, per the GetTable RPC comments).
table.getMetadata(function (err, tableSchema, apiResponse) {});
// Delete the table.
table.delete(function (err, table, apiResponse) {});
// Rename the table.
table.rename('awesome-table', function (err, table, apiResponse) {});
// Create a column family named 'user' on the table.
table.createFamily('user', function (err, family, apiResponse) {});
// Get a reference to the column family.
var userFamily = table.family('user');
// Update the family's metadata.
userFamily.setMetadata(metaData, function(err, family, apiResponse) {});
// Delete the family (and all data stored under it).
userFamily.delete(function(err, apiResponse) {});
If a callback is not provided, a stream will be returned instead.
// Sample a set of row keys from the table.
table.getSampleKeys(function (err, keys, apiResponse) {});
If a callback is not provided, a stream is returned instead.
// Options controlling which rows are fetched — here, a row-key prefix.
var rowOptions = {
prefix: 'com.google.'
};
// Fetch all rows matching the options.
table.getRows(rowOptions, function(err, rows, apiResponse) {});
Here, `rowOptions` would contain the filters used to determine which rows to delete.
// Delete every row matching the filters in `rowOptions`.
table.deleteRows(rowOptions, function(err, apiResponse) {});
// Insert a single row. NOTE(review): `rowData` is defined further below —
// in this sketch it maps family names to column/value pairs.
table.createRow(rowData, function(err, row, apiResponse) {});
// Insert multiple rows in one call.
table.createRows([rowData], function(err, rows, apiResponse) {});
// Get a reference to a single row by key.
var myRow = table.row('my-row');
// Set one column, addressed as 'family:qualifier'.
myRow.set('user:name', 'stephen', function(err, row, apiResponse) {});
// or for multiple columns
var rowData = {
user: { // family
name: 'stephen', // column
age: 99 // column
}
};
myRow.set(rowData, function(err, row, apiResponse) {});
// Read the named columns from the row.
myRow.get(['user:name'], function(err, columns, apiResponse) {});
// Delete only the named columns from the row.
myRow.delete(['user:name'], function(err, columns, apiResponse) {});
// With no column list, delete the entire row.
myRow.delete(function(err, columns, apiResponse) {});
This should not be confused with Table#family,
which allows you to update/delete families for the entire Table.
// Row-scoped family reference: operates only on this row's columns,
// unlike the table-level family above.
var myFamily = myRow.family('user');
// Read a single column value from this family.
myFamily.get('name', function(err, name, apiResponse) {});
// Set a single column...
myFamily.set('name', 'peter', function(err, family, apiResponse) {});
// ...or set several columns at once from an object.
myFamily.set({ name: 'peter' }, function (err, family, apiResponse) {});
// Delete only the named columns within this family.
myFamily.delete(['name'], function(err, family, apiResponse) {});
// With no column list, delete all of this row's columns in the family.
myFamily.delete(function(err, apiResponse) {});
The closest thing to API documentation I can find is the .proto files and the Go client. As far as I can tell, the zone and cluster are required, and I wanted to supply defaults since I imagine some people will go directly for the table.
vs.
It might also be worth noting that all APIs involving zones/clusters use an entirely different gRPC client anyway.
Per bigtable_cluster_service.proto, it looks like all that is needed is your project ID.
Nope — which is sort of why I wanted to be able to set a default zone and cluster. But we could easily just pass them in, or they could chain:
.zone().cluster().getTables()
That's a thing! I'm cool with changing it to restore; it was just noted as
undelete
within the proto file. There's a GetTable RPC defined, and the comments around it state that it returns the schema of the table.
Still need to work out whether or not this is feasible and what filters are available, but previously we talked about allowing an option to filter the results, like:
b.s.