Path: blob/master/src/packages/database/postgres-blobs.coffee
#########################################################################
# This file is part of CoCalc: Copyright © 2020 Sagemath, Inc.
# License: MS-RSL – see LICENSE.md for details
#########################################################################

###
PostgreSQL -- implementation of queries needed for storage and managing blobs,
including backups, integration with google cloud storage, etc.

COPYRIGHT : (c) 2017 SageMath, Inc.
LICENSE   : MS-RSL
###

# Bucket used for cheaper longterm storage of blobs (outside of PostgreSQL).
# NOTE: We should add this to site configuration, and have it get read once when first
# needed and cached.  Also it would be editable in admin account settings.
# If this env variable begins with a / it is assumed to be a path in the file system,
# e.g., a remote mount (in practice, we are using gcsfuse to mount gcloud buckets).
# If it is gs:// then it is a google cloud storage bucket.
# 2025-01-10: noticed rarely this variable is not set, at least not initially after startup.
# Hardcoding the path, which has never changed anyways.
# Maybe https://github.com/nodejs/help/issues/3618
COCALC_BLOB_STORE_FALLBACK = "/blobs"
COCALC_BLOB_STORE = String(process.env.COCALC_BLOB_STORE ? COCALC_BLOB_STORE_FALLBACK)
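
# For illustration only (example values, not from this file): the two forms
# described in the comment above would look like
#     COCALC_BLOB_STORE=/blobs              # filesystem path, e.g., a gcsfuse mount
#     COCALC_BLOB_STORE=gs://cocalc-blobs   # hypothetical google cloud storage bucket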

async = require('async')
zlib  = require('zlib')
fs    = require('fs')

misc_node = require('@cocalc/backend/misc_node')

{defaults} = misc = require('@cocalc/util/misc')
required = defaults.required

{expire_time, one_result, all_results} = require('./postgres-base')
{delete_patches} = require('./postgres/delete-patches')
blobs = require('./postgres/blobs')

{filesystem_bucket} = require('./filesystem-bucket')

# some queries do searches, which could take a bit.  we give them 5 minutes …
TIMEOUT_LONG_S = 300

exports.extend_PostgreSQL = (ext) -> class PostgreSQL extends ext
    save_blob: (opts) =>
        opts = defaults opts,
            uuid       : undefined  # uuid=sha1-based id coming from blob
            blob       : required   # unless check=true, we assume misc_node.uuidsha1(opts.blob) == opts.uuid;
                                    # blob must be a string or Buffer
            ttl        : 0          # object in blobstore will have *at least* this ttl in seconds;
                                    # if there is already something in blobstore with longer ttl, we leave it;
                                    # infinite ttl = 0.
            project_id : undefined  # the id of the project that is saving the blob
            account_id : undefined  # the id of the user that is saving the blob
            check      : false      # if true, will give error if misc_node.uuidsha1(opts.blob) != opts.uuid
            compress   : undefined  # optional compression to use: 'gzip', 'zlib'; only used if blob not already in db.
            level      : -1         # compression level (if compressed) -- see https://github.com/expressjs/compression#level
            cb         : required   # cb(err, ttl actually used in seconds); ttl=0 for infinite ttl
        if not Buffer.isBuffer(opts.blob)
            # CRITICAL: We assume everywhere below that opts.blob is a
            # buffer, e.g., in the .toString('hex') method!
            opts.blob = Buffer.from(opts.blob)
        if not opts.uuid?
            opts.uuid = misc_node.uuidsha1(opts.blob)
        else if opts.check
            uuid = misc_node.uuidsha1(opts.blob)
            if uuid != opts.uuid
                opts.cb("the sha1 uuid (='#{uuid}') of the blob must equal the given uuid (='#{opts.uuid}')")
                return
        if not misc.is_valid_uuid_string(opts.uuid)
            opts.cb("uuid is invalid")
            return
        dbg = @_dbg("save_blob(uuid='#{opts.uuid}')")
        dbg()
        rows = ttl = undefined
        async.series([
            (cb) =>
                @_query
                    query : 'SELECT expire FROM blobs'
                    where : "id = $::UUID" : opts.uuid
                    cb    : (err, x) =>
                        rows = x?.rows; cb(err)
            (cb) =>
                if rows.length == 0 and opts.compress
                    dbg("compression requested and blob not already saved, so we compress blob")
                    switch opts.compress
                        when 'gzip'
                            zlib.gzip opts.blob, {level:opts.level}, (err, blob) =>
                                opts.blob = blob; cb(err)
                        when 'zlib'
                            zlib.deflate opts.blob, {level:opts.level}, (err, blob) =>
                                opts.blob = blob; cb(err)
                        else
                            cb("compression format '#{opts.compress}' not implemented")
                else
                    cb()
            (cb) =>
                if rows.length == 0
                    dbg("nothing in DB, so we insert the blob.")
                    ttl = opts.ttl
                    @_query
                        query  : "INSERT INTO blobs"
                        values :
                            id         : opts.uuid
                            blob       : '\\x'+opts.blob.toString('hex')
                            project_id : opts.project_id
                            account_id : opts.account_id
                            count      : 0
                            size       : opts.blob.length
                            created    : new Date()
                            compress   : opts.compress
                            expire     : if ttl then expire_time(ttl)
                        cb     : cb
                else
                    dbg("blob already in the DB, so see if we need to change the expire time")
                    @_extend_blob_ttl
                        expire : rows[0].expire
                        ttl    : opts.ttl
                        uuid   : opts.uuid
                        cb     : (err, _ttl) =>
                            ttl = _ttl; cb(err)
            (cb) =>
                # double check that the blob definitely exists and has correct expire
                # See discussion at https://github.com/sagemathinc/cocalc/issues/7715
                # The problem is that maybe with VERY low probability somehow we extend
                # the blob ttl at the same time that we're deleting blobs and the extend
                # is too late and does an empty update.
                @_query
                    query : 'SELECT expire FROM blobs'
                    where : "id = $::UUID" : opts.uuid
                    cb    : (err, x) =>
                        if err
                            cb(err)
                            return
                        # some consistency checks
                        rows = x?.rows
                        if rows.length == 0
                            cb("blob got removed while saving it")
                            return
                        if !opts.ttl and rows[0].expire
                            cb("blob should have infinite ttl but it has expire set")
                            return
                        cb()

        ], (err) => opts.cb(err, ttl))
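
    # Illustrative usage sketch (not part of the original file) -- save a small
    # compressed blob with a one-day ttl and log the ttl actually used:
    #
    #     db.save_blob
    #         blob     : Buffer.from('hello world')
    #         ttl      : 24*60*60
    #         compress : 'zlib'
    #         cb       : (err, ttl) -> console.log(err ? "saved; effective ttl=#{ttl}s")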

    # Used internally by save_blob to possibly extend the expire time of a blob.
    _extend_blob_ttl : (opts) =>
        opts = defaults opts,
            expire : undefined   # what expire is currently set to in the database
            ttl    : required    # requested ttl -- extend expire to at least this
            uuid   : required
            cb     : required    # (err, effective ttl (with 0=oo))
        if not misc.is_valid_uuid_string(opts.uuid)
            opts.cb("uuid is invalid")
            return
        if not opts.expire
            # ttl already infinite -- nothing to do
            opts.cb(undefined, 0)
            return
        new_expire = ttl = undefined
        if opts.ttl
            # saved ttl is finite as is requested one; change in DB if requested is longer
            z = expire_time(opts.ttl)
            if z > opts.expire
                new_expire = z
                ttl = opts.ttl
            else
                ttl = (opts.expire - new Date())/1000.0
        else
            # saved ttl is finite but requested one is infinite
            ttl = new_expire = 0
        if new_expire?
            # change the expire time for the blob already in the DB
            @_query
                query : 'UPDATE blobs'
                where : "id = $::UUID" : opts.uuid
                set   : "expire :: TIMESTAMP " : if new_expire == 0 then undefined else new_expire
                cb    : (err) => opts.cb(err, ttl)
        else
            opts.cb(undefined, ttl)

    get_blob: (opts) =>
        opts = defaults opts,
            uuid       : required
            save_in_db : false  # if true and blob isn't in DB and is only in gcloud, copies to local DB
                                # (for faster access e.g., 20ms versus 5ms -- i.e., not much faster; gcloud is FAST too.)
            touch      : true
            cb         : required   # cb(err) or cb(undefined, blob_value) or cb(undefined, undefined) in case no such blob
        if not misc.is_valid_uuid_string(opts.uuid)
            opts.cb("uuid is invalid")
            return
        x    = undefined
        blob = undefined
        async.series([
            (cb) =>
                @_query
                    query : "SELECT expire, blob, gcloud, compress FROM blobs"
                    where : "id = $::UUID" : opts.uuid
                    cb    : one_result (err, _x) =>
                        x = _x; cb(err)
            (cb) =>
                if not x?
                    # nothing to do -- blob not in db (probably expired)
                    cb()
                else if x.expire and x.expire <= new Date()
                    # the blob already expired -- background delete it
                    @_query   # delete it (but don't wait for this to finish)
                        query : "DELETE FROM blobs"
                        where : "id = $::UUID" : opts.uuid
                    cb()
                else if x.blob?
                    # blob not expired and is in database
                    blob = x.blob
                    cb()
                else if x.gcloud
                    if not COCALC_BLOB_STORE?
                        # see comment https://github.com/sagemathinc/cocalc/pull/8110
                        COCALC_BLOB_STORE = COCALC_BLOB_STORE_FALLBACK
                    # blob not available locally, but should be in a Google cloud storage bucket -- try to get it
                    # NOTE: we now ignore the actual content of x.gcloud -- we don't support spreading blobs
                    # across multiple buckets... as it isn't needed because buckets are infinite, and it
                    # is potentially confusing to manage.
                    @blob_store().read
                        name : opts.uuid
                        cb   : (err, _blob) =>
                            if err
                                cb(err)
                            else
                                blob = _blob
                                cb()
                                if opts.save_in_db
                                    # also save in database so will be faster next time (again, don't wait on this)
                                    @_query   # update it (but don't wait for this to finish)
                                        query : "UPDATE blobs"
                                        set   : {blob : blob}
                                        where : "id = $::UUID" : opts.uuid
                else
                    # blob not local and not in gcloud -- this shouldn't happen
                    # (just view this as "expired" by not setting blob)
                    cb()
            (cb) =>
                if not blob? or not x?.compress?
                    cb(); return
                # blob is compressed -- decompress it
                switch x.compress
                    when 'gzip'
                        zlib.gunzip blob, (err, _blob) =>
                            blob = _blob; cb(err)
                    when 'zlib'
                        zlib.inflate blob, (err, _blob) =>
                            blob = _blob; cb(err)
                    else
                        cb("compression format '#{x.compress}' not implemented")
        ], (err) =>
            opts.cb(err, blob)
            if blob? and opts.touch
                # blob was pulled from db or gcloud, so note that it was accessed (updates a counter)
                @touch_blob(uuid : opts.uuid)
        )
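
    # Illustrative usage sketch (not part of the original file); get_blob yields
    # undefined if the blob expired or never existed:
    #
    #     db.get_blob
    #         uuid : misc_node.uuidsha1(some_buffer)   # some_buffer is hypothetical
    #         cb   : (err, blob) ->
    #             console.log(err ? "got #{blob?.length ? 0} bytes")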

    touch_blob: (opts) =>
        opts = defaults opts,
            uuid : required
            cb   : undefined
        if not misc.is_valid_uuid_string(opts.uuid)
            opts.cb?("uuid is invalid")
            return
        @_query
            query : "UPDATE blobs SET count = count + 1, last_active = NOW()"
            where : "id = $::UUID" : opts.uuid
            cb    : opts.cb

    blob_store: (bucket) =>
        if not bucket
            bucket = COCALC_BLOB_STORE
        # File system -- could be a big NFS volume, remotely mounted gcsfuse, or just
        # a single big local file system -- etc. -- we don't care.
        return filesystem_bucket(name: bucket)

    # Uploads the blob with given sha1 uuid to gcloud storage, if it hasn't already
    # been uploaded there.  Actually we copy to a directory, which uses gcsfuse to
    # implicitly upload to gcloud...
    copy_blob_to_gcloud: (opts) =>
        opts = defaults opts,
            uuid   : required           # uuid=sha1-based uuid coming from blob
            bucket : COCALC_BLOB_STORE  # name of bucket
            force  : false              # if true, upload even if already uploaded
            remove : false              # if true, deletes blob from database after successful upload to gcloud (to free space)
            cb     : undefined          # cb(err)
        dbg = @_dbg("copy_blob_to_gcloud(uuid='#{opts.uuid}')")
        dbg()
        if not misc.is_valid_uuid_string(opts.uuid)
            dbg("invalid uuid")
            opts.cb?("uuid is invalid")
            return
        if not opts.bucket
            opts.bucket = COCALC_BLOB_STORE_FALLBACK
        locals =
            x: undefined
        async.series([
            (cb) =>
                dbg("get blob info from database")
                @_query
                    query : "SELECT blob, gcloud FROM blobs"
                    where : "id = $::UUID" : opts.uuid
                    cb    : one_result (err, x) =>
                        locals.x = x
                        if err
                            cb(err)
                        else if not x?
                            cb('no such blob')
                        else if not x.blob and not x.gcloud
                            cb('blob not available -- this should not be possible')
                        else if not x.blob and opts.force
                            cb("blob can't be re-uploaded since it was already deleted")
                        else
                            cb()
            (cb) =>
                if (locals.x.gcloud? and not opts.force) or not locals.x.blob?
                    dbg("already uploaded -- don't need to do anything; or already deleted locally")
                    cb(); return
                # upload to Google cloud storage
                locals.bucket = @blob_store(opts.bucket)
                locals.bucket.write
                    name    : opts.uuid
                    content : locals.x.blob
                    cb      : cb
            (cb) =>
                if (locals.x.gcloud? and not opts.force) or not locals.x.blob?
                    # already uploaded -- don't need to do anything; or already deleted locally
                    cb(); return
                dbg("read blob back and compare")  # -- we do *NOT* trust GCS with such important data
                locals.bucket.read
                    name : opts.uuid
                    cb   : (err, data) =>
                        if err
                            cb(err)
                        else if not locals.x.blob.equals(data)
                            dbg("FAILED!")
                            cb("BLOB write to GCS failed check!")
                        else
                            dbg("check succeeded")
                            cb()
            (cb) =>
                if not locals.x.blob?
                    # no blob in db; nothing further to do.
                    cb()
                else
                    # We successfully uploaded to gcloud -- set locals.x.gcloud
                    set = {gcloud: opts.bucket}
                    if opts.remove
                        set.blob = null   # remove blob content from database to save space
                    @_query
                        query : "UPDATE blobs"
                        where : "id = $::UUID" : opts.uuid
                        set   : set
                        cb    : cb
        ], (err) => opts.cb?(err))

    ###
    Back up (at most limit) blobs that haven't previously been backed up, and put them in
    a tarball in the given path.  The tarball's name is the time when the backup starts.
    The tarball is compressed using gzip compression.

       db._error_thresh=1e6; db.backup_blobs_to_tarball(limit:10000,path:'/backup/tmp-blobs',repeat_until_done:60, cb:done())

    I have not written code to restore from these tarballs.  Assuming the database has been restored,
    so there is an entry in the blobs table for each blob, it would suffice to upload the tarballs,
    then copy their contents straight into the COCALC_BLOB_STORE, and that's it.
    If we don't have the blobs table in the DB, make dummy entries from the blob names in the tarballs.
    ###
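
    ###
    A minimal restore sketch based on the note above (an assumption, not tested
    code; the tarball name is a hypothetical example):

        tar -xzf /backup/tmp-blobs/2017-01-01-000000.tar.gz -C /tmp
        cp /tmp/2017-01-01-000000/* /blobs/    # i.e., into COCALC_BLOB_STORE
    ###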
    backup_blobs_to_tarball: (opts) =>
        opts = defaults opts,
            limit             : 10000     # number of blobs to backup
            path              : required  # path where [timestamp].tar.gz file is placed
            throttle          : 0         # wait this many seconds between pulling blobs from database
            repeat_until_done : 0         # if positive, keeps re-call'ing this function until no more
                                          # results to backup (pauses this many seconds between)
            map_limit         : 5
            cb                : undefined # cb(err, '[timestamp].tar.gz')
        dbg       = @_dbg("backup_blobs_to_tarball(limit=#{opts.limit},path='#{opts.path}')")
        join      = require('path').join
        dir       = misc.date_to_snapshot_format(new Date())
        target    = join(opts.path, dir)
        tarball   = target + '.tar.gz'
        v         = undefined
        to_remove = []
        async.series([
            (cb) =>
                dbg("make target='#{target}'")
                fs.mkdir(target, cb)
            (cb) =>
                dbg("get blobs that we need to back up")
                @_query
                    query     : "SELECT id FROM blobs"
                    where     : "expire IS NULL and backup IS NOT true"
                    limit     : opts.limit
                    timeout_s : TIMEOUT_LONG_S
                    cb        : all_results 'id', (err, x) =>
                        v = x; cb(err)
            (cb) =>
                dbg("backing up #{v.length} blobs")
                f = (id, cb) =>
                    @get_blob
                        uuid  : id
                        touch : false
                        cb    : (err, blob) =>
                            if err
                                dbg("ERROR! blob #{id} -- #{err}")
                                cb(err)
                            else if blob?
                                dbg("got blob #{id} from db -- now write to disk")
                                to_remove.push(id)
                                fs.writeFile join(target, id), blob, (err) =>
                                    if opts.throttle
                                        setTimeout(cb, opts.throttle*1000)
                                    else
                                        cb()
                            else
                                dbg("blob #{id} is expired, so nothing to be done, ever.")
                                cb()
                async.mapLimit(v, opts.map_limit, f, cb)
            (cb) =>
                dbg("successfully wrote all blobs to files; now make tarball")
                misc_node.execute_code
                    command : 'tar'
                    args    : ['zcvf', tarball, dir]
                    path    : opts.path
                    timeout : 3600
                    cb      : cb
            (cb) =>
                dbg("remove temporary blobs")
                f = (x, cb) =>
                    fs.unlink(join(target, x), cb)
                async.mapLimit(to_remove, 10, f, cb)
            (cb) =>
                dbg("remove temporary directory")
                fs.rmdir(target, cb)
            (cb) =>
                dbg("backup succeeded completely -- mark all blobs as backed up")
                @_query
                    query : "UPDATE blobs"
                    set   : {backup: true}
                    where : "id = ANY($)" : v
                    cb    : cb
        ], (err) =>
            if err
                dbg("ERROR: #{err}")
                opts.cb?(err)
            else
                dbg("done")
                if opts.repeat_until_done and to_remove.length == opts.limit
                    f = () =>
                        @backup_blobs_to_tarball(opts)
                    setTimeout(f, opts.repeat_until_done*1000)
                else
                    opts.cb?(undefined, tarball)
        )

    ###
    Copy all blobs that will never expire to a google cloud storage bucket.

        errors={}; db.copy_all_blobs_to_gcloud(limit:500, cb:done(), remove:true, repeat_until_done_s:10, errors:errors)
    ###
    copy_all_blobs_to_gcloud: (opts) =>
        opts = defaults opts,
            bucket              : COCALC_BLOB_STORE
            limit               : 1000      # copy this many in each batch
            map_limit           : 1         # copy this many at once.
            throttle            : 0         # wait this many seconds between uploads
            repeat_until_done_s : 0         # if nonzero, waits this many seconds, then calls this function again until nothing gets uploaded.
            errors              : undefined # object: used to accumulate errors -- if not given, then everything will terminate on first error
            remove              : false
            cutoff              : '1 month' # postgresql interval - only copy blobs to gcloud that haven't been accessed at least this long.
            cb                  : required
        dbg = @_dbg("copy_all_blobs_to_gcloud")
        dbg()
        # This query selects the blobs that will never expire, but have not yet
        # been copied to Google cloud storage.
        dbg("getting blob id's...")
        @_query
            query     : 'SELECT id, size FROM blobs'
            where     : "expire IS NULL AND gcloud IS NULL and (last_active <= NOW() - INTERVAL '#{opts.cutoff}' OR last_active IS NULL)"
            limit     : opts.limit
            timeout_s : TIMEOUT_LONG_S
            ## order_by : 'id'  # this is not important and was causing VERY excessive load in production (due to bad query planning?!)
            cb        : all_results (err, v) =>
                if err
                    dbg("fail: #{err}")
                    opts.cb(err)
                else
                    n = v.length; m = 0
                    dbg("got #{n} blob id's")
                    f = (x, cb) =>
                        m += 1
                        k = m; start = new Date()
                        dbg("**** #{k}/#{n}: uploading #{x.id} of size #{x.size/1000}KB")
                        @copy_blob_to_gcloud
                            uuid   : x.id
                            bucket : opts.bucket
                            remove : opts.remove
                            cb     : (err) =>
                                dbg("**** #{k}/#{n}: finished -- #{err}; size #{x.size/1000}KB; time=#{new Date() - start}ms")
                                if err
                                    if opts.errors?
                                        opts.errors[x.id] = err
                                    else
                                        cb(err)
                                        return
                                if opts.throttle
                                    setTimeout(cb, 1000*opts.throttle)
                                else
                                    cb()
                    async.mapLimit v, opts.map_limit, f, (err) =>
                        dbg("finished this round -- #{err}")
                        if err and not opts.errors?
                            opts.cb(err)
                            return
                        if opts.repeat_until_done_s and v.length > 0
                            dbg("repeat_until_done triggering another round")
                            setTimeout((=> @copy_all_blobs_to_gcloud(opts)), opts.repeat_until_done_s*1000)
                        else
                            dbg("done : #{misc.to_json(opts.errors)}")
                            opts.cb(if misc.len(opts.errors) > 0 then opts.errors)

    blob_maintenance: (opts) =>
        opts = defaults opts,
            path              : '/backup/blobs'
            map_limit         : 1
            blobs_per_tarball : 10000
            throttle          : 0
            cb                : undefined
        dbg = @_dbg("blob_maintenance()")
        dbg()
        async.series([
            (cb) =>
                dbg("maintain the patches and syncstrings")
                @syncstring_maintenance
                    repeat_until_done : true
                    limit             : 500
                    map_limit         : opts.map_limit
                    delay             : 1000    # 1s, since syncstring_maintenance heavily loads db
                    cb                : cb
            (cb) =>
                dbg("backup_blobs_to_tarball")
                @backup_blobs_to_tarball
                    throttle          : opts.throttle
                    limit             : opts.blobs_per_tarball
                    path              : opts.path
                    map_limit         : opts.map_limit
                    repeat_until_done : 5
                    cb                : cb
            (cb) =>
                dbg("copy_all_blobs_to_gcloud")
                errors = {}
                @copy_all_blobs_to_gcloud
                    limit               : 1000
                    repeat_until_done_s : 5
                    errors              : errors
                    remove              : true
                    map_limit           : opts.map_limit
                    throttle            : opts.throttle
                    cb                  : (err) =>
                        if misc.len(errors) > 0
                            dbg("errors! #{misc.to_json(errors)}")
                        cb(err)
        ], (err) =>
            opts.cb?(err)
        )

    remove_blob_ttls: (opts) =>
        opts = defaults opts,
            uuids : required   # uuid=sha1-based from blob
            cb    : required   # cb(err)
        @_query
            query : "UPDATE blobs"
            set   : {expire: null}
            where : "id::UUID = ANY($)" : (x for x in opts.uuids when misc.is_valid_uuid_string(x))
            cb    : opts.cb

    # If blob has been copied to gcloud, remove the BLOB part of the data
    # from the database (to save space).  If not copied, copy it to gcloud,
    # then remove from database.
    close_blob: (opts) =>
        opts = defaults opts,
            uuid   : required   # uuid=sha1-based from blob
            bucket : COCALC_BLOB_STORE
            cb     : undefined  # cb(err)
        if not misc.is_valid_uuid_string(opts.uuid)
            opts.cb?("uuid is invalid")
            return
        async.series([
            (cb) =>
                # ensure blob is in gcloud
                @_query
                    query : 'SELECT gcloud FROM blobs'
                    where : 'id = $::UUID' : opts.uuid
                    cb    : one_result 'gcloud', (err, gcloud) =>
                        if err
                            cb(err)
                        else if not gcloud
                            # not yet copied to gcloud storage
                            @copy_blob_to_gcloud
                                uuid   : opts.uuid
                                bucket : opts.bucket
                                cb     : cb
                        else
                            # copied already
                            cb()
            (cb) =>
                # now blob is in gcloud -- delete blob data in database
                @_query
                    query : 'UPDATE blobs'
                    where : 'id = $::UUID' : opts.uuid
                    set   : {blob: null}
                    cb    : cb
        ], (err) => opts.cb?(err))


    ###
    # Syncstring maintenance
    ###
    syncstring_maintenance: (opts) =>
        opts = defaults opts,
            age_days          : 30    # archive patches of syncstrings that are inactive for at least this long
            map_limit         : 1     # how much parallelism to use
            limit             : 1000  # do only this many
            repeat_until_done : true
            delay             : 0
            cb                : undefined
        dbg = @_dbg("syncstring_maintenance")
        dbg(opts)
        syncstrings = undefined
        async.series([
            (cb) =>
                dbg("determine inactive syncstring ids")
                @_query
                    query     : 'SELECT string_id FROM syncstrings'
                    where     : [{'last_active <= $::TIMESTAMP' : misc.days_ago(opts.age_days)}, 'archived IS NULL', 'huge IS NOT TRUE']
                    limit     : opts.limit
                    timeout_s : TIMEOUT_LONG_S
                    cb        : all_results 'string_id', (err, v) =>
                        syncstrings = v
                        cb(err)
            (cb) =>
                dbg("archive patches for inactive syncstrings")
                i = 0
                f = (string_id, cb) =>
                    i += 1
                    console.log("*** #{i}/#{syncstrings.length}: archiving string #{string_id} ***")
                    @archivePatches
                        string_id : string_id
                        cb        : (err) ->
                            if err or not opts.delay
                                cb(err)
                            else
                                setTimeout(cb, opts.delay)
                async.mapLimit(syncstrings, opts.map_limit, f, cb)
        ], (err) =>
            if err
                opts.cb?(err)
            else if opts.repeat_until_done and syncstrings.length == opts.limit
                dbg("doing it again")
                @syncstring_maintenance(opts)
            else
                opts.cb?()
        )

    archivePatches: (opts) =>
        try
            await blobs.archivePatches({db:@, ...opts})
            opts.cb?()
        catch err
            opts.cb?(err)

    unarchivePatches: (string_id) =>
        await blobs.unarchivePatches({db:@, string_id:string_id})

    ###
    Export/import of syncstring history and info.
    ###
    export_patches: (opts) =>
        try
            patches = await blobs.exportPatches(opts.string_id)
            opts.cb(undefined, patches)
            return patches
        catch err
            opts.cb(err)

    import_patches: (opts) =>
        opts = defaults opts,
            patches   : required   # array as exported by export_patches
            string_id : undefined  # if given, change the string_id when importing the patches to this
            cb        : undefined
        patches = opts.patches
        if patches.length == 0   # easy
            opts.cb?()
            return
        if patches[0].id?
            # convert from OLD RethinkDB format!
            v = []
            for x in patches
                patch =
                    string_id : x.id[0]
                    time      : new Date(x.id[1])
                    user_id   : x.user
                    patch     : x.patch
                    snapshot  : x.snapshot
                    sent      : x.sent
                    prev      : x.prev
                v.push(patch)
            patches = v
        # change string_id, if requested.
        if opts.string_id?
            for x in patches
                x.string_id = opts.string_id
        # We break into blocks since there is a limit (about 65K) on
        # number of params that can be inserted in a single query.
        insert_block_size = 1000
        f = (i, cb) =>
            @_query
                query    : 'INSERT INTO patches'
                values   : patches.slice(insert_block_size*i, insert_block_size*(i+1))
                conflict : 'ON CONFLICT DO NOTHING'  # in case multiple servers (or this server) are doing this import at once -- this can and does happen sometimes.
                cb       : cb
        async.mapSeries([0...patches.length/insert_block_size], f, (err) => opts.cb?(err))
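
    # Illustrative sketch (not part of the original file): copy one syncstring's
    # history onto another; old_id and new_id are hypothetical string_id's:
    #
    #     patches = await db.export_patches(string_id: old_id, cb: ->)
    #     db.import_patches
    #         patches   : patches
    #         string_id : new_id    # rewrite the string_id on import
    #         cb        : (err) -> console.log("import done", err)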

    delete_blob: (opts) =>
        opts = defaults opts,
            uuid : required
            cb   : undefined
        if not misc.is_valid_uuid_string(opts.uuid)
            opts.cb?("uuid is invalid")
            return
        gcloud = undefined
        dbg = @_dbg("delete_blob(uuid='#{opts.uuid}')")
        async.series([
            (cb) =>
                dbg("check if blob in gcloud")
                @_query
                    query : "SELECT gcloud FROM blobs"
                    where : "id = $::UUID" : opts.uuid
                    cb    : one_result 'gcloud', (err, x) =>
                        gcloud = x
                        cb(err)
            (cb) =>
                if not gcloud or not COCALC_BLOB_STORE
                    cb()
                    return
                dbg("delete from gcloud")
                @blob_store(gcloud).delete
                    name : opts.uuid
                    cb   : cb
            (cb) =>
                dbg("delete from local database")
                @_query
                    query : "DELETE FROM blobs"
                    where : "id = $::UUID" : opts.uuid
                    cb    : cb
        ], (err) => opts.cb?(err))