Path: blob/main/components/content-service/pkg/storage/gcloud.go
2501 views
// Copyright (c) 2020 Gitpod GmbH. All rights reserved.1// Licensed under the GNU Affero General Public License (AGPL).2// See License.AGPL.txt in the project root for license information.34package storage56import (7"context"8"encoding/hex"9"errors"10"fmt"11"io"12"io/fs"13"net/http"14"os"15"os/exec"16"path/filepath"17"strings"18"sync"19"time"2021gcpstorage "cloud.google.com/go/storage"22validation "github.com/go-ozzo/ozzo-validation"23"github.com/opentracing/opentracing-go"24"golang.org/x/oauth2/google"25"golang.org/x/xerrors"26"google.golang.org/api/googleapi"27"google.golang.org/api/iterator"28"google.golang.org/api/option"2930"github.com/gitpod-io/gitpod/common-go/log"31"github.com/gitpod-io/gitpod/common-go/tracing"32config "github.com/gitpod-io/gitpod/content-service/api/config"33"github.com/gitpod-io/gitpod/content-service/pkg/archive"34)3536var _ DirectAccess = &DirectGCPStorage{}3738var validateExistsInFilesystem = validation.By(func(o interface{}) error {39s, ok := o.(string)40if !ok {41return xerrors.Errorf("field should be string")42}4344if s == "" {45// don't make this field required46return nil47}4849_, err := os.Stat(s)50return err51})5253// Validate checks if the GCloud storage GCPconfig is valid54func ValidateGCPConfig(c *config.GCPConfig) error {55return validation.ValidateStruct(c,56validation.Field(&c.CredentialsFile, validateExistsInFilesystem),57validation.Field(&c.Region, validation.Required),58validation.Field(&c.Project, validation.Required),59)60}6162// newDirectGCPAccess provides direct access to the remote storage system63func newDirectGCPAccess(cfg config.GCPConfig, stage config.Stage) (*DirectGCPStorage, error) {64if err := ValidateGCPConfig(&cfg); err != nil {65return nil, err66}6768return &DirectGCPStorage{69Stage: stage,70GCPConfig: cfg,71}, nil72}7374// DirectGCPStorage stores data in Google Cloud buckets, following a particular naming scheme75type DirectGCPStorage struct {76Username string77WorkspaceName string78InstanceID string79GCPConfig config.GCPConfig80Stage config.Stage8182client *gcpstorage.Client8384// ObjectAccess just exists so that we can swap out the stream access during testing85ObjectAccess func(ctx context.Context, btk, obj string) (io.ReadCloser, bool, error)86}8788// Validate checks if the GCloud storage is GCPconfigured properly89func (rs *DirectGCPStorage) Validate() error {90err := ValidateGCPConfig(&rs.GCPConfig)91if err != nil {92return err93}9495return validation.ValidateStruct(rs,96validation.Field(&rs.Username, validation.Required),97validation.Field(&rs.WorkspaceName, validation.Required),98validation.Field(&rs.Stage, validation.Required),99)100}101102// Init initializes the remote storage - call this before calling anything else on the interface103func (rs *DirectGCPStorage) Init(ctx context.Context, owner, workspace, instance string) (err error) {104//nolint:ineffassign105span, ctx := opentracing.StartSpanFromContext(ctx, "GCloudBucketRemotegcpStorage.Init")106defer tracing.FinishSpan(span, &err)107108rs.Username = owner109rs.WorkspaceName = workspace110rs.InstanceID = instance111112// now that we have all the information complete, validate if we're good to go113err = rs.Validate()114if err != nil {115return xerrors.Errorf("invalid GCloud remote storage GCPconfig: %w", err)116}117118client, err := newGCPClient(ctx, rs.GCPConfig)119if err != nil {120return err121}122rs.client = client123124if rs.ObjectAccess == nil {125rs.ObjectAccess = rs.defaultObjectAccess126}127128return nil129}130131// EnsureExists makes sure that the remote storage location exists and can be up- or downloaded from132func (rs *DirectGCPStorage) EnsureExists(ctx context.Context) (err error) {133return gcpEnsureExists(ctx, rs.client, rs.bucketName(), rs.GCPConfig)134}135136func gcpEnsureExists(ctx context.Context, client *gcpstorage.Client, bucketName string, gcpConfig config.GCPConfig) (err error) {137//nolint:ineffassign138span, ctx := opentracing.StartSpanFromContext(ctx, "GCloudBucketRemotegcpStorage.EnsureExists")139defer tracing.FinishSpan(span, &err)140141if client == nil {142return xerrors.Errorf("no gcloud client available - did you call Init()?")143}144145hdl := client.Bucket(bucketName)146_, err = hdl.Attrs(ctx)147if err == nil {148// bucket exists and everything is fine - we're done here149return150}151if err != nil && err != gcpstorage.ErrBucketNotExist {152return xerrors.Errorf("cannot ensure storage exists: %w", err)153}154155log.WithField("bucketName", bucketName).Debug("Creating bucket")156err = hdl.Create(ctx, gcpConfig.Project, &gcpstorage.BucketAttrs{157Location: gcpConfig.Region,158})159if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusConflict && strings.Contains(strings.ToLower(e.Message), "you already own") {160// Looks like we had a bucket creation race and lost.161// That's ok - at least the bucket exists now and is still owned by us.162} else if err != nil {163return xerrors.Errorf("cannot create bucket: %w", err)164}165166return nil167}168169func (rs *DirectGCPStorage) defaultObjectAccess(ctx context.Context, bkt, obj string) (io.ReadCloser, bool, error) {170if rs.client == nil {171return nil, false, xerrors.Errorf("no gcloud client available - did you call Init()?")172}173174objHandle := rs.client.Bucket(bkt).Object(obj)175rc, err := objHandle.NewReader(ctx)176if err != nil {177return nil, false, err178}179180return rc, false, nil181}182183func (rs *DirectGCPStorage) download(ctx context.Context, destination string, bkt string, obj string, mappings []archive.IDMapping) (found bool, err error) {184//nolint:ineffassign185span, ctx := opentracing.StartSpanFromContext(ctx, "download")186span.SetTag("gcsBkt", bkt)187span.SetTag("gcsObj", obj)188defer tracing.FinishSpan(span, &err)189190backupDir, err := os.MkdirTemp("", "backup-")191if err != nil {192return true, err193}194defer os.RemoveAll(backupDir)195196var wg sync.WaitGroup197198wg.Add(1)199backupSpan := opentracing.StartSpan("downloadBackup", opentracing.ChildOf(span.Context()))200201go func() {202defer wg.Done()203204sa := ""205if rs.GCPConfig.CredentialsFile != "" {206sa = fmt.Sprintf(`-o "Credentials:gs_service_key_file=%v"`, rs.GCPConfig.CredentialsFile)207}208209args := fmt.Sprintf(`gsutil -q -m %v\210-o "GSUtil:sliced_object_download_max_components=8" \211-o "GSUtil:parallel_thread_count=1" \212cp gs://%s %s`, sa, filepath.Join(bkt, obj), backupDir)213214log.WithField("flags", args).Debug("gsutil flags")215216cmd := exec.Command("/bin/bash", []string{"-c", args}...)217var out []byte218out, err = cmd.CombinedOutput()219if err != nil {220log.WithError(err).WithField("out", string(out)).Error("unexpected error downloading file to GCS using gsutil")221err = xerrors.Errorf("unexpected error downloading backup")222return223}224}()225226wg.Wait()227tracing.FinishSpan(backupSpan, &err)228229rc, err := os.Open(filepath.Join(backupDir, obj))230if err != nil {231return true, err232}233defer rc.Close()234235err = extractTarbal(ctx, destination, rc, mappings)236if err != nil {237return true, err238}239240if err := rs.fixLegacyFilenames(ctx, destination); err != nil {241return true, err242}243244return true, nil245}246247/* tar files produced by the previous sync process contain their workspace ID in the filenames.248* This behavior is difficult for snapshot backups, thus ws-daemond does not do that. However,249* we need to be able to handle the "old" tar files, hence this legacy mode. See #1559.250*/251func (rs *DirectGCPStorage) fixLegacyFilenames(ctx context.Context, destination string) (err error) {252//nolint:staticcheck,ineffassign253span, ctx := opentracing.StartSpanFromContext(ctx, "fixLegacyFilenames")254defer tracing.FinishSpan(span, &err)255256legacyPath := filepath.Join(destination, rs.WorkspaceName)257if fi, err := os.Stat(legacyPath); errors.Is(err, fs.ErrNotExist) {258// legacy path does not exist, nothing to do here259return nil260} else if fi.IsDir() {261log.WithField("destination", destination).WithField("legacyPath", legacyPath).Info("Handling legacy backup")262/* legacy path exists and is a directory - move it's content and remove the legacy path.263*264* Using mv here is difficult as the wildcard expansion is done by the shell and not mv,265* thus we'd need to wrap the mv call in a sh call -> too many dependencies to the outside world.266*/267fis, err := os.ReadDir(legacyPath)268if err != nil {269return err270}271for _, fi := range fis {272src := filepath.Join(legacyPath, fi.Name())273dst := filepath.Join(destination, fi.Name())274log.WithField("src", src).WithField("dst", dst).Debug("moving file")275if err := os.Rename(src, dst); err != nil {276return xerrors.Errorf("mv %s %s: %s", src, dst, err)277}278}279280if err := os.Remove(legacyPath); err != nil {281return err282}283}284285return nil286}287288// Download takes the latest state from the remote storage and downloads it to a local path289func (rs *DirectGCPStorage) Download(ctx context.Context, destination string, name string, mappings []archive.IDMapping) (bool, error) {290return rs.download(ctx, destination, rs.bucketName(), rs.objectName(name), mappings)291}292293// DownloadSnapshot downloads a snapshot. The snapshot name is expected to be one produced by Qualify294func (rs *DirectGCPStorage) DownloadSnapshot(ctx context.Context, destination string, name string, mappings []archive.IDMapping) (bool, error) {295bkt, obj, err := ParseSnapshotName(name)296if err != nil {297return false, err298}299300return rs.download(ctx, destination, bkt, obj, mappings)301}302303// ParseSnapshotName parses the name of a snapshot into bucket and object304func ParseSnapshotName(name string) (bkt, obj string, err error) {305segments := strings.Split(name, "@")306if len(segments) != 2 {307err = xerrors.Errorf("%s is not a valid GCloud remote storage FQN", name)308return309}310311obj = segments[0]312bkt = segments[1]313return314}315316// ListObjects returns all objects found with the given prefix. Returns an empty list if the bucket does not exuist (yet).317func (rs *DirectGCPStorage) ListObjects(ctx context.Context, prefix string) (objects []string, err error) {318bkt := rs.client.Bucket(rs.bucketName())319_, err = bkt.Attrs(ctx)320if errors.Is(err, gcpstorage.ErrBucketNotExist) {321// bucket does not exist: nothing to list322return nil, nil323}324if err != nil {325return nil, xerrors.Errorf("cannot list objects: %w", err)326}327328iter := bkt.Objects(ctx, &gcpstorage.Query{Prefix: prefix})329var obj *gcpstorage.ObjectAttrs330for obj, err = iter.Next(); obj != nil; obj, err = iter.Next() {331objects = append(objects, obj.Name)332}333if err != iterator.Done && err != nil {334return nil, xerrors.Errorf("cannot iterate list objects: %w", err)335}336337return objects, nil338}339340// Qualify fully qualifies a snapshot name so that it can be downloaded using DownloadSnapshot341func (rs *DirectGCPStorage) Qualify(name string) string {342return fmt.Sprintf("%s@%s", rs.objectName(name), rs.bucketName())343}344345// UploadInstance takes all files from a local location and uploads it to the per-instance remote storage346func (rs *DirectGCPStorage) UploadInstance(ctx context.Context, source string, name string, opts ...UploadOption) (bucket, object string, err error) {347if rs.InstanceID == "" {348return "", "", xerrors.Errorf("instanceID is required to comput object name")349}350return rs.Upload(ctx, source, InstanceObjectName(rs.InstanceID, name), opts...)351}352353// Upload takes all files from a local location and uploads it to the remote storage354func (rs *DirectGCPStorage) Upload(ctx context.Context, source string, name string, opts ...UploadOption) (bucket, object string, err error) {355//nolint:ineffassign356span, ctx := opentracing.StartSpanFromContext(ctx, "GCloudBucketRemotegcpStorage.Upload")357defer tracing.FinishSpan(span, &err)358log := log.WithFields(log.OWI(rs.Username, rs.WorkspaceName, ""))359360if rs.client == nil {361err = xerrors.Errorf("no gcloud client available - did you call Init()?")362return363}364365sfn, err := os.Open(source)366if err != nil {367err = xerrors.Errorf("cannot open file for uploading: %w", err)368return369}370defer sfn.Close()371372stat, err := sfn.Stat()373if err != nil {374return375}376377totalSize := stat.Size()378span.SetTag("totalSize", totalSize)379380bucket = rs.bucketName()381object = rs.objectName(name)382383uploadSpan := opentracing.StartSpan("remote-upload", opentracing.ChildOf(span.Context()))384uploadSpan.SetTag("bucket", bucket)385uploadSpan.SetTag("obj", object)386387err = gcpEnsureExists(ctx, rs.client, bucket, rs.GCPConfig)388if err != nil {389err = xerrors.Errorf("unexpected error: %w", err)390return391}392393var wg sync.WaitGroup394395wg.Add(1)396397go func() {398defer wg.Done()399400sa := ""401if rs.GCPConfig.CredentialsFile != "" {402sa = fmt.Sprintf(`-o "Credentials:gs_service_key_file=%v"`, rs.GCPConfig.CredentialsFile)403}404405args := fmt.Sprintf(`gsutil -q -m %v\406-o "GSUtil:parallel_composite_upload_threshold=150M" \407-o "GSUtil:parallel_process_count=3" \408-o "GSUtil:parallel_thread_count=6" \409cp %s gs://%s`, sa, source, filepath.Join(bucket, object))410411log.WithField("flags", args).Debug("gsutil flags")412413cmd := exec.Command("/bin/bash", []string{"-c", args}...)414var out []byte415out, err = cmd.CombinedOutput()416if err != nil {417log.WithError(err).WithField("out", string(out)).Error("unexpected error uploading file to GCS using gsutil")418err = xerrors.Errorf("unexpected error uploading backup")419return420}421}()422423wg.Wait()424425uploadSpan.Finish()426427err = nil428return429}430431func (rs *DirectGCPStorage) bucketName() string {432return gcpBucketName(rs.Stage, rs.Username)433}434435// Bucket provides the bucket name for a particular user436func (rs *DirectGCPStorage) Bucket(ownerID string) string {437return gcpBucketName(rs.Stage, ownerID)438}439440// BackupObject returns a backup's object name that a direct downloader would download441func (rs *DirectGCPStorage) BackupObject(name string) string {442return rs.objectName(name)443}444445func gcpBucketName(stage config.Stage, ownerID string) string {446return fmt.Sprintf("gitpod-%s-user-%s", stage, ownerID)447}448449func gcpWorkspaceBackupObjectName(workspaceID string, name string) string {450return fmt.Sprintf("%s/%s", workspaceID, name)451}452453func (rs *DirectGCPStorage) workspacePrefix() string {454return fmt.Sprintf("workspaces/%s", rs.WorkspaceName)455}456457func (rs *DirectGCPStorage) objectName(name string) string {458return gcpWorkspaceBackupObjectName(rs.workspacePrefix(), name)459}460461func newGCPClient(ctx context.Context, cfg config.GCPConfig) (*gcpstorage.Client, error) {462credfile := cfg.CredentialsFile463if tproot := os.Getenv("TELEPRESENCE_ROOT"); tproot != "" {464credfile = filepath.Join(tproot, credfile)465}466467client, err := gcpstorage.NewClient(ctx, option.WithCredentialsFile(credfile))468if err != nil {469return nil, xerrors.Errorf("cannot create GCP storage client: %w", err)470}471return client, nil472}473474func newPresignedGCPAccess(config config.GCPConfig, stage config.Stage) (*PresignedGCPStorage, error) {475err := ValidateGCPConfig(&config)476if err != nil {477return nil, xerrors.Errorf("invalid config: %w", err)478}479480credfile := config.CredentialsFile481if tproot := os.Getenv("TELEPRESENCE_ROOT"); tproot != "" {482credfile = filepath.Join(tproot, credfile)483}484485jsonKey, err := os.ReadFile(credfile)486if err != nil {487return nil, xerrors.Errorf("cannot read private key: %w", err)488}489privateKey, err := google.JWTConfigFromJSON(jsonKey)490if err != nil {491return nil, xerrors.Errorf("cannot get private key: %w", err)492}493494ctx, cancel := context.WithCancel(context.Background())495defer cancel()496497// We create a client here just to make sure that we can498client, err := gcpstorage.NewClient(ctx, option.WithCredentialsFile(credfile))499if err != nil {500return nil, xerrors.Errorf("cannot create GCP storage client: %w", err)501}502client.Close()503504if err != nil {505return nil, xerrors.Errorf("cannot get Google access ID: %w", err)506}507508return &PresignedGCPStorage{509config: config,510stage: stage,511privateKey: privateKey.PrivateKey,512accessID: privateKey.Email,513}, nil514}515516// PresignedGCPStorage provides presigned URLs to access GCP storage objects517type PresignedGCPStorage struct {518config config.GCPConfig519stage config.Stage520privateKey []byte521accessID string522}523524// Bucket provides the bucket name for a particular user525func (p *PresignedGCPStorage) Bucket(owner string) string {526return gcpBucketName(p.stage, owner)527}528529// BlobObject returns a blob's object name530func (p *PresignedGCPStorage) BlobObject(userID, name string) (string, error) {531return blobObjectName(name)532}533534// EnsureExists makes sure that the remote storage location exists and can be up- or downloaded from535func (p *PresignedGCPStorage) EnsureExists(ctx context.Context, bucket string) (err error) {536client, err := newGCPClient(ctx, p.config)537if err != nil {538return err539}540//nolint:staticcheck541defer client.Close()542543return gcpEnsureExists(ctx, client, bucket, p.config)544}545546// DiskUsage gives the total objects size of objects that have the given prefix547func (p *PresignedGCPStorage) DiskUsage(ctx context.Context, bucket string, prefix string) (size int64, err error) {548client, err := newGCPClient(ctx, p.config)549if err != nil {550return551}552//nolint:staticcheck553defer client.Close()554555ctx, cancel := context.WithTimeout(ctx, 10*time.Second)556defer cancel()557558if !strings.HasSuffix(prefix, "/") {559prefix = prefix + "/"560}561562var total int64563it := client.Bucket(bucket).Objects(ctx, &gcpstorage.Query{564Prefix: prefix,565})566for {567attrs, err := it.Next()568if err == iterator.Done {569break570}571if err != nil {572return 0, err573}574total += attrs.Size575}576577return total, nil578}579580// SignDownload provides presigned URLs to access remote storage objects581func (p *PresignedGCPStorage) SignDownload(ctx context.Context, bucket, object string, options *SignedURLOptions) (*DownloadInfo, error) {582client, err := newGCPClient(ctx, p.config)583if err != nil {584return nil, err585}586//nolint:staticcheck587defer client.Close()588589bkt := client.Bucket(bucket)590_, err = bkt.Attrs(ctx)591if errors.Is(err, gcpstorage.ErrBucketNotExist) {592return nil, ErrNotFound593}594if err != nil {595return nil, err596}597598obj := bkt.Object(object)599attrs, err := obj.Attrs(ctx)600if errors.Is(err, gcpstorage.ErrObjectNotExist) {601return nil, ErrNotFound602}603if err != nil {604return nil, err605}606res, err := p.downloadInfo(ctx, client, attrs, options)607if err != nil {608return nil, err609}610611return res, nil612}613614func (p *PresignedGCPStorage) downloadInfo(ctx context.Context, client *gcpstorage.Client, obj *gcpstorage.ObjectAttrs, options *SignedURLOptions) (*DownloadInfo, error) {615meta := &ObjectMeta{616ContentType: obj.ContentType,617OCIMediaType: obj.Metadata[ObjectAnnotationOCIContentType],618Digest: obj.Metadata[ObjectAnnotationDigest],619UncompressedDigest: obj.Metadata[ObjectAnnotationUncompressedDigest],620}621url, err := gcpstorage.SignedURL(obj.Bucket, obj.Name, &gcpstorage.SignedURLOptions{622Method: "GET",623GoogleAccessID: p.accessID,624PrivateKey: p.privateKey,625Expires: time.Now().Add(1 * time.Hour),626ContentType: options.ContentType,627})628if err != nil {629return nil, err630}631632return &DownloadInfo{633Meta: *meta,634URL: url,635Size: obj.Size,636}, nil637}638639// SignUpload describes an object for upload640func (p *PresignedGCPStorage) SignUpload(ctx context.Context, bucket, object string, options *SignedURLOptions) (info *UploadInfo, err error) {641client, err := newGCPClient(ctx, p.config)642if err != nil {643return nil, err644}645//nolint:staticcheck646defer client.Close()647648bkt := client.Bucket(bucket)649_, err = bkt.Attrs(ctx)650if errors.Is(err, gcpstorage.ErrBucketNotExist) {651return nil, ErrNotFound652}653if err != nil {654return nil, err655}656657url, err := gcpstorage.SignedURL(bucket, object, &gcpstorage.SignedURLOptions{658Method: "PUT",659GoogleAccessID: p.accessID,660PrivateKey: p.privateKey,661Expires: time.Now().Add(30 * time.Minute),662ContentType: options.ContentType,663})664if err != nil {665return nil, err666}667668return &UploadInfo{669URL: url,670}, nil671}672673// DeleteObject deletes objects in the given bucket specified by the given query674func (p *PresignedGCPStorage) DeleteObject(ctx context.Context, bucket string, query *DeleteObjectQuery) (err error) {675client, err := newGCPClient(ctx, p.config)676if err != nil {677return err678}679//nolint:staticcheck680defer client.Close()681682if query.Name != "" {683err = client.Bucket(bucket).Object(query.Name).Delete(ctx)684if err != nil {685if errors.Is(err, gcpstorage.ErrBucketNotExist) || errors.Is(err, gcpstorage.ErrObjectNotExist) {686return ErrNotFound687}688689log.WithField("bucket", bucket).WithField("object", query.Name).WithError(err).Warn("cannot delete object")690return err691}692return nil693}694695prefix := query.Prefix696b := client.Bucket(bucket)697var it *gcpstorage.ObjectIterator698if prefix != "" && prefix != "/" {699it = b.Objects(ctx, &gcpstorage.Query{700Prefix: prefix,701})702} else {703it = b.Objects(ctx, nil)704}705for {706attrs, err := it.Next()707if err == iterator.Done {708break709}710// if we get any error besides "done" the iterator is broken: make sure we don't use it again.711if err != nil {712if errors.Is(err, gcpstorage.ErrBucketNotExist) {713return ErrNotFound714}715log.WithField("bucket", bucket).WithError(err).Error("error iterating object")716break717}718err = b.Object(attrs.Name).Delete(ctx)719if err != nil {720if errors.Is(err, gcpstorage.ErrBucketNotExist) || errors.Is(err, gcpstorage.ErrObjectNotExist) {721continue722}723log.WithField("bucket", bucket).WithField("object", attrs.Name).WithError(err).Warn("cannot delete object, continue deleting objects")724}725}726return err727}728729// DeleteBucket deletes a bucket730func (p *PresignedGCPStorage) DeleteBucket(ctx context.Context, userID, bucket string) (err error) {731client, err := newGCPClient(ctx, p.config)732if err != nil {733return err734}735//nolint:staticcheck736defer client.Close()737738err = p.DeleteObject(ctx, bucket, &DeleteObjectQuery{})739if err != nil {740return err741}742743err = client.Bucket(bucket).Delete(ctx)744if err != nil {745if e, ok := err.(*googleapi.Error); ok {746if e.Code == http.StatusNotFound {747return ErrNotFound748}749}750if errors.Is(err, gcpstorage.ErrBucketNotExist) {751return ErrNotFound752}753return err754}755return nil756}757758// ObjectHash gets a hash value of an object759func (p *PresignedGCPStorage) ObjectHash(ctx context.Context, bucket string, obj string) (hash string, err error) {760client, err := newGCPClient(ctx, p.config)761if err != nil {762return "", err763}764//nolint:staticcheck765defer client.Close()766767attr, err := client.Bucket(bucket).Object(obj).Attrs(ctx)768if err != nil {769if errors.Is(err, gcpstorage.ErrBucketNotExist) {770return "", ErrNotFound771}772return "", err773}774return hex.EncodeToString(attr.MD5), nil775}776777func (p *PresignedGCPStorage) ObjectExists(ctx context.Context, bucket, obj string) (bool, error) {778client, err := newGCPClient(ctx, p.config)779if err != nil {780return false, err781}782//nolint:staticcheck783defer client.Close()784785_, err = client.Bucket(bucket).Object(obj).Attrs(ctx)786if err != nil {787if errors.Is(err, gcpstorage.ErrBucketNotExist) {788return false, nil789}790if errors.Is(err, gcpstorage.ErrObjectNotExist) {791return false, nil792}793return false, err794}795return true, nil796}797798// BackupObject returns a backup's object name that a direct downloader would download799func (p *PresignedGCPStorage) BackupObject(ownerID string, workspaceID string, name string) string {800return fmt.Sprintf("workspaces/%s", gcpWorkspaceBackupObjectName(workspaceID, name))801}802803// InstanceObject returns a instance's object name that a direct downloader would download804func (p *PresignedGCPStorage) InstanceObject(ownerID string, workspaceID string, instanceID string, name string) string {805return p.BackupObject(ownerID, workspaceID, InstanceObjectName(instanceID, name))806}807808809