Skip to content

Commit

Permalink
fix(bigquery): create read session with client or job projectID (#10932)
Browse files Browse the repository at this point in the history
When reading result sets with Storage Read API acceleration enabled, currently the read session is created by default in the table's project. This works for cases where the destination table is not specified and is automatically created, which defaults to the project where the query or job was created. But when reading a table directly or specifying a destination table, it doesn't work in cases where the client doesn't have BQ Storage permissions (just table read permission, for example). This is a common use case where some customers have a main billing project, and this project has access to other GCP projects with just permission to read data from BigQuery tables.

With this PR, we default to using the defined Query/Job projectID (which defaults to the current `bigquery.Client.projectID`); when reading a table directly, we also default to the `bigquery.Client.projectID`.

Reported initially on PR #10924

~Supersedes #10924~
  • Loading branch information
alvarowolfx authored Oct 1, 2024
1 parent eb25266 commit f98396e
Show file tree
Hide file tree
Showing 4 changed files with 38 additions and 9 deletions.
12 changes: 7 additions & 5 deletions bigquery/storage_client.go
Original file line number Diff line number Diff line change
Expand Up @@ -95,7 +95,7 @@ func (c *readClient) close() error {
}

// sessionForTable establishes a new session to fetch from a table using the Storage API
func (c *readClient) sessionForTable(ctx context.Context, table *Table, ordered bool) (*readSession, error) {
func (c *readClient) sessionForTable(ctx context.Context, table *Table, rsProjectID string, ordered bool) (*readSession, error) {
tableID, err := table.Identifier(StorageAPIResourceID)
if err != nil {
return nil, err
Expand All @@ -111,6 +111,7 @@ func (c *readClient) sessionForTable(ctx context.Context, table *Table, ordered
ctx: ctx,
table: table,
tableID: tableID,
projectID: rsProjectID,
settings: settings,
readRowsFunc: c.rawClient.ReadRows,
createReadSessionFunc: c.rawClient.CreateReadSession,
Expand All @@ -122,9 +123,10 @@ func (c *readClient) sessionForTable(ctx context.Context, table *Table, ordered
type readSession struct {
settings readClientSettings

ctx context.Context
table *Table
tableID string
ctx context.Context
table *Table
tableID string
projectID string

bqSession *storagepb.ReadSession

Expand All @@ -141,7 +143,7 @@ func (rs *readSession) start() error {
preferredMinStreamCount = int32(rs.settings.maxWorkerCount)
}
createReadSessionRequest := &storagepb.CreateReadSessionRequest{
Parent: fmt.Sprintf("projects/%s", rs.table.ProjectID),
Parent: fmt.Sprintf("projects/%s", rs.projectID),
ReadSession: &storagepb.ReadSession{
Table: rs.tableID,
DataFormat: storagepb.DataFormat_ARROW,
Expand Down
27 changes: 27 additions & 0 deletions bigquery/storage_integration_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@ import (
"context"
"errors"
"fmt"
"strings"
"testing"
"time"

Expand Down Expand Up @@ -87,6 +88,32 @@ func TestIntegration_StorageReadEmptyResultSet(t *testing.T) {
}
}

// TestIntegration_StorageReadClientProject verifies that when reading a table
// owned by a different project, the Storage Read API session is created under
// the client's own project (where the caller has BQ Storage permissions)
// rather than the table's project.
func TestIntegration_StorageReadClientProject(t *testing.T) {
	if client == nil {
		t.Skip("Integration tests skipped")
	}
	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
	defer cancel()

	// Read a public table owned by another project; the read session must
	// still be created in this client's project.
	table := storageOptimizedClient.Dataset("usa_names").Table("usa_1910_current")
	table.ProjectID = "bigquery-public-data"

	it := table.Read(ctx)
	_, err := countIteratorRows(it)
	if err != nil {
		t.Fatal(err)
	}
	if !it.IsAccelerated() {
		t.Fatal("expected storage api to be used")
	}

	// Read session resource names have the form
	// "projects/{project}/locations/{location}/sessions/{id}", so the prefix
	// identifies the project the session was created in.
	session := it.arrowIterator.(*storageArrowIterator).rs
	expectedPrefix := fmt.Sprintf("projects/%s", storageOptimizedClient.projectID)
	if !strings.HasPrefix(session.bqSession.Name, expectedPrefix) {
		// fixed: original message had stray colons and used %s, which hides
		// empty or whitespace-only values.
		t.Fatalf("expected read session name to start with %q, but found %q", expectedPrefix, session.bqSession.Name)
	}
}

func TestIntegration_StorageReadFromSources(t *testing.T) {
if client == nil {
t.Skip("Integration tests skipped")
Expand Down
6 changes: 3 additions & 3 deletions bigquery/storage_iterator.go
Original file line number Diff line number Diff line change
Expand Up @@ -47,12 +47,12 @@ type storageArrowIterator struct {

var _ ArrowIterator = &storageArrowIterator{}

func newStorageRowIteratorFromTable(ctx context.Context, table *Table, ordered bool) (*RowIterator, error) {
func newStorageRowIteratorFromTable(ctx context.Context, table *Table, rsProjectID string, ordered bool) (*RowIterator, error) {
md, err := table.Metadata(ctx)
if err != nil {
return nil, err
}
rs, err := table.c.rc.sessionForTable(ctx, table, ordered)
rs, err := table.c.rc.sessionForTable(ctx, table, rsProjectID, ordered)
if err != nil {
return nil, err
}
Expand Down Expand Up @@ -95,7 +95,7 @@ func newStorageRowIteratorFromJob(ctx context.Context, j *Job) (*RowIterator, er
return newStorageRowIteratorFromJob(ctx, lastJob)
}
ordered := query.HasOrderedResults(qcfg.Q)
return newStorageRowIteratorFromTable(ctx, qcfg.Dst, ordered)
return newStorageRowIteratorFromTable(ctx, qcfg.Dst, job.projectID, ordered)
}

func resolveLastChildSelectJob(ctx context.Context, job *Job) (*Job, error) {
Expand Down
2 changes: 1 addition & 1 deletion bigquery/table.go
Original file line number Diff line number Diff line change
Expand Up @@ -974,7 +974,7 @@ func (t *Table) Read(ctx context.Context) *RowIterator {

func (t *Table) read(ctx context.Context, pf pageFetcher) *RowIterator {
if t.c.isStorageReadAvailable() {
it, err := newStorageRowIteratorFromTable(ctx, t, false)
it, err := newStorageRowIteratorFromTable(ctx, t, t.c.projectID, false)
if err == nil {
return it
}
Expand Down

0 comments on commit f98396e

Please sign in to comment.