From bfcecc1279b81172b1533cef0eb60a4ac3366187 Mon Sep 17 00:00:00 2001 From: Janez Podhostnik Date: Thu, 14 Mar 2024 18:32:46 +0100 Subject: [PATCH 001/148] Add a load config option to TPS loader --- integration/benchmark/cmd/ci/main.go | 115 +++-- integration/benchmark/cmd/ci/server.go | 28 -- integration/benchmark/contLoadGenerator.go | 15 +- integration/benchmark/mocksiface/mocks.go | 10 - integration/benchmark/proto/generate.go | 3 - .../benchmark/proto/macro_benchmark.pb.go | 435 ------------------ .../benchmark/proto/macro_benchmark.proto | 28 -- .../proto/macro_benchmark_grpc.pb.go | 243 ---------- integration/benchmark/server/bench.sh | 4 +- integration/benchmark/server/branches.recent | 0 integration/benchmark/server/commits.recent | 1 - integration/benchmark/server/flow-go | 1 - integration/benchmark/server/load-config.yml | 12 + integration/benchmark/worker_stats_tracker.go | 2 +- integration/go.mod | 2 +- 15 files changed, 103 insertions(+), 796 deletions(-) delete mode 100644 integration/benchmark/cmd/ci/server.go delete mode 100644 integration/benchmark/mocksiface/mocks.go delete mode 100644 integration/benchmark/proto/generate.go delete mode 100644 integration/benchmark/proto/macro_benchmark.pb.go delete mode 100644 integration/benchmark/proto/macro_benchmark.proto delete mode 100644 integration/benchmark/proto/macro_benchmark_grpc.pb.go delete mode 100644 integration/benchmark/server/branches.recent delete mode 100644 integration/benchmark/server/commits.recent delete mode 160000 integration/benchmark/server/flow-go create mode 100644 integration/benchmark/server/load-config.yml diff --git a/integration/benchmark/cmd/ci/main.go b/integration/benchmark/cmd/ci/main.go index b5f68cc0ec7..1058f4d5541 100644 --- a/integration/benchmark/cmd/ci/main.go +++ b/integration/benchmark/cmd/ci/main.go @@ -3,7 +3,7 @@ package main import ( "context" "flag" - "net" + "gopkg.in/yaml.v3" "os" "strings" "time" @@ -21,7 +21,6 @@ import ( client 
"github.com/onflow/flow-go-sdk/access/grpc" "github.com/onflow/flow-go/integration/benchmark" - pb "github.com/onflow/flow-go/integration/benchmark/proto" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/utils/unittest" @@ -33,7 +32,7 @@ type BenchmarkInfo struct { // Hardcoded CI values const ( - defaultLoadType = "token-transfer" + defaultLoadType = load.TokenTransferLoadType metricport = uint(8080) accessNodeAddress = "127.0.0.1:4001" pushgateway = "127.0.0.1:9091" @@ -45,35 +44,43 @@ const ( defaultMetricCollectionInterval = 20 * time.Second // gRPC constants - defaultMaxMsgSize = 1024 * 1024 * 16 // 16 MB - defaultGRPCAddress = "127.0.0.1:4777" + defaultMaxMsgSize = 1024 * 1024 * 16 // 16 MB ) func main() { logLvl := flag.String("log-level", "info", "set log level") // CI relevant flags - grpcAddressFlag := flag.String("grpc-address", defaultGRPCAddress, "listen address for gRPC server") initialTPSFlag := flag.Int("tps-initial", 10, "starting transactions per second") maxTPSFlag := flag.Int("tps-max", *initialTPSFlag, "maximum transactions per second allowed") minTPSFlag := flag.Int("tps-min", *initialTPSFlag, "minimum transactions per second allowed") + loadTypeFlag := flag.String("load-type", string(defaultLoadType), "load type (token-transfer / const-exec / evm from the load config file)") + loadConfigFileLocationFlag := flag.String("load-config-file", "", "load config file location. 
If not provided, default config will be used.") + adjustIntervalFlag := flag.Duration("tps-adjust-interval", defaultAdjustInterval, "interval for adjusting TPS") adjustDelayFlag := flag.Duration("tps-adjust-delay", 120*time.Second, "delay before adjusting TPS") - statIntervalFlag := flag.Duration("stat-interval", defaultMetricCollectionInterval, "") durationFlag := flag.Duration("duration", 10*time.Minute, "test duration") + + statIntervalFlag := flag.Duration("stat-interval", defaultMetricCollectionInterval, "") gitRepoPathFlag := flag.String("git-repo-path", "../..", "git repo path of the filesystem") gitRepoURLFlag := flag.String("git-repo-url", "https://github.com/onflow/flow-go.git", "git repo URL") bigQueryUpload := flag.Bool("bigquery-upload", true, "whether to upload results to BigQuery (true / false)") bigQueryProjectFlag := flag.String("bigquery-project", "dapperlabs-data", "project name for the bigquery uploader") bigQueryDatasetFlag := flag.String("bigquery-dataset", "dev_src_flow_tps_metrics", "dataset name for the bigquery uploader") bigQueryRawTableFlag := flag.String("bigquery-raw-table", "rawResults", "table name for the bigquery raw results") - loadTypeFlag := flag.String("load-type", defaultLoadType, "load type (token-transfer / const-exec / evm)") flag.Parse() - loadType := *loadTypeFlag - log := setupLogger(logLvl) + loadConfig := getLoadConfig( + log, + *loadConfigFileLocationFlag, + *loadTypeFlag, + *minTPSFlag, + *maxTPSFlag, + *initialTPSFlag, + ) + if *gitRepoPathFlag == "" { flag.PrintDefaults() log.Fatal().Msg("git repo path is required") @@ -86,26 +93,6 @@ func main() { <-server.Ready() loaderMetrics := metrics.NewLoaderCollector() - grpcServerOptions := []grpc.ServerOption{ - grpc.MaxRecvMsgSize(defaultMaxMsgSize), - grpc.MaxSendMsgSize(defaultMaxMsgSize), - } - grpcServer := grpc.NewServer(grpcServerOptions...) 
- defer grpcServer.Stop() - - pb.RegisterBenchmarkServer(grpcServer, &benchmarkServer{}) - - grpcListener, err := net.Listen("tcp", *grpcAddressFlag) - if err != nil { - log.Fatal().Err(err).Str("address", *grpcAddressFlag).Msg("failed to listen") - } - - go func() { - if err := grpcServer.Serve(grpcListener); err != nil { - log.Fatal().Err(err).Msg("failed to serve") - } - }() - sp := benchmark.NewStatsPusher(ctx, log, pushgateway, "loader", prometheus.DefaultGatherer) defer sp.Stop() @@ -136,10 +123,7 @@ func main() { // prepare load generator log.Info(). - Str("load_type", loadType). - Int("initialTPS", *initialTPSFlag). - Int("minTPS", *minTPSFlag). - Int("maxTPS", *maxTPSFlag). + Interface("loadConfig", loadConfig). Dur("duration", *durationFlag). Msg("Running load case") @@ -164,7 +148,7 @@ func main() { }, benchmark.LoadParams{ NumberOfAccounts: maxInflight, - LoadType: load.LoadType(loadType), + LoadConfig: loadConfig, FeedbackEnabled: feedbackEnabled, }, ) @@ -187,9 +171,9 @@ func main() { AdjusterParams{ Delay: *adjustDelayFlag, Interval: *adjustIntervalFlag, - InitialTPS: uint(*initialTPSFlag), - MinTPS: uint(*minTPSFlag), - MaxTPS: uint(*maxTPSFlag), + InitialTPS: uint(loadConfig.TPSInitial), + MinTPS: uint(loadConfig.TpsMin), + MaxTPS: uint(loadConfig.TpsMax), MaxInflight: uint(maxInflight / 2), }, ) @@ -218,7 +202,7 @@ func main() { // only upload valid data if *bigQueryUpload { repoInfo := MustGetRepoInfo(log, *gitRepoURLFlag, *gitRepoPathFlag) - mustUploadData(ctx, log, recorder, repoInfo, *bigQueryProjectFlag, *bigQueryDatasetFlag, *bigQueryRawTableFlag, loadType) + mustUploadData(ctx, log, recorder, repoInfo, *bigQueryProjectFlag, *bigQueryDatasetFlag, *bigQueryRawTableFlag, loadConfig.LoadName) } else { log.Info().Int("raw_tps_size", len(recorder.BenchmarkResults.RawTPS)).Msg("logging tps results locally") // log results locally when not uploading to BigQuery @@ -228,6 +212,55 @@ func main() { } } +func getLoadConfig( + log zerolog.Logger, + 
loadConfigLocation string, + load string, + minTPS int, + maxTPS int, + initialTPS int, +) benchmark.LoadConfig { + if loadConfigLocation == "" { + lc := benchmark.LoadConfig{ + LoadName: load, + LoadType: load, + TpsMax: maxTPS, + TpsMin: minTPS, + TPSInitial: initialTPS, + } + + log.Info(). + Interface("loadConfig", lc). + Msg("Load config file not provided, using parameters supplied in TPS flags") + return lc + } + + var loadConfigs map[string]benchmark.LoadConfig + + // check if the file exists + if _, err := os.Stat(loadConfigLocation); os.IsNotExist(err) { + log.Fatal().Err(err).Str("loadConfigLocation", loadConfigLocation).Msg("load config file not found") + } + + yamlFile, err := os.ReadFile(loadConfigLocation) + if err != nil { + log.Fatal().Err(err).Str("loadConfigLocation", loadConfigLocation).Msg("failed to read load config file") + } + + err = yaml.Unmarshal(yamlFile, &loadConfigs) + if err != nil { + log.Fatal().Err(err).Str("loadConfigLocation", loadConfigLocation).Msg("failed to unmarshal load config file") + } + + lc, ok := loadConfigs[load] + if !ok { + log.Fatal().Str("load", load).Msg("load not found in load config file") + } + lc.LoadName = load + + return lc +} + // setupLogger parses log level and apply to logger func setupLogger(logLvl *string) zerolog.Logger { log := zerolog.New(os.Stderr). 
@@ -252,7 +285,7 @@ func mustUploadData( bigQueryProject string, bigQueryDataset string, bigQueryRawTable string, - loadType string, + loadName string, ) { log.Info().Msg("Initializing BigQuery") db, err := NewDB(ctx, log, bigQueryProject) @@ -278,7 +311,7 @@ func mustUploadData( bigQueryRawTable, recorder.BenchmarkResults, *repoInfo, - BenchmarkInfo{BenchmarkType: loadType}, + BenchmarkInfo{BenchmarkType: loadName}, MustGetDefaultEnvironment(), ) if err != nil { diff --git a/integration/benchmark/cmd/ci/server.go b/integration/benchmark/cmd/ci/server.go deleted file mode 100644 index ba72e856ed4..00000000000 --- a/integration/benchmark/cmd/ci/server.go +++ /dev/null @@ -1,28 +0,0 @@ -package main - -import ( - "context" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - "google.golang.org/protobuf/types/known/emptypb" - - pb "github.com/onflow/flow-go/integration/benchmark/proto" -) - -type benchmarkServer struct { - pb.UnimplementedBenchmarkServer -} - -func (s *benchmarkServer) StartMacroBenchmark(*pb.StartMacroBenchmarkRequest, pb.Benchmark_StartMacroBenchmarkServer) error { - return status.Errorf(codes.Unimplemented, "method StartMacroBenchmark not implemented") -} -func (s *benchmarkServer) GetMacroBenchmark(context.Context, *pb.GetMacroBenchmarkRequest) (*pb.GetMacroBenchmarkResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetMacroBenchmark not implemented") -} -func (s *benchmarkServer) ListMacroBenchmarks(context.Context, *emptypb.Empty) (*pb.ListMacroBenchmarksResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListMacroBenchmarks not implemented") -} -func (s *benchmarkServer) Status(context.Context, *emptypb.Empty) (*pb.StatusResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Status not implemented") -} diff --git a/integration/benchmark/contLoadGenerator.go b/integration/benchmark/contLoadGenerator.go index df78ca74c0a..41081de7163 100644 --- 
a/integration/benchmark/contLoadGenerator.go +++ b/integration/benchmark/contLoadGenerator.go @@ -46,9 +46,20 @@ type NetworkParams struct { ChainId flow.ChainID } +type LoadConfig struct { + // LoadName is the name of the load. This can be different from the LoadType + // and is used to identify the load in the results. The use case is when a single + // load type is used to run multiple loads with different parameters. + LoadName string `yaml:"-"` + LoadType string `yaml:"load_type"` + TpsMax int `default:"1200" yaml:"tps_max"` + TpsMin int `default:"1" yaml:"tps_min"` + TPSInitial int `yaml:"tps_initial"` +} + type LoadParams struct { NumberOfAccounts int - LoadType load.LoadType + LoadConfig LoadConfig // TODO(rbtz): inject a TxFollower FeedbackEnabled bool @@ -157,7 +168,7 @@ func New( Proposer: servAcc, } - l := load.CreateLoadType(log, loadParams.LoadType) + l := load.CreateLoadType(log, load.LoadType(loadParams.LoadConfig.LoadType)) err = l.Setup(log, lc) if err != nil { diff --git a/integration/benchmark/mocksiface/mocks.go b/integration/benchmark/mocksiface/mocks.go deleted file mode 100644 index 0068b5676c2..00000000000 --- a/integration/benchmark/mocksiface/mocks.go +++ /dev/null @@ -1,10 +0,0 @@ -package mocksiface_test - -import ( - "github.com/onflow/flow-go-sdk/access" -) - -// This is a proxy for the real access.Client for mockery to use. -type Client interface { - access.Client -} diff --git a/integration/benchmark/proto/generate.go b/integration/benchmark/proto/generate.go deleted file mode 100644 index b36797e4592..00000000000 --- a/integration/benchmark/proto/generate.go +++ /dev/null @@ -1,3 +0,0 @@ -//go:generate protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. 
--go-grpc_opt=paths=source_relative macro_benchmark.proto - -package proto diff --git a/integration/benchmark/proto/macro_benchmark.pb.go b/integration/benchmark/proto/macro_benchmark.pb.go deleted file mode 100644 index 15fdb7b4cf9..00000000000 --- a/integration/benchmark/proto/macro_benchmark.pb.go +++ /dev/null @@ -1,435 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.9 -// source: macro_benchmark.proto - -package proto - -import ( - reflect "reflect" - sync "sync" - - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - emptypb "google.golang.org/protobuf/types/known/emptypb" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type StartMacroBenchmarkRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *StartMacroBenchmarkRequest) Reset() { - *x = StartMacroBenchmarkRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_macro_benchmark_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *StartMacroBenchmarkRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StartMacroBenchmarkRequest) ProtoMessage() {} - -func (x *StartMacroBenchmarkRequest) ProtoReflect() protoreflect.Message { - mi := &file_macro_benchmark_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StartMacroBenchmarkRequest.ProtoReflect.Descriptor instead. 
-func (*StartMacroBenchmarkRequest) Descriptor() ([]byte, []int) { - return file_macro_benchmark_proto_rawDescGZIP(), []int{0} -} - -type StartMacroBenchmarkResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *StartMacroBenchmarkResponse) Reset() { - *x = StartMacroBenchmarkResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_macro_benchmark_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *StartMacroBenchmarkResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StartMacroBenchmarkResponse) ProtoMessage() {} - -func (x *StartMacroBenchmarkResponse) ProtoReflect() protoreflect.Message { - mi := &file_macro_benchmark_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StartMacroBenchmarkResponse.ProtoReflect.Descriptor instead. 
-func (*StartMacroBenchmarkResponse) Descriptor() ([]byte, []int) { - return file_macro_benchmark_proto_rawDescGZIP(), []int{1} -} - -type GetMacroBenchmarkRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *GetMacroBenchmarkRequest) Reset() { - *x = GetMacroBenchmarkRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_macro_benchmark_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetMacroBenchmarkRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetMacroBenchmarkRequest) ProtoMessage() {} - -func (x *GetMacroBenchmarkRequest) ProtoReflect() protoreflect.Message { - mi := &file_macro_benchmark_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetMacroBenchmarkRequest.ProtoReflect.Descriptor instead. 
-func (*GetMacroBenchmarkRequest) Descriptor() ([]byte, []int) { - return file_macro_benchmark_proto_rawDescGZIP(), []int{2} -} - -type GetMacroBenchmarkResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *GetMacroBenchmarkResponse) Reset() { - *x = GetMacroBenchmarkResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_macro_benchmark_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetMacroBenchmarkResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetMacroBenchmarkResponse) ProtoMessage() {} - -func (x *GetMacroBenchmarkResponse) ProtoReflect() protoreflect.Message { - mi := &file_macro_benchmark_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetMacroBenchmarkResponse.ProtoReflect.Descriptor instead. 
-func (*GetMacroBenchmarkResponse) Descriptor() ([]byte, []int) { - return file_macro_benchmark_proto_rawDescGZIP(), []int{3} -} - -type ListMacroBenchmarksResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *ListMacroBenchmarksResponse) Reset() { - *x = ListMacroBenchmarksResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_macro_benchmark_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListMacroBenchmarksResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListMacroBenchmarksResponse) ProtoMessage() {} - -func (x *ListMacroBenchmarksResponse) ProtoReflect() protoreflect.Message { - mi := &file_macro_benchmark_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListMacroBenchmarksResponse.ProtoReflect.Descriptor instead. 
-func (*ListMacroBenchmarksResponse) Descriptor() ([]byte, []int) { - return file_macro_benchmark_proto_rawDescGZIP(), []int{4} -} - -type StatusResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *StatusResponse) Reset() { - *x = StatusResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_macro_benchmark_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *StatusResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StatusResponse) ProtoMessage() {} - -func (x *StatusResponse) ProtoReflect() protoreflect.Message { - mi := &file_macro_benchmark_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StatusResponse.ProtoReflect.Descriptor instead. 
-func (*StatusResponse) Descriptor() ([]byte, []int) { - return file_macro_benchmark_proto_rawDescGZIP(), []int{5} -} - -var File_macro_benchmark_proto protoreflect.FileDescriptor - -var file_macro_benchmark_proto_rawDesc = []byte{ - 0x0a, 0x15, 0x6d, 0x61, 0x63, 0x72, 0x6f, 0x5f, 0x62, 0x65, 0x6e, 0x63, 0x68, 0x6d, 0x61, 0x72, - 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x62, 0x65, 0x6e, 0x63, 0x68, 0x6d, 0x61, - 0x72, 0x6b, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, - 0x1c, 0x0a, 0x1a, 0x53, 0x74, 0x61, 0x72, 0x74, 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x42, 0x65, 0x6e, - 0x63, 0x68, 0x6d, 0x61, 0x72, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x1d, 0x0a, - 0x1b, 0x53, 0x74, 0x61, 0x72, 0x74, 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x42, 0x65, 0x6e, 0x63, 0x68, - 0x6d, 0x61, 0x72, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1a, 0x0a, 0x18, - 0x47, 0x65, 0x74, 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x42, 0x65, 0x6e, 0x63, 0x68, 0x6d, 0x61, 0x72, - 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x1b, 0x0a, 0x19, 0x47, 0x65, 0x74, 0x4d, - 0x61, 0x63, 0x72, 0x6f, 0x42, 0x65, 0x6e, 0x63, 0x68, 0x6d, 0x61, 0x72, 0x6b, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1d, 0x0a, 0x1b, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x61, 0x63, - 0x72, 0x6f, 0x42, 0x65, 0x6e, 0x63, 0x68, 0x6d, 0x61, 0x72, 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x10, 0x0a, 0x0e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xef, 0x02, 0x0a, 0x09, 0x42, 0x65, 0x6e, 0x63, 0x68, - 0x6d, 0x61, 0x72, 0x6b, 0x12, 0x68, 0x0a, 0x13, 0x53, 0x74, 0x61, 0x72, 0x74, 0x4d, 0x61, 0x63, - 0x72, 0x6f, 0x42, 0x65, 0x6e, 0x63, 0x68, 0x6d, 0x61, 0x72, 0x6b, 0x12, 0x25, 0x2e, 0x62, 0x65, - 0x6e, 0x63, 0x68, 0x6d, 0x61, 0x72, 0x6b, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x4d, 0x61, 0x63, - 
0x72, 0x6f, 0x42, 0x65, 0x6e, 0x63, 0x68, 0x6d, 0x61, 0x72, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x62, 0x65, 0x6e, 0x63, 0x68, 0x6d, 0x61, 0x72, 0x6b, 0x2e, 0x53, - 0x74, 0x61, 0x72, 0x74, 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x42, 0x65, 0x6e, 0x63, 0x68, 0x6d, 0x61, - 0x72, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x60, - 0x0a, 0x11, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x42, 0x65, 0x6e, 0x63, 0x68, 0x6d, - 0x61, 0x72, 0x6b, 0x12, 0x23, 0x2e, 0x62, 0x65, 0x6e, 0x63, 0x68, 0x6d, 0x61, 0x72, 0x6b, 0x2e, - 0x47, 0x65, 0x74, 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x42, 0x65, 0x6e, 0x63, 0x68, 0x6d, 0x61, 0x72, - 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x62, 0x65, 0x6e, 0x63, 0x68, - 0x6d, 0x61, 0x72, 0x6b, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x42, 0x65, 0x6e, - 0x63, 0x68, 0x6d, 0x61, 0x72, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, - 0x12, 0x57, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x42, 0x65, 0x6e, - 0x63, 0x68, 0x6d, 0x61, 0x72, 0x6b, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, - 0x26, 0x2e, 0x62, 0x65, 0x6e, 0x63, 0x68, 0x6d, 0x61, 0x72, 0x6b, 0x2e, 0x4c, 0x69, 0x73, 0x74, - 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x42, 0x65, 0x6e, 0x63, 0x68, 0x6d, 0x61, 0x72, 0x6b, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3d, 0x0a, 0x06, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x19, 0x2e, 0x62, 0x65, - 0x6e, 0x63, 0x68, 0x6d, 0x61, 0x72, 0x6b, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x37, 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, - 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 
0x2f, 0x6f, 0x6e, 0x66, 0x6c, 0x6f, 0x77, 0x2f, 0x66, 0x6c, - 0x6f, 0x77, 0x2d, 0x67, 0x6f, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x2f, 0x62, 0x65, 0x63, 0x6e, 0x68, 0x6d, 0x61, 0x72, 0x6b, 0x2f, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_macro_benchmark_proto_rawDescOnce sync.Once - file_macro_benchmark_proto_rawDescData = file_macro_benchmark_proto_rawDesc -) - -func file_macro_benchmark_proto_rawDescGZIP() []byte { - file_macro_benchmark_proto_rawDescOnce.Do(func() { - file_macro_benchmark_proto_rawDescData = protoimpl.X.CompressGZIP(file_macro_benchmark_proto_rawDescData) - }) - return file_macro_benchmark_proto_rawDescData -} - -var file_macro_benchmark_proto_msgTypes = make([]protoimpl.MessageInfo, 6) -var file_macro_benchmark_proto_goTypes = []interface{}{ - (*StartMacroBenchmarkRequest)(nil), // 0: benchmark.StartMacroBenchmarkRequest - (*StartMacroBenchmarkResponse)(nil), // 1: benchmark.StartMacroBenchmarkResponse - (*GetMacroBenchmarkRequest)(nil), // 2: benchmark.GetMacroBenchmarkRequest - (*GetMacroBenchmarkResponse)(nil), // 3: benchmark.GetMacroBenchmarkResponse - (*ListMacroBenchmarksResponse)(nil), // 4: benchmark.ListMacroBenchmarksResponse - (*StatusResponse)(nil), // 5: benchmark.StatusResponse - (*emptypb.Empty)(nil), // 6: google.protobuf.Empty -} -var file_macro_benchmark_proto_depIdxs = []int32{ - 0, // 0: benchmark.Benchmark.StartMacroBenchmark:input_type -> benchmark.StartMacroBenchmarkRequest - 2, // 1: benchmark.Benchmark.GetMacroBenchmark:input_type -> benchmark.GetMacroBenchmarkRequest - 6, // 2: benchmark.Benchmark.ListMacroBenchmarks:input_type -> google.protobuf.Empty - 6, // 3: benchmark.Benchmark.Status:input_type -> google.protobuf.Empty - 1, // 4: benchmark.Benchmark.StartMacroBenchmark:output_type -> benchmark.StartMacroBenchmarkResponse - 3, // 5: benchmark.Benchmark.GetMacroBenchmark:output_type -> benchmark.GetMacroBenchmarkResponse - 
4, // 6: benchmark.Benchmark.ListMacroBenchmarks:output_type -> benchmark.ListMacroBenchmarksResponse - 5, // 7: benchmark.Benchmark.Status:output_type -> benchmark.StatusResponse - 4, // [4:8] is the sub-list for method output_type - 0, // [0:4] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_macro_benchmark_proto_init() } -func file_macro_benchmark_proto_init() { - if File_macro_benchmark_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_macro_benchmark_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StartMacroBenchmarkRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_macro_benchmark_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StartMacroBenchmarkResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_macro_benchmark_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetMacroBenchmarkRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_macro_benchmark_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetMacroBenchmarkResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_macro_benchmark_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListMacroBenchmarksResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_macro_benchmark_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StatusResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_macro_benchmark_proto_rawDesc, - NumEnums: 0, - NumMessages: 6, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_macro_benchmark_proto_goTypes, - DependencyIndexes: file_macro_benchmark_proto_depIdxs, - MessageInfos: file_macro_benchmark_proto_msgTypes, - }.Build() - File_macro_benchmark_proto = out.File - file_macro_benchmark_proto_rawDesc = nil - file_macro_benchmark_proto_goTypes = nil - file_macro_benchmark_proto_depIdxs = nil -} diff --git a/integration/benchmark/proto/macro_benchmark.proto b/integration/benchmark/proto/macro_benchmark.proto deleted file mode 100644 index e461ea81892..00000000000 --- a/integration/benchmark/proto/macro_benchmark.proto +++ /dev/null @@ -1,28 +0,0 @@ -syntax = "proto3"; - -package benchmark; -option go_package = "github.com/onflow/flow-go/integration/becnhmark/proto"; - -import "google/protobuf/empty.proto"; - -message StartMacroBenchmarkRequest {} -message StartMacroBenchmarkResponse {} - -message GetMacroBenchmarkRequest {} -message GetMacroBenchmarkResponse {} - -message ListMacroBenchmarksResponse {} - -message StatusResponse {} - -service Benchmark { - rpc StartMacroBenchmark(StartMacroBenchmarkRequest) - returns (stream StartMacroBenchmarkResponse) {} - rpc GetMacroBenchmark(GetMacroBenchmarkRequest) - returns (GetMacroBenchmarkResponse) {} - rpc ListMacroBenchmarks(google.protobuf.Empty) - returns (ListMacroBenchmarksResponse) {} - - rpc Status(google.protobuf.Empty) returns (StatusResponse) {} -} - diff --git a/integration/benchmark/proto/macro_benchmark_grpc.pb.go 
b/integration/benchmark/proto/macro_benchmark_grpc.pb.go deleted file mode 100644 index 065a26fcb39..00000000000 --- a/integration/benchmark/proto/macro_benchmark_grpc.pb.go +++ /dev/null @@ -1,243 +0,0 @@ -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v3.21.9 -// source: macro_benchmark.proto - -package proto - -import ( - context "context" - - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - emptypb "google.golang.org/protobuf/types/known/emptypb" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 - -// BenchmarkClient is the client API for Benchmark service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
-type BenchmarkClient interface { - StartMacroBenchmark(ctx context.Context, in *StartMacroBenchmarkRequest, opts ...grpc.CallOption) (Benchmark_StartMacroBenchmarkClient, error) - GetMacroBenchmark(ctx context.Context, in *GetMacroBenchmarkRequest, opts ...grpc.CallOption) (*GetMacroBenchmarkResponse, error) - ListMacroBenchmarks(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*ListMacroBenchmarksResponse, error) - Status(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*StatusResponse, error) -} - -type benchmarkClient struct { - cc grpc.ClientConnInterface -} - -func NewBenchmarkClient(cc grpc.ClientConnInterface) BenchmarkClient { - return &benchmarkClient{cc} -} - -func (c *benchmarkClient) StartMacroBenchmark(ctx context.Context, in *StartMacroBenchmarkRequest, opts ...grpc.CallOption) (Benchmark_StartMacroBenchmarkClient, error) { - stream, err := c.cc.NewStream(ctx, &Benchmark_ServiceDesc.Streams[0], "/benchmark.Benchmark/StartMacroBenchmark", opts...) - if err != nil { - return nil, err - } - x := &benchmarkStartMacroBenchmarkClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Benchmark_StartMacroBenchmarkClient interface { - Recv() (*StartMacroBenchmarkResponse, error) - grpc.ClientStream -} - -type benchmarkStartMacroBenchmarkClient struct { - grpc.ClientStream -} - -func (x *benchmarkStartMacroBenchmarkClient) Recv() (*StartMacroBenchmarkResponse, error) { - m := new(StartMacroBenchmarkResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *benchmarkClient) GetMacroBenchmark(ctx context.Context, in *GetMacroBenchmarkRequest, opts ...grpc.CallOption) (*GetMacroBenchmarkResponse, error) { - out := new(GetMacroBenchmarkResponse) - err := c.cc.Invoke(ctx, "/benchmark.Benchmark/GetMacroBenchmark", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *benchmarkClient) ListMacroBenchmarks(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*ListMacroBenchmarksResponse, error) { - out := new(ListMacroBenchmarksResponse) - err := c.cc.Invoke(ctx, "/benchmark.Benchmark/ListMacroBenchmarks", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *benchmarkClient) Status(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*StatusResponse, error) { - out := new(StatusResponse) - err := c.cc.Invoke(ctx, "/benchmark.Benchmark/Status", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// BenchmarkServer is the server API for Benchmark service. -// All implementations must embed UnimplementedBenchmarkServer -// for forward compatibility -type BenchmarkServer interface { - StartMacroBenchmark(*StartMacroBenchmarkRequest, Benchmark_StartMacroBenchmarkServer) error - GetMacroBenchmark(context.Context, *GetMacroBenchmarkRequest) (*GetMacroBenchmarkResponse, error) - ListMacroBenchmarks(context.Context, *emptypb.Empty) (*ListMacroBenchmarksResponse, error) - Status(context.Context, *emptypb.Empty) (*StatusResponse, error) - mustEmbedUnimplementedBenchmarkServer() -} - -// UnimplementedBenchmarkServer must be embedded to have forward compatible implementations. 
-type UnimplementedBenchmarkServer struct { -} - -func (UnimplementedBenchmarkServer) StartMacroBenchmark(*StartMacroBenchmarkRequest, Benchmark_StartMacroBenchmarkServer) error { - return status.Errorf(codes.Unimplemented, "method StartMacroBenchmark not implemented") -} -func (UnimplementedBenchmarkServer) GetMacroBenchmark(context.Context, *GetMacroBenchmarkRequest) (*GetMacroBenchmarkResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetMacroBenchmark not implemented") -} -func (UnimplementedBenchmarkServer) ListMacroBenchmarks(context.Context, *emptypb.Empty) (*ListMacroBenchmarksResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListMacroBenchmarks not implemented") -} -func (UnimplementedBenchmarkServer) Status(context.Context, *emptypb.Empty) (*StatusResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Status not implemented") -} -func (UnimplementedBenchmarkServer) mustEmbedUnimplementedBenchmarkServer() {} - -// UnsafeBenchmarkServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to BenchmarkServer will -// result in compilation errors. 
-type UnsafeBenchmarkServer interface { - mustEmbedUnimplementedBenchmarkServer() -} - -func RegisterBenchmarkServer(s grpc.ServiceRegistrar, srv BenchmarkServer) { - s.RegisterService(&Benchmark_ServiceDesc, srv) -} - -func _Benchmark_StartMacroBenchmark_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(StartMacroBenchmarkRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(BenchmarkServer).StartMacroBenchmark(m, &benchmarkStartMacroBenchmarkServer{stream}) -} - -type Benchmark_StartMacroBenchmarkServer interface { - Send(*StartMacroBenchmarkResponse) error - grpc.ServerStream -} - -type benchmarkStartMacroBenchmarkServer struct { - grpc.ServerStream -} - -func (x *benchmarkStartMacroBenchmarkServer) Send(m *StartMacroBenchmarkResponse) error { - return x.ServerStream.SendMsg(m) -} - -func _Benchmark_GetMacroBenchmark_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetMacroBenchmarkRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(BenchmarkServer).GetMacroBenchmark(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/benchmark.Benchmark/GetMacroBenchmark", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BenchmarkServer).GetMacroBenchmark(ctx, req.(*GetMacroBenchmarkRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Benchmark_ListMacroBenchmarks_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(emptypb.Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(BenchmarkServer).ListMacroBenchmarks(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/benchmark.Benchmark/ListMacroBenchmarks", - } - handler 
:= func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BenchmarkServer).ListMacroBenchmarks(ctx, req.(*emptypb.Empty)) - } - return interceptor(ctx, in, info, handler) -} - -func _Benchmark_Status_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(emptypb.Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(BenchmarkServer).Status(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/benchmark.Benchmark/Status", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BenchmarkServer).Status(ctx, req.(*emptypb.Empty)) - } - return interceptor(ctx, in, info, handler) -} - -// Benchmark_ServiceDesc is the grpc.ServiceDesc for Benchmark service. -// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var Benchmark_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "benchmark.Benchmark", - HandlerType: (*BenchmarkServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "GetMacroBenchmark", - Handler: _Benchmark_GetMacroBenchmark_Handler, - }, - { - MethodName: "ListMacroBenchmarks", - Handler: _Benchmark_ListMacroBenchmarks_Handler, - }, - { - MethodName: "Status", - Handler: _Benchmark_Status_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "StartMacroBenchmark", - Handler: _Benchmark_StartMacroBenchmark_Handler, - ServerStreams: true, - }, - }, - Metadata: "macro_benchmark.proto", -} diff --git a/integration/benchmark/server/bench.sh b/integration/benchmark/server/bench.sh index 778cac6279d..d4859c18dc0 100755 --- a/integration/benchmark/server/bench.sh +++ b/integration/benchmark/server/bench.sh @@ -7,7 +7,7 @@ set -o pipefail # this will keep the TPS automation code separate from the code that's being tested so we won't run into issues # of 
having old versions of automation code just because we happen to be testing an older version flow-go git clone https://github.com/onflow/flow-go.git -cd flow-go/integration/localnet +cd flow-go/integration/localnet || exit git fetch git fetch --tags @@ -37,7 +37,7 @@ while read -r input; do # sleep is workaround for slow initialization of some node types, so that benchmark does not quit immediately with "connection refused" sleep 30; - go run ../benchmark/cmd/ci -log-level debug -git-repo-path ../../ -tps-initial 800 -tps-min 1 -tps-max 1200 -duration 30m -load-type "$load" + go run ../benchmark/cmd/ci -log-level info -git-repo-path ../../ -tps-initial 800 -tps-min 1 -tps-max 1200 -duration 30m -load-type "$load" # instead of running "make stop" which uses docker-compose for a lot of older versions, # we explicitly run the command here with "docker compose" diff --git a/integration/benchmark/server/branches.recent b/integration/benchmark/server/branches.recent deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/integration/benchmark/server/commits.recent b/integration/benchmark/server/commits.recent deleted file mode 100644 index 538b5965dcc..00000000000 --- a/integration/benchmark/server/commits.recent +++ /dev/null @@ -1 +0,0 @@ -janez/tps-benchmark-evm-load:894151a2390b11e3d9a399b41746d1c112f745fa:evm diff --git a/integration/benchmark/server/flow-go b/integration/benchmark/server/flow-go deleted file mode 160000 index 894151a2390..00000000000 --- a/integration/benchmark/server/flow-go +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 894151a2390b11e3d9a399b41746d1c112f745fa diff --git a/integration/benchmark/server/load-config.yml b/integration/benchmark/server/load-config.yml new file mode 100644 index 00000000000..e22b4dc0cc4 --- /dev/null +++ b/integration/benchmark/server/load-config.yml @@ -0,0 +1,12 @@ +- token-transfer: + load_type: token-transfer + tps_initial: 800 +- create-account: + load_type: create-account + tps_initial: 600 +- 
ledger-heavy: + load_type: ledger-heavy + tps_initial: 3 +- evm-transfer: + load_type: evm-transfer + tps_initial: 500 diff --git a/integration/benchmark/worker_stats_tracker.go b/integration/benchmark/worker_stats_tracker.go index d2a0f60f92e..cd582a2c2bf 100644 --- a/integration/benchmark/worker_stats_tracker.go +++ b/integration/benchmark/worker_stats_tracker.go @@ -133,7 +133,7 @@ func NewPeriodicStatsLogger( w := NewWorker( ctx, 0, - 1*time.Second, + 3*time.Second, func(workerID int) { stats := st.GetStats() log.Info(). diff --git a/integration/go.mod b/integration/go.mod index 439d4aaceed..35288bd997c 100644 --- a/integration/go.mod +++ b/integration/go.mod @@ -42,6 +42,7 @@ require ( golang.org/x/sync v0.6.0 google.golang.org/grpc v1.60.1 google.golang.org/protobuf v1.32.0 + gopkg.in/yaml.v3 v3.0.1 ) require ( @@ -348,7 +349,6 @@ require ( google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect lukechampine.com/blake3 v1.2.1 // indirect modernc.org/libc v1.22.3 // indirect modernc.org/mathutil v1.5.0 // indirect From 0e639e5a0d467ba77c6d6f7303bb5d8ff099f342 Mon Sep 17 00:00:00 2001 From: Janez Podhostnik Date: Thu, 14 Mar 2024 18:46:51 +0100 Subject: [PATCH 002/148] add load config to bench.sh --- integration/benchmark/cmd/ci/main.go | 2 +- integration/benchmark/server/bench.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/integration/benchmark/cmd/ci/main.go b/integration/benchmark/cmd/ci/main.go index 1058f4d5541..08cff5eb57c 100644 --- a/integration/benchmark/cmd/ci/main.go +++ b/integration/benchmark/cmd/ci/main.go @@ -55,7 +55,7 @@ func main() { maxTPSFlag := flag.Int("tps-max", *initialTPSFlag, "maximum transactions per second allowed") minTPSFlag := flag.Int("tps-min", *initialTPSFlag, "minimum transactions per second allowed") loadTypeFlag := flag.String("load-type", string(defaultLoadType), "load type 
(token-transfer / const-exec / evm from the load config file)") - loadConfigFileLocationFlag := flag.String("load-config-file", "", "load config file location. If not provided, default config will be used.") + loadConfigFileLocationFlag := flag.String("load-config", "", "load config file location. If not provided, default config will be used.") adjustIntervalFlag := flag.Duration("tps-adjust-interval", defaultAdjustInterval, "interval for adjusting TPS") adjustDelayFlag := flag.Duration("tps-adjust-delay", 120*time.Second, "delay before adjusting TPS") diff --git a/integration/benchmark/server/bench.sh b/integration/benchmark/server/bench.sh index d4859c18dc0..f2bd6dbec60 100755 --- a/integration/benchmark/server/bench.sh +++ b/integration/benchmark/server/bench.sh @@ -37,7 +37,7 @@ while read -r input; do # sleep is workaround for slow initialization of some node types, so that benchmark does not quit immediately with "connection refused" sleep 30; - go run ../benchmark/cmd/ci -log-level info -git-repo-path ../../ -tps-initial 800 -tps-min 1 -tps-max 1200 -duration 30m -load-type "$load" + go run ../benchmark/cmd/ci -log-level info -git-repo-path ../../ -tps-initial 800 -tps-min 1 -tps-max 1200 -duration 30m -load-type "$load" -load-config "load-config.yml" # instead of running "make stop" which uses docker-compose for a lot of older versions, # we explicitly run the command here with "docker compose" From f60cf9b889d565264a0ce4db9e07bc000ca4278e Mon Sep 17 00:00:00 2001 From: Janez Podhostnik Date: Thu, 14 Mar 2024 19:05:24 +0100 Subject: [PATCH 003/148] change control for testing --- integration/benchmark/server/control.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/integration/benchmark/server/control.sh b/integration/benchmark/server/control.sh index 26ecad5a289..02bebb100aa 100755 --- a/integration/benchmark/server/control.sh +++ b/integration/benchmark/server/control.sh @@ -21,7 +21,8 @@ commits_file="/opt/commits.recent" 
load_types=("token-transfer" "create-account" "ledger-heavy" "evm-transfer") # get the merge commits from the last week from master ordered by author date -for commit in $(git log --merges --first-parent --format="%S:%H" origin/master --since '1 week' --author-date-order ) +# TEMPORARY: DO NOT MERGE!! +for commit in $(git log --first-parent --format="%S:%H" origin/janez/improve-tps-metering --since '1 week' --author-date-order | head -1) do for load in "${load_types[@]}" do From 696f35b686b6cb1dfcba3bc5858462bd8cb48b5f Mon Sep 17 00:00:00 2001 From: Janez Podhostnik Date: Thu, 14 Mar 2024 19:11:01 +0100 Subject: [PATCH 004/148] change load-config location --- integration/benchmark/server/bench.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integration/benchmark/server/bench.sh b/integration/benchmark/server/bench.sh index f2bd6dbec60..161549aba0f 100755 --- a/integration/benchmark/server/bench.sh +++ b/integration/benchmark/server/bench.sh @@ -37,7 +37,7 @@ while read -r input; do # sleep is workaround for slow initialization of some node types, so that benchmark does not quit immediately with "connection refused" sleep 30; - go run ../benchmark/cmd/ci -log-level info -git-repo-path ../../ -tps-initial 800 -tps-min 1 -tps-max 1200 -duration 30m -load-type "$load" -load-config "load-config.yml" + go run ../benchmark/cmd/ci -log-level info -git-repo-path ../../ -tps-initial 800 -tps-min 1 -tps-max 1200 -duration 30m -load-type "$load" -load-config "../benchmark/server/load-config.yml" # instead of running "make stop" which uses docker-compose for a lot of older versions, # we explicitly run the command here with "docker compose" From 7adb2f49619dfafe8f446722dd434666bb15c6f9 Mon Sep 17 00:00:00 2001 From: Janez Podhostnik Date: Thu, 14 Mar 2024 19:36:48 +0100 Subject: [PATCH 005/148] fix yaml --- integration/benchmark/server/load-config.yml | 24 ++++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git 
a/integration/benchmark/server/load-config.yml b/integration/benchmark/server/load-config.yml index e22b4dc0cc4..8b81ca20cdc 100644 --- a/integration/benchmark/server/load-config.yml +++ b/integration/benchmark/server/load-config.yml @@ -1,12 +1,12 @@ -- token-transfer: - load_type: token-transfer - tps_initial: 800 -- create-account: - load_type: create-account - tps_initial: 600 -- ledger-heavy: - load_type: ledger-heavy - tps_initial: 3 -- evm-transfer: - load_type: evm-transfer - tps_initial: 500 +token-transfer: + load_type: token-transfer + tps_initial: 800 +create-account: + load_type: create-account + tps_initial: 600 +ledger-heavy: + load_type: ledger-heavy + tps_initial: 3 +evm-transfer: + load_type: evm-transfer + tps_initial: 500 From eead0551db198d70d49b5f912f323fab0a90e92f Mon Sep 17 00:00:00 2001 From: Janez Podhostnik Date: Thu, 14 Mar 2024 19:53:12 +0100 Subject: [PATCH 006/148] default values for load-config --- integration/benchmark/contLoadGenerator.go | 23 ++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/integration/benchmark/contLoadGenerator.go b/integration/benchmark/contLoadGenerator.go index 41081de7163..7ca9586e8bd 100644 --- a/integration/benchmark/contLoadGenerator.go +++ b/integration/benchmark/contLoadGenerator.go @@ -8,6 +8,7 @@ import ( "time" "github.com/rs/zerolog" + "gopkg.in/yaml.v3" flowsdk "github.com/onflow/flow-go-sdk" "github.com/onflow/flow-go-sdk/access" @@ -52,11 +53,29 @@ type LoadConfig struct { // load type is used to run multiple loads with different parameters. 
LoadName string `yaml:"-"` LoadType string `yaml:"load_type"` - TpsMax int `default:"1200" yaml:"tps_max"` - TpsMin int `default:"1" yaml:"tps_min"` + TpsMax int `yaml:"tps_max"` + TpsMin int `yaml:"tps_min"` TPSInitial int `yaml:"tps_initial"` } +func DefaultLoadConfig() LoadConfig { + return LoadConfig{ + TpsMax: 1200, + TpsMin: 1, + } +} + +func (s *LoadConfig) UnmarshalYAML(value *yaml.Node) error { + config := DefaultLoadConfig() + + if err := value.Decode(&config); err != nil { + return err + } + + *s = config + return nil +} + type LoadParams struct { NumberOfAccounts int LoadConfig LoadConfig From d844232bf6f5ad13dede2ebe20155835df7d3437 Mon Sep 17 00:00:00 2001 From: Janez Podhostnik Date: Thu, 14 Mar 2024 20:00:57 +0100 Subject: [PATCH 007/148] dont use defaults --- integration/benchmark/contLoadGenerator.go | 22 +------------------- integration/benchmark/server/load-config.yml | 8 +++++++ 2 files changed, 9 insertions(+), 21 deletions(-) diff --git a/integration/benchmark/contLoadGenerator.go b/integration/benchmark/contLoadGenerator.go index 7ca9586e8bd..eb0032814ae 100644 --- a/integration/benchmark/contLoadGenerator.go +++ b/integration/benchmark/contLoadGenerator.go @@ -7,9 +7,6 @@ import ( "sync" "time" - "github.com/rs/zerolog" - "gopkg.in/yaml.v3" - flowsdk "github.com/onflow/flow-go-sdk" "github.com/onflow/flow-go-sdk/access" "github.com/onflow/flow-go-sdk/crypto" @@ -20,6 +17,7 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/utils/unittest" + "github.com/rs/zerolog" ) const lostTransactionThreshold = 180 * time.Second @@ -58,24 +56,6 @@ type LoadConfig struct { TPSInitial int `yaml:"tps_initial"` } -func DefaultLoadConfig() LoadConfig { - return LoadConfig{ - TpsMax: 1200, - TpsMin: 1, - } -} - -func (s *LoadConfig) UnmarshalYAML(value *yaml.Node) error { - config := DefaultLoadConfig() - - if err := value.Decode(&config); err != nil { - return err - } - - *s = config - 
return nil -} - type LoadParams struct { NumberOfAccounts int LoadConfig LoadConfig diff --git a/integration/benchmark/server/load-config.yml b/integration/benchmark/server/load-config.yml index 8b81ca20cdc..f7c62d31729 100644 --- a/integration/benchmark/server/load-config.yml +++ b/integration/benchmark/server/load-config.yml @@ -1,12 +1,20 @@ token-transfer: load_type: token-transfer tps_initial: 800 + tps_min: 1 + tps_max: 1200 create-account: load_type: create-account tps_initial: 600 + tps_min: 1 + tps_max: 1200 ledger-heavy: load_type: ledger-heavy tps_initial: 3 + tps_min: 1 + tps_max: 1200 evm-transfer: load_type: evm-transfer tps_initial: 500 + tps_min: 1 + tps_max: 1200 From b897862fe6ce420df20a3bb3e6da494f2b6820b9 Mon Sep 17 00:00:00 2001 From: Janez Podhostnik Date: Fri, 15 Mar 2024 12:59:29 +0100 Subject: [PATCH 008/148] fix build and lint --- integration/benchmark/cmd/ci/main.go | 6 ++---- integration/benchmark/cmd/manual/main.go | 12 ++++++++---- integration/benchmark/contLoadGenerator.go | 3 ++- 3 files changed, 12 insertions(+), 9 deletions(-) diff --git a/integration/benchmark/cmd/ci/main.go b/integration/benchmark/cmd/ci/main.go index 08cff5eb57c..4db99ae21cd 100644 --- a/integration/benchmark/cmd/ci/main.go +++ b/integration/benchmark/cmd/ci/main.go @@ -3,24 +3,22 @@ package main import ( "context" "flag" - "gopkg.in/yaml.v3" "os" "strings" "time" - "github.com/onflow/flow-go/integration/benchmark/load" - "github.com/prometheus/client_golang/prometheus" - "github.com/rs/zerolog" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" + "gopkg.in/yaml.v3" flowsdk "github.com/onflow/flow-go-sdk" "github.com/onflow/flow-go-sdk/access" client "github.com/onflow/flow-go-sdk/access/grpc" "github.com/onflow/flow-go/integration/benchmark" + "github.com/onflow/flow-go/integration/benchmark/load" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/utils/unittest" diff --git 
a/integration/benchmark/cmd/manual/main.go b/integration/benchmark/cmd/manual/main.go index f42e21ef894..ffaa9615570 100644 --- a/integration/benchmark/cmd/manual/main.go +++ b/integration/benchmark/cmd/manual/main.go @@ -9,8 +9,6 @@ import ( "strings" "time" - "github.com/onflow/flow-go/integration/benchmark/load" - "github.com/prometheus/client_golang/prometheus" "github.com/rs/zerolog" "google.golang.org/grpc" @@ -132,8 +130,14 @@ func main() { }, benchmark.LoadParams{ NumberOfAccounts: int(maxTPS) * *accountMultiplierFlag, - LoadType: load.LoadType(*loadTypeFlag), - FeedbackEnabled: *feedbackEnabled, + LoadConfig: benchmark.LoadConfig{ + LoadName: *loadTypeFlag, + LoadType: *loadTypeFlag, + TpsMax: int(maxTPS), + TpsMin: int(maxTPS), + TPSInitial: int(maxTPS), + }, + FeedbackEnabled: *feedbackEnabled, }, ) if err != nil { diff --git a/integration/benchmark/contLoadGenerator.go b/integration/benchmark/contLoadGenerator.go index eb0032814ae..4b5c147b8ff 100644 --- a/integration/benchmark/contLoadGenerator.go +++ b/integration/benchmark/contLoadGenerator.go @@ -7,6 +7,8 @@ import ( "sync" "time" + "github.com/rs/zerolog" + flowsdk "github.com/onflow/flow-go-sdk" "github.com/onflow/flow-go-sdk/access" "github.com/onflow/flow-go-sdk/crypto" @@ -17,7 +19,6 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/utils/unittest" - "github.com/rs/zerolog" ) const lostTransactionThreshold = 180 * time.Second From 68592cb1507f0ebd50d44c98aadce1ba84e3f080 Mon Sep 17 00:00:00 2001 From: Janez Podhostnik Date: Wed, 20 Mar 2024 16:05:48 +0100 Subject: [PATCH 009/148] fix makefile --- Makefile | 1 - 1 file changed, 1 deletion(-) diff --git a/Makefile b/Makefile index 724f84d5ed7..7ba432f6bb6 100644 --- a/Makefile +++ b/Makefile @@ -157,7 +157,6 @@ generate-mocks: install-mock-generators mockery --name '(Connector|PingInfoProvider)' --dir=network/p2p --case=underscore --output="./network/mocknetwork" 
--outpkg="mocknetwork" CGO_CFLAGS=$(CRYPTO_FLAG) mockgen -destination=storage/mocks/storage.go -package=mocks github.com/onflow/flow-go/storage Blocks,Headers,Payloads,Collections,Commits,Events,ServiceEvents,TransactionResults CGO_CFLAGS=$(CRYPTO_FLAG) mockgen -destination=network/mocknetwork/mock_network.go -package=mocknetwork github.com/onflow/flow-go/network EngineRegistry - mockery --name='.*' --dir=integration/benchmark/mocksiface --case=underscore --output="integration/benchmark/mock" --outpkg="mock" mockery --name=ExecutionDataStore --dir=module/executiondatasync/execution_data --case=underscore --output="./module/executiondatasync/execution_data/mock" --outpkg="mock" mockery --name=Downloader --dir=module/executiondatasync/execution_data --case=underscore --output="./module/executiondatasync/execution_data/mock" --outpkg="mock" mockery --name '(ExecutionDataRequester|IndexReporter)' --dir=module/state_synchronization --case=underscore --output="./module/state_synchronization/mock" --outpkg="state_synchronization" From 311115fa33896c3f3ee01f42da2e3986ea8b46fd Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Fri, 22 Mar 2024 00:38:06 -0400 Subject: [PATCH 010/148] move reusable util funcs to common package - update signatures - update usages --- cmd/bootstrap/cmd/check_machine_account.go | 11 +- cmd/bootstrap/cmd/db_encryption_key.go | 10 +- cmd/bootstrap/cmd/dkg.go | 11 +- cmd/bootstrap/cmd/final_list.go | 27 ++- cmd/bootstrap/cmd/finalize.go | 174 ++---------------- cmd/bootstrap/cmd/finalize_test.go | 7 +- cmd/bootstrap/cmd/genconfig.go | 6 +- cmd/bootstrap/cmd/key.go | 61 +++--- cmd/bootstrap/cmd/keygen.go | 21 ++- cmd/bootstrap/cmd/keys.go | 11 +- cmd/bootstrap/cmd/machine_account.go | 15 +- cmd/bootstrap/cmd/machine_account_key.go | 8 +- cmd/bootstrap/cmd/machine_account_key_test.go | 5 +- cmd/bootstrap/cmd/machine_account_test.go | 5 +- cmd/bootstrap/cmd/observer_network_key.go | 3 +- cmd/bootstrap/cmd/partner_infos.go | 10 +- 
cmd/bootstrap/cmd/qc.go | 6 +- cmd/bootstrap/cmd/rootblock.go | 24 ++- cmd/bootstrap/cmd/util.go | 97 ---------- cmd/dynamic_startup.go | 104 +---------- cmd/dynamic_startup_test.go | 7 +- .../cmd => util/cmd/common}/clusters.go | 36 ++-- cmd/util/cmd/common/node_info.go | 169 +++++++++++++++++ cmd/util/cmd/common/snapshot.go | 104 +++++++++++ cmd/util/cmd/common/utils.go | 130 +++++++++++++ cmd/utils.go | 1 - 26 files changed, 599 insertions(+), 464 deletions(-) rename cmd/{bootstrap/cmd => util/cmd/common}/clusters.go (76%) create mode 100644 cmd/util/cmd/common/node_info.go create mode 100644 cmd/util/cmd/common/snapshot.go create mode 100644 cmd/util/cmd/common/utils.go diff --git a/cmd/bootstrap/cmd/check_machine_account.go b/cmd/bootstrap/cmd/check_machine_account.go index e2261012219..5594f483060 100644 --- a/cmd/bootstrap/cmd/check_machine_account.go +++ b/cmd/bootstrap/cmd/check_machine_account.go @@ -13,6 +13,7 @@ import ( sdk "github.com/onflow/flow-go-sdk" client "github.com/onflow/flow-go-sdk/access/grpc" "github.com/onflow/flow-go/cmd" + "github.com/onflow/flow-go/cmd/util/cmd/common" model "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/module/epochs" ) @@ -44,7 +45,10 @@ func checkMachineAccountRun(_ *cobra.Command, _ []string) { // read the private node information - used to get the role var nodeInfoPriv model.NodeInfoPriv - readJSON(filepath.Join(flagOutdir, fmt.Sprintf(model.PathNodeInfoPriv, nodeID)), &nodeInfoPriv) + err = common.ReadJSON(filepath.Join(flagOutdir, fmt.Sprintf(model.PathNodeInfoPriv, nodeID)), &nodeInfoPriv) + if err != nil { + log.Fatal().Err(err).Msg("failed to read json") + } // read the machine account info file machineAccountInfo := readMachineAccountInfo(nodeID) @@ -97,7 +101,10 @@ func readMachineAccountInfo(nodeID string) model.NodeMachineAccountInfo { var machineAccountInfo model.NodeMachineAccountInfo path := filepath.Join(flagOutdir, fmt.Sprintf(model.PathNodeMachineAccountInfoPriv, nodeID)) - 
readJSON(path, &machineAccountInfo) + err := common.ReadJSON(path, &machineAccountInfo) + if err != nil { + log.Fatal().Err(err).Msg("failed to read json") + } return machineAccountInfo } diff --git a/cmd/bootstrap/cmd/db_encryption_key.go b/cmd/bootstrap/cmd/db_encryption_key.go index c99843e859b..560d6d17c02 100644 --- a/cmd/bootstrap/cmd/db_encryption_key.go +++ b/cmd/bootstrap/cmd/db_encryption_key.go @@ -7,6 +7,7 @@ import ( "github.com/spf13/cobra" "github.com/onflow/flow-go/cmd/bootstrap/utils" + "github.com/onflow/flow-go/cmd/util/cmd/common" model "github.com/onflow/flow-go/model/bootstrap" ) @@ -35,7 +36,7 @@ func dbEncryptionKeyRun(_ *cobra.Command, _ []string) { log = log.With().Str("path", dbEncryptionKeyPath).Logger() // check if the key already exists - exists, err := pathExists(path.Join(flagOutdir, dbEncryptionKeyPath)) + exists, err := common.PathExists(path.Join(flagOutdir, dbEncryptionKeyPath)) if err != nil { log.Fatal().Err(err).Msg("could not check if db encryption key already exists") } @@ -50,5 +51,10 @@ func dbEncryptionKeyRun(_ *cobra.Command, _ []string) { } log.Info().Msg("generated db encryption key") - writeText(dbEncryptionKeyPath, dbEncryptionKey) + err = common.WriteText(dbEncryptionKeyPath, flagOutdir, dbEncryptionKey) + if err != nil { + log.Fatal().Err(err).Msg("failed to write file") + } + + log.Info().Msgf("wrote file %v", dbEncryptionKeyPath) } diff --git a/cmd/bootstrap/cmd/dkg.go b/cmd/bootstrap/cmd/dkg.go index 42d5d84d838..38c9626c340 100644 --- a/cmd/bootstrap/cmd/dkg.go +++ b/cmd/bootstrap/cmd/dkg.go @@ -6,6 +6,7 @@ import ( "github.com/onflow/crypto" bootstrapDKG "github.com/onflow/flow-go/cmd/bootstrap/dkg" + "github.com/onflow/flow-go/cmd/util/cmd/common" model "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/dkg" "github.com/onflow/flow-go/model/encodable" @@ -38,17 +39,23 @@ func runBeaconKG(nodes []model.NodeInfo) dkg.DKGData { encKey := encodable.RandomBeaconPrivKey{PrivateKey: 
privKey} privKeyShares = append(privKeyShares, encKey) - writeJSON(fmt.Sprintf(model.PathRandomBeaconPriv, nodeID), encKey) + err = common.WriteJSON(fmt.Sprintf(model.PathRandomBeaconPriv, nodeID), flagOutdir, encKey) + if err != nil { + log.Fatal().Err(err).Msg("failed to write json") + } } // write full DKG info that will be used to construct QC - writeJSON(model.PathRootDKGData, inmem.EncodableFullDKG{ + err = common.WriteJSON(model.PathRootDKGData, flagOutdir, inmem.EncodableFullDKG{ GroupKey: encodable.RandomBeaconPubKey{ PublicKey: dkgData.PubGroupKey, }, PubKeyShares: pubKeyShares, PrivKeyShares: privKeyShares, }) + if err != nil { + log.Fatal().Err(err).Msg("failed to write json") + } return dkgData } diff --git a/cmd/bootstrap/cmd/final_list.go b/cmd/bootstrap/cmd/final_list.go index ac1b000876b..52db64980f9 100644 --- a/cmd/bootstrap/cmd/final_list.go +++ b/cmd/bootstrap/cmd/final_list.go @@ -4,6 +4,7 @@ import ( "github.com/spf13/cobra" "github.com/onflow/flow-go/cmd" + "github.com/onflow/flow-go/cmd/util/cmd/common" model "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/flow" ) @@ -65,7 +66,10 @@ func finalList(cmd *cobra.Command, args []string) { validateNodes(localNodes, registeredNodes) // write node-config.json with the new list of nodes to be used for the `finalize` command - writeJSON(model.PathFinallist, model.ToPublicNodeInfoList(localNodes)) + err := common.WriteJSON(model.PathFinallist, flagOutdir, model.ToPublicNodeInfoList(localNodes)) + if err != nil { + log.Fatal().Err(err).Msg("failed to write json") + } } func validateNodes(localNodes []model.NodeInfo, registeredNodes []model.NodeInfo) { @@ -229,16 +233,16 @@ func checkMismatchingNodes(localNodes []model.NodeInfo, registeredNodes []model. 
} func assembleInternalNodesWithoutWeight() []model.NodeInfo { - privInternals := readInternalNodes() + privInternals := common.ReadInternalNodes(log, flagInternalNodePrivInfoDir) log.Info().Msgf("read %v internal private node-info files", len(privInternals)) var nodes []model.NodeInfo for _, internal := range privInternals { // check if address is valid format - validateAddressFormat(internal.Address) + common.ValidateAddressFormat(log, internal.Address) // validate every single internal node - nodeID := validateNodeID(internal.NodeID) + nodeID := ValidateNodeID(internal.NodeID) node := model.NewPrivateNodeInfo( nodeID, internal.Role, @@ -255,26 +259,29 @@ func assembleInternalNodesWithoutWeight() []model.NodeInfo { } func assemblePartnerNodesWithoutWeight() []model.NodeInfo { - partners := readPartnerNodes() + partners := common.ReadPartnerNodes(log, flagPartnerNodeInfoDir) log.Info().Msgf("read %v partner node configuration files", len(partners)) return createPublicNodeInfo(partners) } func readStakingContractDetails() []model.NodeInfo { var stakingNodes []model.NodeInfoPub - readJSON(flagStakingNodesPath, &stakingNodes) + err := common.ReadJSON(flagStakingNodesPath, &stakingNodes) + if err != nil { + log.Fatal().Err(err).Msg("failed to read json") + } return createPublicNodeInfo(stakingNodes) } func createPublicNodeInfo(nodes []model.NodeInfoPub) []model.NodeInfo { var publicInfoNodes []model.NodeInfo for _, n := range nodes { - validateAddressFormat(n.Address) + common.ValidateAddressFormat(log, n.Address) // validate every single partner node - nodeID := validateNodeID(n.NodeID) - networkPubKey := validateNetworkPubKey(n.NetworkPubKey) - stakingPubKey := validateStakingPubKey(n.StakingPubKey) + nodeID := ValidateNodeID(n.NodeID) + networkPubKey := ValidateNetworkPubKey(n.NetworkPubKey) + stakingPubKey := ValidateStakingPubKey(n.StakingPubKey) // all nodes should have equal weight node := model.NewPublicNodeInfo( diff --git a/cmd/bootstrap/cmd/finalize.go 
b/cmd/bootstrap/cmd/finalize.go index 7faee8967f4..ee4cce5e8c5 100644 --- a/cmd/bootstrap/cmd/finalize.go +++ b/cmd/bootstrap/cmd/finalize.go @@ -15,6 +15,7 @@ import ( "github.com/onflow/flow-go/cmd" "github.com/onflow/flow-go/cmd/bootstrap/run" "github.com/onflow/flow-go/cmd/bootstrap/utils" + "github.com/onflow/flow-go/cmd/util/cmd/common" hotstuff "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/fvm" model "github.com/onflow/flow-go/model/bootstrap" @@ -115,11 +116,11 @@ func finalize(cmd *cobra.Command, args []string) { } log.Info().Msg("collecting partner network and staking keys") - partnerNodes := readPartnerNodeInfos() + partnerNodes := common.ReadPartnerNodeInfos(log, flagPartnerWeights, flagPartnerNodeInfoDir) log.Info().Msg("") log.Info().Msg("generating internal private networking and staking keys") - internalNodes := readInternalNodeInfos() + internalNodes := common.ReadInternalNodeInfos(log, flagInternalNodePrivInfoDir, flagConfig) log.Info().Msg("") log.Info().Msg("checking constraints on consensus nodes") @@ -195,7 +196,10 @@ func finalize(cmd *cobra.Command, args []string) { } // write snapshot to disk - writeJSON(model.PathRootProtocolStateSnapshot, snapshot.Encodable()) + err = common.WriteJSON(model.PathRootProtocolStateSnapshot, flagOutdir, snapshot.Encodable()) + if err != nil { + log.Fatal().Err(err).Msg("failed to write json") + } log.Info().Msg("") // read snapshot and verify consistency @@ -250,7 +254,7 @@ func finalize(cmd *cobra.Command, args []string) { log.Info().Msg("") // print count of all nodes - roleCounts := nodeCountByRole(stakingNodes) + roleCounts := common.NodeCountByRole(stakingNodes) log.Info().Msg(fmt.Sprintf("created keys for %d %s nodes", roleCounts[flow.RoleConsensus], flow.RoleConsensus.String())) log.Info().Msg(fmt.Sprintf("created keys for %d %s nodes", roleCounts[flow.RoleCollection], flow.RoleCollection.String())) log.Info().Msg(fmt.Sprintf("created keys for %d %s nodes", 
roleCounts[flow.RoleVerification], flow.RoleVerification.String())) @@ -263,7 +267,7 @@ func finalize(cmd *cobra.Command, args []string) { // readRootBlockVotes reads votes for root block func readRootBlockVotes() []*hotstuff.Vote { var votes []*hotstuff.Vote - files, err := filesInDir(flagRootBlockVotesDir) + files, err := common.FilesInDir(flagRootBlockVotesDir) if err != nil { log.Fatal().Err(err).Msg("could not read root block votes") } @@ -275,159 +279,17 @@ func readRootBlockVotes() []*hotstuff.Vote { // read file and append to partners var vote hotstuff.Vote - readJSON(f, &vote) + err = common.ReadJSON(f, &vote) + if err != nil { + log.Fatal().Err(err).Msg("failed to read json") + } + votes = append(votes, &vote) log.Info().Msgf("read vote %v for block %v from signerID %v", vote.ID(), vote.BlockID, vote.SignerID) } return votes } -// readPartnerNodeInfos returns a list of partner nodes after gathering weights -// and public key information from configuration files -func readPartnerNodeInfos() []model.NodeInfo { - partners := readPartnerNodes() - log.Info().Msgf("read %d partner node configuration files", len(partners)) - - var weights PartnerWeights - readJSON(flagPartnerWeights, &weights) - log.Info().Msgf("read %d weights for partner nodes", len(weights)) - - var nodes []model.NodeInfo - for _, partner := range partners { - // validate every single partner node - nodeID := validateNodeID(partner.NodeID) - networkPubKey := validateNetworkPubKey(partner.NetworkPubKey) - stakingPubKey := validateStakingPubKey(partner.StakingPubKey) - weight, valid := validateWeight(weights[partner.NodeID]) - if !valid { - log.Error().Msgf("weights: %v", weights) - log.Fatal().Msgf("partner node id %x has no weight", nodeID) - } - if weight != flow.DefaultInitialWeight { - log.Warn().Msgf("partner node (id=%x) has non-default weight (%d != %d)", partner.NodeID, weight, flow.DefaultInitialWeight) - } - - node := model.NewPublicNodeInfo( - nodeID, - partner.Role, - 
partner.Address, - weight, - networkPubKey.PublicKey, - stakingPubKey.PublicKey, - ) - nodes = append(nodes, node) - } - - return nodes -} - -// readPartnerNodes reads the partner node information -func readPartnerNodes() []model.NodeInfoPub { - var partners []model.NodeInfoPub - files, err := filesInDir(flagPartnerNodeInfoDir) - if err != nil { - log.Fatal().Err(err).Msg("could not read partner node infos") - } - for _, f := range files { - // skip files that do not include node-infos - if !strings.Contains(f, model.PathPartnerNodeInfoPrefix) { - continue - } - - // read file and append to partners - var p model.NodeInfoPub - readJSON(f, &p) - partners = append(partners, p) - } - return partners -} - -// readInternalNodeInfos returns a list of internal nodes after collecting weights -// from configuration files -func readInternalNodeInfos() []model.NodeInfo { - privInternals := readInternalNodes() - log.Info().Msgf("read %v internal private node-info files", len(privInternals)) - - weights := internalWeightsByAddress() - log.Info().Msgf("read %d weights for internal nodes", len(weights)) - - var nodes []model.NodeInfo - for _, internal := range privInternals { - // check if address is valid format - validateAddressFormat(internal.Address) - - // validate every single internal node - nodeID := validateNodeID(internal.NodeID) - weight, valid := validateWeight(weights[internal.Address]) - if !valid { - log.Error().Msgf("weights: %v", weights) - log.Fatal().Msgf("internal node %v has no weight. 
Did you forget to update the node address?", internal) - } - if weight != flow.DefaultInitialWeight { - log.Warn().Msgf("internal node (id=%x) has non-default weight (%d != %d)", internal.NodeID, weight, flow.DefaultInitialWeight) - } - - node := model.NewPrivateNodeInfo( - nodeID, - internal.Role, - internal.Address, - weight, - internal.NetworkPrivKey, - internal.StakingPrivKey, - ) - - nodes = append(nodes, node) - } - - return nodes -} - -// readInternalNodes reads our internal node private infos generated by -// `keygen` command and returns it -func readInternalNodes() []model.NodeInfoPriv { - var internalPrivInfos []model.NodeInfoPriv - - // get files in internal priv node infos directory - files, err := filesInDir(flagInternalNodePrivInfoDir) - if err != nil { - log.Fatal().Err(err).Msg("could not read partner node infos") - } - - // for each of the files - for _, f := range files { - // skip files that do not include node-infos - if !strings.Contains(f, model.PathPrivNodeInfoPrefix) { - continue - } - - // read file and append to partners - var p model.NodeInfoPriv - readJSON(f, &p) - internalPrivInfos = append(internalPrivInfos, p) - } - - return internalPrivInfos -} - -// internalWeightsByAddress returns a mapping of node address by weight for internal nodes -func internalWeightsByAddress() map[string]uint64 { - // read json - var configs []model.NodeConfig - readJSON(flagConfig, &configs) - log.Info().Interface("config", configs).Msgf("read internal node configurations") - - weights := make(map[string]uint64) - for _, config := range configs { - if _, ok := weights[config.Address]; !ok { - weights[config.Address] = config.Weight - } else { - log.Error().Msgf("duplicate internal node address %s", config.Address) - } - } - - return weights -} - // mergeNodeInfos merges the internal and partner nodes and checks if there are no // duplicate addresses or node Ids. 
// @@ -495,28 +357,28 @@ func readDKGData() dkg.DKGData { // Validation utility methods ------------------------------------------------ -func validateNodeID(nodeID flow.Identifier) flow.Identifier { +func ValidateNodeID(nodeID flow.Identifier) flow.Identifier { if nodeID == flow.ZeroID { log.Fatal().Msg("NodeID must not be zero") } return nodeID } -func validateNetworkPubKey(key encodable.NetworkPubKey) encodable.NetworkPubKey { +func ValidateNetworkPubKey(key encodable.NetworkPubKey) encodable.NetworkPubKey { if key.PublicKey == nil { log.Fatal().Msg("NetworkPubKey must not be nil") } return key } -func validateStakingPubKey(key encodable.StakingPubKey) encodable.StakingPubKey { +func ValidateStakingPubKey(key encodable.StakingPubKey) encodable.StakingPubKey { if key.PublicKey == nil { log.Fatal().Msg("StakingPubKey must not be nil") } return key } -func validateWeight(weight uint64) (uint64, bool) { +func ValidateWeight(weight uint64) (uint64, bool) { return weight, weight > 0 } diff --git a/cmd/bootstrap/cmd/finalize_test.go b/cmd/bootstrap/cmd/finalize_test.go index 47d892350f1..395b1a4a774 100644 --- a/cmd/bootstrap/cmd/finalize_test.go +++ b/cmd/bootstrap/cmd/finalize_test.go @@ -9,10 +9,12 @@ import ( "strings" "testing" + "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" utils "github.com/onflow/flow-go/cmd/bootstrap/utils" + "github.com/onflow/flow-go/cmd/util/cmd/common" model "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/utils/unittest" @@ -110,15 +112,16 @@ func TestClusterAssignment(t *testing.T) { partners := unittest.NodeInfosFixture(partnersLen, unittest.WithRole(flow.RoleCollection)) internals := unittest.NodeInfosFixture(internalLen, unittest.WithRole(flow.RoleCollection)) + log := zerolog.Nop() // should not error - _, clusters, err := constructClusterAssignment(partners, internals) + _, clusters, err := 
common.ConstructClusterAssignment(log, partners, internals, int(flagCollectionClusters)) require.NoError(t, err) require.True(t, checkClusterConstraint(clusters, partners, internals)) // unhappy Path internals = internals[:21] // reduce one internal node // should error - _, _, err = constructClusterAssignment(partners, internals) + _, _, err = common.ConstructClusterAssignment(log, partners, internals, int(flagCollectionClusters)) require.Error(t, err) // revert the flag value flagCollectionClusters = tmp diff --git a/cmd/bootstrap/cmd/genconfig.go b/cmd/bootstrap/cmd/genconfig.go index ccf66104ecc..f8b565ca704 100644 --- a/cmd/bootstrap/cmd/genconfig.go +++ b/cmd/bootstrap/cmd/genconfig.go @@ -5,6 +5,7 @@ import ( "github.com/spf13/cobra" + "github.com/onflow/flow-go/cmd/util/cmd/common" model "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/flow" ) @@ -56,7 +57,10 @@ func genconfigCmdRun(_ *cobra.Command, _ []string) { configs = append(configs, createConf(flow.RoleVerification, i)) } - writeJSON(flagConfig, configs) + err := common.WriteJSON(flagConfig, flagOutdir, configs) + if err != nil { + log.Fatal().Err(err).Msg("failed to write json") + } } // genconfigCmd represents the genconfig command diff --git a/cmd/bootstrap/cmd/key.go b/cmd/bootstrap/cmd/key.go index d8cdc46afa1..8265b93dd6c 100644 --- a/cmd/bootstrap/cmd/key.go +++ b/cmd/bootstrap/cmd/key.go @@ -2,18 +2,14 @@ package cmd import ( "fmt" - "net" - "strconv" - - "github.com/multiformats/go-multiaddr" "github.com/onflow/crypto" "github.com/spf13/cobra" "github.com/onflow/flow-go/cmd" "github.com/onflow/flow-go/cmd/bootstrap/utils" + "github.com/onflow/flow-go/cmd/util/cmd/common" model "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/flow" - p2putils "github.com/onflow/flow-go/network/p2p/utils" ) var ( @@ -75,7 +71,7 @@ func keyCmdRun(_ *cobra.Command, _ []string) { // validate inputs role := validateRole(flagRole) - 
validateAddressFormat(flagAddress) + common.ValidateAddressFormat(log, flagAddress) // generate staking and network keys networkKey, stakingKey, secretsDBKey, err := generateKeys() @@ -97,11 +93,27 @@ func keyCmdRun(_ *cobra.Command, _ []string) { } // write files - writeText(model.PathNodeID, []byte(nodeInfo.NodeID.String())) - writeJSON(fmt.Sprintf(model.PathNodeInfoPriv, nodeInfo.NodeID), private) - writeText(fmt.Sprintf(model.PathSecretsEncryptionKey, nodeInfo.NodeID), secretsDBKey) - writeJSON(fmt.Sprintf(model.PathNodeInfoPub, nodeInfo.NodeID), nodeInfo.Public()) + err = common.WriteText(model.PathNodeID, flagOutdir, []byte(nodeInfo.NodeID.String())) + if err != nil { + log.Fatal().Err(err).Msg("failed to write file") + } + log.Info().Msgf("wrote file %v", model.PathNodeID) + + err = common.WriteJSON(fmt.Sprintf(model.PathNodeInfoPriv, nodeInfo.NodeID), flagOutdir, private) + if err != nil { + log.Fatal().Err(err).Msg("failed to write json") + } + err = common.WriteText(fmt.Sprintf(model.PathSecretsEncryptionKey, nodeInfo.NodeID), flagOutdir, secretsDBKey) + if err != nil { + log.Fatal().Err(err).Msg("failed to write file") + } + log.Info().Msgf("wrote file %v", model.PathSecretsEncryptionKey) + + err = common.WriteJSON(fmt.Sprintf(model.PathNodeInfoPub, nodeInfo.NodeID), flagOutdir, nodeInfo.Public()) + if err != nil { + log.Fatal().Err(err).Msg("failed to write json") + } // write machine account info if role == flow.RoleCollection || role == flow.RoleConsensus { @@ -114,7 +126,10 @@ func keyCmdRun(_ *cobra.Command, _ []string) { log.Debug().Str("address", flagAddress).Msg("assembling machine account information") // write the public key to terminal for entry in Flow Port machineAccountPriv := assembleNodeMachineAccountKey(machineKey) - writeJSON(fmt.Sprintf(model.PathNodeMachineAccountPrivateKey, nodeInfo.NodeID), machineAccountPriv) + err = common.WriteJSON(fmt.Sprintf(model.PathNodeMachineAccountPrivateKey, nodeInfo.NodeID), flagOutdir, 
machineAccountPriv) + if err != nil { + log.Fatal().Err(err).Msg("failed to write json") + } } } @@ -164,27 +179,3 @@ func validateRole(role string) flow.Role { } return parsed } - -// validateAddressFormat validates the address provided by pretty much doing what the network layer would do before -// starting the node -func validateAddressFormat(address string) { - checkErr := func(err error) { - if err != nil { - log.Fatal().Err(err).Str("address", address).Msg("invalid address format.\n" + - `Address needs to be in the format hostname:port or ip:port e.g. "flow.com:3569"`) - } - } - - // split address into ip/hostname and port - ip, port, err := net.SplitHostPort(address) - checkErr(err) - - // check that port number is indeed a number - _, err = strconv.Atoi(port) - checkErr(err) - - // create a libp2p address from the ip and port - lp2pAddr := p2putils.MultiAddressStr(ip, port) - _, err = multiaddr.NewMultiaddr(lp2pAddr) - checkErr(err) -} diff --git a/cmd/bootstrap/cmd/keygen.go b/cmd/bootstrap/cmd/keygen.go index 62457fe4b56..72de2201d97 100644 --- a/cmd/bootstrap/cmd/keygen.go +++ b/cmd/bootstrap/cmd/keygen.go @@ -5,11 +5,11 @@ import ( "io" "os" - "github.com/onflow/flow-go/cmd" - "github.com/onflow/flow-go/cmd/bootstrap/utils" - "github.com/spf13/cobra" + "github.com/onflow/flow-go/cmd" + "github.com/onflow/flow-go/cmd/bootstrap/utils" + "github.com/onflow/flow-go/cmd/util/cmd/common" model "github.com/onflow/flow-go/model/bootstrap" ) @@ -22,7 +22,7 @@ var keygenCmd = &cobra.Command{ Long: `Generate Staking and Networking keys for a list of nodes provided by the flag '--config'`, Run: func(cmd *cobra.Command, args []string) { // check if out directory exists - exists, err := pathExists(flagOutdir) + exists, err := common.PathExists(flagOutdir) if err != nil { log.Error().Msg("could not check if directory exists") return @@ -49,12 +49,10 @@ var keygenCmd = &cobra.Command{ // write key files writeJSONFile := func(relativePath string, val interface{}) error 
{ - writeJSON(relativePath, val) - return nil + return common.WriteJSON(relativePath, flagOutdir, val) } writeFile := func(relativePath string, data []byte) error { - writeText(relativePath, data) - return nil + return common.WriteText(relativePath, flagOutdir, data) } log.Info().Msg("writing internal private key files") @@ -85,7 +83,7 @@ var keygenCmd = &cobra.Command{ } // count roles - roleCounts := nodeCountByRole(nodes) + roleCounts := common.NodeCountByRole(nodes) for role, count := range roleCounts { log.Info().Msg(fmt.Sprintf("created keys for %d %s nodes", count, role.String())) } @@ -127,5 +125,8 @@ func genNodePubInfo(nodes []model.NodeInfo) { for _, node := range nodes { pubNodes = append(pubNodes, node.Public()) } - writeJSON(model.PathInternalNodeInfosPub, pubNodes) + err := common.WriteJSON(model.PathInternalNodeInfosPub, flagOutdir, pubNodes) + if err != nil { + log.Fatal().Err(err).Msg("failed to write json") + } } diff --git a/cmd/bootstrap/cmd/keys.go b/cmd/bootstrap/cmd/keys.go index b9d32528727..7d3f053a714 100644 --- a/cmd/bootstrap/cmd/keys.go +++ b/cmd/bootstrap/cmd/keys.go @@ -9,6 +9,7 @@ import ( "github.com/onflow/crypto" + "github.com/onflow/flow-go/cmd/util/cmd/common" model "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/encodable" "github.com/onflow/flow-go/model/flow" @@ -20,7 +21,11 @@ import ( func genNetworkAndStakingKeys() []model.NodeInfo { var nodeConfigs []model.NodeConfig - readJSON(flagConfig, &nodeConfigs) + err := common.ReadJSON(flagConfig, &nodeConfigs) + if err != nil { + log.Fatal().Err(err).Msg("failed to read json") + } + nodes := len(nodeConfigs) log.Info().Msgf("read %v node configurations", nodes) @@ -62,8 +67,8 @@ func assembleNodeInfo(nodeConfig model.NodeConfig, networkKey, stakingKey crypto } log.Debug(). - Str("networkPubKey", pubKeyToString(networkKey.PublicKey())). - Str("stakingPubKey", pubKeyToString(stakingKey.PublicKey())). 
+ Str("networkPubKey", common.PubKeyToString(networkKey.PublicKey())). + Str("stakingPubKey", common.PubKeyToString(stakingKey.PublicKey())). Msg("encoded public staking and network keys") nodeInfo := model.NewPrivateNodeInfo( diff --git a/cmd/bootstrap/cmd/machine_account.go b/cmd/bootstrap/cmd/machine_account.go index a1305ae1035..bc16565e267 100644 --- a/cmd/bootstrap/cmd/machine_account.go +++ b/cmd/bootstrap/cmd/machine_account.go @@ -9,6 +9,7 @@ import ( "github.com/spf13/cobra" "github.com/onflow/flow-go/cmd" + "github.com/onflow/flow-go/cmd/util/cmd/common" model "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/flow" ioutils "github.com/onflow/flow-go/utils/io" @@ -52,7 +53,7 @@ func machineAccountRun(_ *cobra.Command, _ []string) { // check if node-machine-account-key.priv.json path exists machineAccountKeyPath := fmt.Sprintf(model.PathNodeMachineAccountPrivateKey, nodeID) - keyExists, err := pathExists(filepath.Join(flagOutdir, machineAccountKeyPath)) + keyExists, err := common.PathExists(filepath.Join(flagOutdir, machineAccountKeyPath)) if err != nil { log.Fatal().Err(err).Msg("could not check if node-machine-account-key.priv.json exists") } @@ -63,7 +64,7 @@ func machineAccountRun(_ *cobra.Command, _ []string) { // check if node-machine-account-info.priv.json file exists in boostrap dir machineAccountInfoPath := fmt.Sprintf(model.PathNodeMachineAccountInfoPriv, nodeID) - infoExists, err := pathExists(filepath.Join(flagOutdir, machineAccountInfoPath)) + infoExists, err := common.PathExists(filepath.Join(flagOutdir, machineAccountInfoPath)) if err != nil { log.Fatal().Err(err).Msg("could not check if node-machine-account-info.priv.json exists") } @@ -80,7 +81,10 @@ func machineAccountRun(_ *cobra.Command, _ []string) { machineAccountInfo := assembleNodeMachineAccountInfo(machinePrivKey, flagMachineAccountAddress) // write machine account info - writeJSON(fmt.Sprintf(model.PathNodeMachineAccountInfoPriv, nodeID), 
machineAccountInfo) + err = common.WriteJSON(fmt.Sprintf(model.PathNodeMachineAccountInfoPriv, nodeID), flagOutdir, machineAccountInfo) + if err != nil { + log.Fatal().Err(err).Msg("failed to write json") + } } // readMachineAccountPriv reads the machine account private key files in the bootstrap dir @@ -88,7 +92,10 @@ func readMachineAccountKey(nodeID string) crypto.PrivateKey { var machineAccountPriv model.NodeMachineAccountKey path := filepath.Join(flagOutdir, fmt.Sprintf(model.PathNodeMachineAccountPrivateKey, nodeID)) - readJSON(path, &machineAccountPriv) + err := common.ReadJSON(path, &machineAccountPriv) + if err != nil { + log.Fatal().Err(err).Msg("failed to read json") + } return machineAccountPriv.PrivateKey.PrivateKey } diff --git a/cmd/bootstrap/cmd/machine_account_key.go b/cmd/bootstrap/cmd/machine_account_key.go index 9ec26c68520..ee7c01ebad2 100644 --- a/cmd/bootstrap/cmd/machine_account_key.go +++ b/cmd/bootstrap/cmd/machine_account_key.go @@ -8,6 +8,7 @@ import ( "github.com/spf13/cobra" "github.com/onflow/flow-go/cmd/bootstrap/utils" + "github.com/onflow/flow-go/cmd/util/cmd/common" model "github.com/onflow/flow-go/model/bootstrap" ) @@ -37,7 +38,7 @@ func machineAccountKeyRun(_ *cobra.Command, _ []string) { // check if node-machine-account-key.priv.json path exists machineAccountKeyPath := fmt.Sprintf(model.PathNodeMachineAccountPrivateKey, nodeID) - keyExists, err := pathExists(path.Join(flagOutdir, machineAccountKeyPath)) + keyExists, err := common.PathExists(path.Join(flagOutdir, machineAccountKeyPath)) if err != nil { log.Fatal().Err(err).Msg("could not check if node-machine-account-key.priv.json exists") } @@ -56,5 +57,8 @@ func machineAccountKeyRun(_ *cobra.Command, _ []string) { // also write the public key to terminal for entry in Flow Port machineAccountPriv := assembleNodeMachineAccountKey(machineKey) - writeJSON(machineAccountKeyPath, machineAccountPriv) + err = common.WriteJSON(machineAccountKeyPath, flagOutdir, machineAccountPriv) + 
if err != nil { + log.Fatal().Err(err).Msg("failed to write json") + } } diff --git a/cmd/bootstrap/cmd/machine_account_key_test.go b/cmd/bootstrap/cmd/machine_account_key_test.go index adcf45ea4b2..dfd93fcd5f6 100644 --- a/cmd/bootstrap/cmd/machine_account_key_test.go +++ b/cmd/bootstrap/cmd/machine_account_key_test.go @@ -11,6 +11,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/model/bootstrap" model "github.com/onflow/flow-go/model/bootstrap" ioutils "github.com/onflow/flow-go/utils/io" @@ -49,7 +50,7 @@ func TestMachineAccountKeyFileExists(t *testing.T) { // read file priv key file before command var machineAccountPrivBefore model.NodeMachineAccountKey - readJSON(machineKeyFilePath, &machineAccountPrivBefore) + require.NoError(t, common.ReadJSON(machineKeyFilePath, &machineAccountPrivBefore)) // run command with flags machineAccountKeyRun(nil, nil) @@ -59,7 +60,7 @@ func TestMachineAccountKeyFileExists(t *testing.T) { // read machine account key file again var machineAccountPrivAfter model.NodeMachineAccountKey - readJSON(machineKeyFilePath, &machineAccountPrivAfter) + require.NoError(t, common.ReadJSON(machineKeyFilePath, &machineAccountPrivAfter)) // check if key was modified assert.Equal(t, machineAccountPrivBefore, machineAccountPrivAfter) diff --git a/cmd/bootstrap/cmd/machine_account_test.go b/cmd/bootstrap/cmd/machine_account_test.go index 7a1627ca3ac..27631a3bddc 100644 --- a/cmd/bootstrap/cmd/machine_account_test.go +++ b/cmd/bootstrap/cmd/machine_account_test.go @@ -11,6 +11,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/model/bootstrap" model "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/flow" @@ -115,14 +116,14 @@ func TestMachineAccountInfoFileExists(t *testing.T) { // read in info 
file var machineAccountInfoBefore model.NodeMachineAccountInfo - readJSON(machineInfoFilePath, &machineAccountInfoBefore) + require.NoError(t, common.ReadJSON(machineInfoFilePath, &machineAccountInfoBefore)) // run again and make sure info file was not changed machineAccountRun(nil, nil) require.Regexp(t, regex, hook.logs.String()) var machineAccountInfoAfter model.NodeMachineAccountInfo - readJSON(machineInfoFilePath, &machineAccountInfoAfter) + require.NoError(t, common.ReadJSON(machineInfoFilePath, &machineAccountInfoAfter)) assert.Equal(t, machineAccountInfoBefore, machineAccountInfoAfter) }) diff --git a/cmd/bootstrap/cmd/observer_network_key.go b/cmd/bootstrap/cmd/observer_network_key.go index 330b2cad47e..dfb6a2f609e 100644 --- a/cmd/bootstrap/cmd/observer_network_key.go +++ b/cmd/bootstrap/cmd/observer_network_key.go @@ -10,6 +10,7 @@ import ( "github.com/onflow/flow-go/cmd" "github.com/onflow/flow-go/cmd/bootstrap/utils" + "github.com/onflow/flow-go/cmd/util/cmd/common" ) var ( @@ -47,7 +48,7 @@ func observerNetworkKeyRun(_ *cobra.Command, _ []string) { } // if the file already exists, exit - keyExists, err := pathExists(flagOutputFile) + keyExists, err := common.PathExists(flagOutputFile) if err != nil { log.Fatal().Err(err).Msgf("could not check if %s exists", flagOutputFile) } diff --git a/cmd/bootstrap/cmd/partner_infos.go b/cmd/bootstrap/cmd/partner_infos.go index 05db3192609..d60fb7ac97e 100644 --- a/cmd/bootstrap/cmd/partner_infos.go +++ b/cmd/bootstrap/cmd/partner_infos.go @@ -203,12 +203,18 @@ func validateANNetworkKey(key string) error { // writeNodePubInfoFile writes the node-pub-info file func writeNodePubInfoFile(info *bootstrap.NodeInfoPub) { fileOutputPath := fmt.Sprintf(bootstrap.PathNodeInfoPub, info.NodeID) - writeJSON(fileOutputPath, info) + err := common.WriteJSON(fileOutputPath, flagOutdir, info) + if err != nil { + log.Fatal().Err(err).Msg("failed to write json") + } } // writePartnerWeightsFile writes the partner weights file func 
writePartnerWeightsFile(partnerWeights PartnerWeights) { - writeJSON(bootstrap.FileNamePartnerWeights, partnerWeights) + err := common.WriteJSON(bootstrap.FileNamePartnerWeights, flagOutdir, partnerWeights) + if err != nil { + log.Fatal().Err(err).Msg("failed to write json") + } } func printNodeCounts(numOfNodesByType map[flow.Role]int, totalNumOfPartnerNodes, skippedNodes int) { diff --git a/cmd/bootstrap/cmd/qc.go b/cmd/bootstrap/cmd/qc.go index 6e97363051b..6b47bf6cf6f 100644 --- a/cmd/bootstrap/cmd/qc.go +++ b/cmd/bootstrap/cmd/qc.go @@ -5,6 +5,7 @@ import ( "path/filepath" "github.com/onflow/flow-go/cmd/bootstrap/run" + "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/dkg" @@ -48,6 +49,9 @@ func constructRootVotes(block *flow.Block, allNodes, internalNodes []bootstrap.N for _, vote := range votes { path := filepath.Join(bootstrap.DirnameRootBlockVotes, fmt.Sprintf(bootstrap.FilenameRootBlockVote, vote.SignerID)) - writeJSON(path, vote) + err = common.WriteJSON(path, flagOutdir, vote) + if err != nil { + log.Fatal().Err(err).Msg("failed to write json") + } } } diff --git a/cmd/bootstrap/cmd/rootblock.go b/cmd/bootstrap/cmd/rootblock.go index e32b9d95811..c745c4f4044 100644 --- a/cmd/bootstrap/cmd/rootblock.go +++ b/cmd/bootstrap/cmd/rootblock.go @@ -19,6 +19,7 @@ import ( "github.com/onflow/flow-go/cmd" "github.com/onflow/flow-go/cmd/bootstrap/run" + "github.com/onflow/flow-go/cmd/util/cmd/common" model "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/flow" ) @@ -144,11 +145,11 @@ func rootBlock(cmd *cobra.Command, args []string) { } log.Info().Msg("collecting partner network and staking keys") - partnerNodes := readPartnerNodeInfos() + partnerNodes := common.ReadPartnerNodeInfos(log, flagPartnerWeights, flagPartnerNodeInfoDir) log.Info().Msg("") log.Info().Msg("generating internal private networking and 
staking keys") - internalNodes := readInternalNodeInfos() + internalNodes := common.ReadInternalNodeInfos(log, flagInternalNodePrivInfoDir, flagConfig) log.Info().Msg("") log.Info().Msg("checking constraints on consensus nodes") @@ -157,7 +158,10 @@ func rootBlock(cmd *cobra.Command, args []string) { log.Info().Msg("assembling network and staking keys") stakingNodes := mergeNodeInfos(internalNodes, partnerNodes) - writeJSON(model.PathNodeInfosPub, model.ToPublicNodeInfoList(stakingNodes)) + err = common.WriteJSON(model.PathNodeInfosPub, flagOutdir, model.ToPublicNodeInfoList(stakingNodes)) + if err != nil { + log.Fatal().Err(err).Msg("failed to write json") + } log.Info().Msg("") log.Info().Msg("running DKG for consensus nodes") @@ -168,7 +172,7 @@ func rootBlock(cmd *cobra.Command, args []string) { participants := model.ToIdentityList(stakingNodes).Sort(flow.Canonical[flow.Identity]) log.Info().Msg("computing collection node clusters") - assignments, clusters, err := constructClusterAssignment(partnerNodes, internalNodes) + assignments, clusters, err := common.ConstructClusterAssignment(log, partnerNodes, internalNodes, int(flagCollectionClusters)) if err != nil { log.Fatal().Err(err).Msg("unable to generate cluster assignment") } @@ -179,7 +183,7 @@ func rootBlock(cmd *cobra.Command, args []string) { log.Info().Msg("") log.Info().Msg("constructing root QCs for collection node clusters") - clusterQCs := constructRootQCsForClusters(clusters, internalNodes, clusterBlocks) + clusterQCs := common.ConstructRootQCsForClusters(log, clusters, internalNodes, clusterBlocks) log.Info().Msg("") log.Info().Msg("constructing root header") @@ -206,12 +210,18 @@ func rootBlock(cmd *cobra.Command, args []string) { IntermediaryEpochData: intermediaryEpochData, IntermediaryParamsData: intermediaryParamsData, } - writeJSON(model.PathIntermediaryBootstrappingData, intermediaryData) + err = common.WriteJSON(model.PathIntermediaryBootstrappingData, flagOutdir, intermediaryData) + if err 
!= nil { + log.Fatal().Err(err).Msg("failed to write json") + } log.Info().Msg("") log.Info().Msg("constructing root block") block := constructRootBlock(header, epochSetup, epochCommit) - writeJSON(model.PathRootBlockData, block) + err = common.WriteJSON(model.PathRootBlockData, flagOutdir, block) + if err != nil { + log.Fatal().Err(err).Msg("failed to write json") + } log.Info().Msg("") log.Info().Msg("constructing and writing votes") diff --git a/cmd/bootstrap/cmd/util.go b/cmd/bootstrap/cmd/util.go index 38bdc481c8a..ea89d1d2db6 100644 --- a/cmd/bootstrap/cmd/util.go +++ b/cmd/bootstrap/cmd/util.go @@ -2,16 +2,6 @@ package cmd import ( "crypto/rand" - "encoding/json" - "fmt" - "os" - "path/filepath" - - "github.com/onflow/crypto" - - model "github.com/onflow/flow-go/model/bootstrap" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/utils/io" ) func GenerateRandomSeeds(n int, seedLen int) [][]byte { @@ -29,90 +19,3 @@ func GenerateRandomSeed(seedLen int) []byte { } return seed } - -func readJSON(path string, target interface{}) { - dat, err := io.ReadFile(path) - if err != nil { - log.Fatal().Err(err).Msg("cannot read json") - } - err = json.Unmarshal(dat, target) - if err != nil { - log.Fatal().Err(err).Msgf("cannot unmarshal json in file %s", path) - } -} - -func writeJSON(path string, data interface{}) { - bz, err := json.MarshalIndent(data, "", " ") - if err != nil { - log.Fatal().Err(err).Msg("cannot marshal json") - } - - writeText(path, bz) -} - -func writeText(path string, data []byte) { - path = filepath.Join(flagOutdir, path) - - err := os.MkdirAll(filepath.Dir(path), 0755) - if err != nil { - log.Fatal().Err(err).Msg("could not create output dir") - } - - err = os.WriteFile(path, data, 0644) - if err != nil { - log.Fatal().Err(err).Msg("could not write file") - } - - log.Info().Msgf("wrote file %v", path) -} - -func pubKeyToString(key crypto.PublicKey) string { - return fmt.Sprintf("%x", key.Encode()) -} - -func filesInDir(dir 
string) ([]string, error) { - exists, err := pathExists(dir) - if err != nil { - return nil, fmt.Errorf("could not check if dir exists: %w", err) - } - - if !exists { - return nil, fmt.Errorf("dir %v does not exist", dir) - } - - var files []string - err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { - if !info.IsDir() { - files = append(files, path) - } - return nil - }) - return files, err -} - -// pathExists -func pathExists(path string) (bool, error) { - _, err := os.Stat(path) - if err == nil { - return true, nil - } - if os.IsNotExist(err) { - return false, nil - } - return false, err -} - -func nodeCountByRole(nodes []model.NodeInfo) map[flow.Role]uint16 { - roleCounts := map[flow.Role]uint16{ - flow.RoleCollection: 0, - flow.RoleConsensus: 0, - flow.RoleExecution: 0, - flow.RoleVerification: 0, - flow.RoleAccess: 0, - } - for _, node := range nodes { - roleCounts[node.Role] = roleCounts[node.Role] + 1 - } - - return roleCounts -} diff --git a/cmd/dynamic_startup.go b/cmd/dynamic_startup.go index 49ccd3dcb7a..89b6d0a2054 100644 --- a/cmd/dynamic_startup.go +++ b/cmd/dynamic_startup.go @@ -3,116 +3,20 @@ package cmd import ( "context" "encoding/hex" - "encoding/json" "fmt" "path/filepath" "strconv" "strings" - "time" "github.com/onflow/crypto" - "github.com/rs/zerolog" - "github.com/sethvargo/go-retry" - - client "github.com/onflow/flow-go-sdk/access/grpc" "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/model/bootstrap" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/state/protocol" badgerstate "github.com/onflow/flow-go/state/protocol/badger" utilsio "github.com/onflow/flow-go/utils/io" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/state/protocol/inmem" ) -const getSnapshotTimeout = 30 * time.Second - -// GetProtocolSnapshot callback that will get latest finalized protocol snapshot -type GetProtocolSnapshot func(ctx context.Context) (protocol.Snapshot, 
error) - -// GetSnapshot will attempt to get the latest finalized protocol snapshot with the given flow configs -func GetSnapshot(ctx context.Context, client *client.Client) (*inmem.Snapshot, error) { - ctx, cancel := context.WithTimeout(ctx, getSnapshotTimeout) - defer cancel() - - b, err := client.GetLatestProtocolStateSnapshot(ctx) - if err != nil { - return nil, fmt.Errorf("failed to get latest finalized protocol state snapshot during pre-initialization: %w", err) - } - - var snapshotEnc inmem.EncodableSnapshot - err = json.Unmarshal(b, &snapshotEnc) - if err != nil { - return nil, fmt.Errorf("failed to unmarshal protocol state snapshot: %w", err) - } - - snapshot := inmem.SnapshotFromEncodable(snapshotEnc) - return snapshot, nil -} - -// GetSnapshotAtEpochAndPhase will get the latest finalized protocol snapshot and check the current epoch and epoch phase. -// If we are past the target epoch and epoch phase we exit the retry mechanism immediately. -// If not check the snapshot at the specified interval until we reach the target epoch and phase. -func GetSnapshotAtEpochAndPhase(ctx context.Context, log zerolog.Logger, startupEpoch uint64, startupEpochPhase flow.EpochPhase, retryInterval time.Duration, getSnapshot GetProtocolSnapshot) (protocol.Snapshot, error) { - start := time.Now() - - log = log.With(). - Uint64("target_epoch_counter", startupEpoch). - Str("target_epoch_phase", startupEpochPhase.String()). 
- Logger() - - log.Info().Msg("starting dynamic startup - waiting until target epoch/phase to start...") - - var snapshot protocol.Snapshot - var err error - - backoff := retry.NewConstant(retryInterval) - err = retry.Do(ctx, backoff, func(ctx context.Context) error { - snapshot, err = getSnapshot(ctx) - if err != nil { - err = fmt.Errorf("failed to get protocol snapshot: %w", err) - log.Error().Err(err).Msg("could not get protocol snapshot") - return retry.RetryableError(err) - } - - // if we encounter any errors interpreting the snapshot something went wrong stop retrying - currEpochCounter, err := snapshot.Epochs().Current().Counter() - if err != nil { - return fmt.Errorf("failed to get the current epoch counter: %w", err) - } - - currEpochPhase, err := snapshot.Phase() - if err != nil { - return fmt.Errorf("failed to get the current epoch phase: %w", err) - } - - // check if we are in or past the target epoch and phase - if currEpochCounter > startupEpoch || (currEpochCounter == startupEpoch && currEpochPhase >= startupEpochPhase) { - log.Info(). - Dur("time-waiting", time.Since(start)). - Uint64("current-epoch", currEpochCounter). - Str("current-epoch-phase", currEpochPhase.String()). - Msg("finished dynamic startup - reached desired epoch and phase") - - return nil - } - - // wait then poll for latest snapshot again - log.Info(). - Dur("time-waiting", time.Since(start)). - Uint64("current-epoch", currEpochCounter). - Str("current-epoch-phase", currEpochPhase.String()). 
- Msgf("waiting for epoch %d and phase %s", startupEpoch, startupEpochPhase.String()) - - return retry.RetryableError(fmt.Errorf("dynamic startup epoch and epoch phase not reached")) - }) - if err != nil { - return nil, fmt.Errorf("failed to wait for target epoch and phase: %w", err) - } - - return snapshot, nil -} - // ValidateDynamicStartupFlags will validate flags necessary for dynamic node startup // - assert dynamic-startup-access-publickey is valid ECDSA_P256 public key hex // - assert dynamic-startup-access-address is not empty @@ -182,7 +86,7 @@ func DynamicStartPreInit(nodeConfig *NodeConfig) error { } getSnapshotFunc := func(ctx context.Context) (protocol.Snapshot, error) { - return GetSnapshot(ctx, flowClient) + return common.GetSnapshot(ctx, flowClient) } // validate dynamic startup epoch flag @@ -199,7 +103,7 @@ func DynamicStartPreInit(nodeConfig *NodeConfig) error { return err } - snapshot, err := GetSnapshotAtEpochAndPhase( + snapshot, err := common.GetSnapshotAtEpochAndPhase( ctx, log, startupEpoch, @@ -218,7 +122,7 @@ func DynamicStartPreInit(nodeConfig *NodeConfig) error { // validateDynamicStartEpochFlags parse the start epoch flag and return the uin64 value, // if epoch = current return the current epoch counter -func validateDynamicStartEpochFlags(ctx context.Context, getSnapshot GetProtocolSnapshot, flagEpoch string) (uint64, error) { +func validateDynamicStartEpochFlags(ctx context.Context, getSnapshot common.GetProtocolSnapshot, flagEpoch string) (uint64, error) { // if flag is not `current` sentinel, it must be a specific epoch counter (uint64) if flagEpoch != "current" { diff --git a/cmd/dynamic_startup_test.go b/cmd/dynamic_startup_test.go index 775e8221fbf..27da13fca72 100644 --- a/cmd/dynamic_startup_test.go +++ b/cmd/dynamic_startup_test.go @@ -7,6 +7,7 @@ import ( "github.com/stretchr/testify/require" + "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/state/protocol" 
protocolmock "github.com/onflow/flow-go/state/protocol/mock" @@ -87,7 +88,7 @@ func TestGetSnapshotAtEpochAndPhase(t *testing.T) { _, _, targetPhase, targetEpoch := dynamicJoinFlagsFixture() // get snapshot - actualSnapshot, err := GetSnapshotAtEpochAndPhase( + actualSnapshot, err := common.GetSnapshotAtEpochAndPhase( context.Background(), unittest.Logger(), targetEpoch, @@ -113,7 +114,7 @@ func TestGetSnapshotAtEpochAndPhase(t *testing.T) { _, _, targetPhase, targetEpoch := dynamicJoinFlagsFixture() // get snapshot - actualSnapshot, err := GetSnapshotAtEpochAndPhase( + actualSnapshot, err := common.GetSnapshotAtEpochAndPhase( context.Background(), unittest.Logger(), targetEpoch, @@ -143,7 +144,7 @@ func TestGetSnapshotAtEpochAndPhase(t *testing.T) { _, _, targetPhase, _ := dynamicJoinFlagsFixture() // get snapshot - actualSnapshot, err := GetSnapshotAtEpochAndPhase( + actualSnapshot, err := common.GetSnapshotAtEpochAndPhase( context.Background(), unittest.Logger(), 5, diff --git a/cmd/bootstrap/cmd/clusters.go b/cmd/util/cmd/common/clusters.go similarity index 76% rename from cmd/bootstrap/cmd/clusters.go rename to cmd/util/cmd/common/clusters.go index 27ab1c52605..b8055acc2a1 100644 --- a/cmd/bootstrap/cmd/clusters.go +++ b/cmd/util/cmd/common/clusters.go @@ -1,9 +1,12 @@ -package cmd +package common import ( "errors" + "github.com/rs/zerolog" + "github.com/onflow/flow-go/cmd/bootstrap/run" + "github.com/onflow/flow-go/model/bootstrap" model "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/cluster" "github.com/onflow/flow-go/model/flow" @@ -12,7 +15,7 @@ import ( "github.com/onflow/flow-go/model/flow/filter" ) -// Construct random cluster assignment with internal and partner nodes. +// ConstructClusterAssignment random cluster assignment with internal and partner nodes. // The number of clusters is read from the `flagCollectionClusters` flag. 
// The number of nodes in each cluster is deterministic and only depends on the number of clusters // and the number of nodes. The repartition of internal and partner nodes is also deterministic @@ -24,17 +27,16 @@ import ( // satisfied, an exception is returned. // Note that if an exception is returned with a certain number of internal/partner nodes, there is no chance // of succeeding the assignment by re-running the function without increasing the internal nodes ratio. -func constructClusterAssignment(partnerNodes, internalNodes []model.NodeInfo) (flow.AssignmentList, flow.ClusterList, error) { +func ConstructClusterAssignment(log zerolog.Logger, partnerNodes, internalNodes []bootstrap.NodeInfo, numCollectionClusters int) (flow.AssignmentList, flow.ClusterList, error) { - partners := model.ToIdentityList(partnerNodes).Filter(filter.HasRole[flow.Identity](flow.RoleCollection)) - internals := model.ToIdentityList(internalNodes).Filter(filter.HasRole[flow.Identity](flow.RoleCollection)) - nClusters := int(flagCollectionClusters) + partners := bootstrap.ToIdentityList(partnerNodes).Filter(filter.HasRole[flow.Identity](flow.RoleCollection)) + internals := bootstrap.ToIdentityList(internalNodes).Filter(filter.HasRole[flow.Identity](flow.RoleCollection)) nCollectors := len(partners) + len(internals) // ensure we have at least as many collection nodes as clusters - if nCollectors < int(flagCollectionClusters) { + if nCollectors < int(numCollectionClusters) { log.Fatal().Msgf("network bootstrap is configured with %d collection nodes, but %d clusters - must have at least one collection node per cluster", - nCollectors, flagCollectionClusters) + nCollectors, numCollectionClusters) } // shuffle both collector lists based on a non-deterministic algorithm @@ -47,24 +49,24 @@ func constructClusterAssignment(partnerNodes, internalNodes []model.NodeInfo) (f log.Fatal().Err(err).Msg("could not shuffle internals") } - identifierLists := make([]flow.IdentifierList, nClusters) + 
identifierLists := make([]flow.IdentifierList, numCollectionClusters) // array to track the 2/3 internal-nodes constraint (internal_nodes > 2 * partner_nodes) - constraint := make([]int, nClusters) + constraint := make([]int, numCollectionClusters) // first, round-robin internal nodes into each cluster for i, node := range internals { - identifierLists[i%nClusters] = append(identifierLists[i%nClusters], node.NodeID) - constraint[i%nClusters] += 1 + identifierLists[i%numCollectionClusters] = append(identifierLists[i%numCollectionClusters], node.NodeID) + constraint[i%numCollectionClusters] += 1 } // next, round-robin partner nodes into each cluster for i, node := range partners { identifierLists[i%len(identifierLists)] = append(identifierLists[i%len(identifierLists)], node.NodeID) - constraint[i%nClusters] -= 2 + constraint[i%numCollectionClusters] -= 2 } // check the 2/3 constraint: for every cluster `i`, constraint[i] must be strictly positive - for i := 0; i < nClusters; i++ { + for i := 0; i < numCollectionClusters; i++ { if constraint[i] <= 0 { return nil, nil, errors.New("there isn't enough internal nodes to have at least 2/3 internal nodes in each cluster") } @@ -81,11 +83,7 @@ func constructClusterAssignment(partnerNodes, internalNodes []model.NodeInfo) (f return assignments, clusters, nil } -func constructRootQCsForClusters( - clusterList flow.ClusterList, - nodeInfos []model.NodeInfo, - clusterBlocks []*cluster.Block, -) []*flow.QuorumCertificate { +func ConstructRootQCsForClusters(log zerolog.Logger, clusterList flow.ClusterList, nodeInfos []bootstrap.NodeInfo, clusterBlocks []*cluster.Block) []*flow.QuorumCertificate { if len(clusterBlocks) != len(clusterList) { log.Fatal().Int("len(clusterBlocks)", len(clusterBlocks)).Int("len(clusterList)", len(clusterList)). 
diff --git a/cmd/util/cmd/common/node_info.go b/cmd/util/cmd/common/node_info.go new file mode 100644 index 00000000000..39aadafb578 --- /dev/null +++ b/cmd/util/cmd/common/node_info.go @@ -0,0 +1,169 @@ +package common + +import ( + "strings" + + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/cmd/bootstrap/cmd" + "github.com/onflow/flow-go/model/bootstrap" + "github.com/onflow/flow-go/model/flow" +) + +// ReadPartnerNodeInfos returns a list of partner nodes after gathering weights +// and public key information from configuration files +func ReadPartnerNodeInfos(log zerolog.Logger, partnerWeightsPath, partnerNodeInfoDir string) []bootstrap.NodeInfo { + partners := ReadPartnerNodes(log, partnerNodeInfoDir) + log.Info().Msgf("read %d partner node configuration files", len(partners)) + + var weights cmd.PartnerWeights + err := ReadJSON(partnerWeightsPath, &weights) + if err != nil { + log.Fatal().Err(err).Msg("failed to read partner weights json") + } + log.Info().Msgf("read %d weights for partner nodes", len(weights)) + + var nodes []bootstrap.NodeInfo + for _, partner := range partners { + // validate every single partner node + nodeID := cmd.ValidateNodeID(partner.NodeID) + networkPubKey := cmd.ValidateNetworkPubKey(partner.NetworkPubKey) + stakingPubKey := cmd.ValidateStakingPubKey(partner.StakingPubKey) + weight, valid := cmd.ValidateWeight(weights[partner.NodeID]) + if !valid { + log.Error().Msgf("weights: %v", weights) + log.Fatal().Msgf("partner node id %x has no weight", nodeID) + } + if weight != flow.DefaultInitialWeight { + log.Warn().Msgf("partner node (id=%x) has non-default weight (%d != %d)", partner.NodeID, weight, flow.DefaultInitialWeight) + } + + node := bootstrap.NewPublicNodeInfo( + nodeID, + partner.Role, + partner.Address, + weight, + networkPubKey.PublicKey, + stakingPubKey.PublicKey, + ) + nodes = append(nodes, node) + } + + return nodes +} + +// ReadPartnerNodes reads the partner node information +func ReadPartnerNodes(log 
zerolog.Logger, partnerNodeInfoDir string) []bootstrap.NodeInfoPub { + var partners []bootstrap.NodeInfoPub + files, err := FilesInDir(partnerNodeInfoDir) + if err != nil { + log.Fatal().Err(err).Msg("could not read partner node infos") + } + for _, f := range files { + // skip files that do not include node-infos + if !strings.Contains(f, bootstrap.PathPartnerNodeInfoPrefix) { + continue + } + + // read file and append to partners + var p bootstrap.NodeInfoPub + err = ReadJSON(f, &p) + if err != nil { + log.Fatal().Err(err).Msg("failed to read node info") + } + partners = append(partners, p) + } + return partners +} + +// ReadInternalNodeInfos returns a list of internal nodes after collecting weights +// from configuration files. +func ReadInternalNodeInfos(log zerolog.Logger, internalNodePrivInfoDir, internalWeightsConfig string) []bootstrap.NodeInfo { + privInternals := ReadInternalNodes(log, internalNodePrivInfoDir) + log.Info().Msgf("read %v internal private node-info files", len(privInternals)) + + weights := internalWeightsByAddress(log, internalWeightsConfig) + log.Info().Msgf("read %d weights for internal nodes", len(weights)) + + var nodes []bootstrap.NodeInfo + for _, internal := range privInternals { + // check if address is valid format + ValidateAddressFormat(log, internal.Address) + + // validate every single internal node + nodeID := cmd.ValidateNodeID(internal.NodeID) + weight, valid := cmd.ValidateWeight(weights[internal.Address]) + if !valid { + log.Error().Msgf("weights: %v", weights) + log.Fatal().Msgf("internal node %v has no weight. 
Did you forget to update the node address?", internal) + } + if weight != flow.DefaultInitialWeight { + log.Warn().Msgf("internal node (id=%x) has non-default weight (%d != %d)", internal.NodeID, weight, flow.DefaultInitialWeight) + } + + node := bootstrap.NewPrivateNodeInfo( + nodeID, + internal.Role, + internal.Address, + weight, + internal.NetworkPrivKey, + internal.StakingPrivKey, + ) + + nodes = append(nodes, node) + } + + return nodes +} + +// ReadInternalNodes reads our internal node private infos generated by +// `keygen` command and returns it +func ReadInternalNodes(log zerolog.Logger, internalNodePrivInfoDir string) []bootstrap.NodeInfoPriv { + var internalPrivInfos []bootstrap.NodeInfoPriv + + // get files in internal priv node infos directory + files, err := FilesInDir(internalNodePrivInfoDir) + if err != nil { + log.Fatal().Err(err).Msg("could not read partner node infos") + } + + // for each of the files + for _, f := range files { + // skip files that do not include node-infos + if !strings.Contains(f, bootstrap.PathPrivNodeInfoPrefix) { + continue + } + + // read file and append to partners + var p bootstrap.NodeInfoPriv + err = ReadJSON(f, &p) + if err != nil { + log.Fatal().Err(err).Msg("failed to read json") + } + internalPrivInfos = append(internalPrivInfos, p) + } + + return internalPrivInfos +} + +// internalWeightsByAddress returns a mapping of node address by weight for internal nodes +func internalWeightsByAddress(log zerolog.Logger, config string) map[string]uint64 { + // read json + var configs []bootstrap.NodeConfig + err := ReadJSON(config, &configs) + if err != nil { + log.Fatal().Err(err).Msg("failed to read json") + } + log.Info().Interface("config", configs).Msgf("read internal node configurations") + + weights := make(map[string]uint64) + for _, config := range configs { + if _, ok := weights[config.Address]; !ok { + weights[config.Address] = config.Weight + } else { + log.Error().Msgf("duplicate internal node address %s", 
config.Address) + } + } + + return weights +} diff --git a/cmd/util/cmd/common/snapshot.go b/cmd/util/cmd/common/snapshot.go new file mode 100644 index 00000000000..8bca3b88a9a --- /dev/null +++ b/cmd/util/cmd/common/snapshot.go @@ -0,0 +1,104 @@ +package common + +import ( + "context" + "encoding/json" + "fmt" + "time" + + "github.com/rs/zerolog" + "github.com/sethvargo/go-retry" + + "github.com/onflow/flow-go-sdk/access/grpc" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/state/protocol/inmem" +) + +const getSnapshotTimeout = 30 * time.Second + +// GetProtocolSnapshot callback that will get latest finalized protocol snapshot +type GetProtocolSnapshot func(ctx context.Context) (protocol.Snapshot, error) + +// GetSnapshot will attempt to get the latest finalized protocol snapshot with the given flow configs +func GetSnapshot(ctx context.Context, client *grpc.Client) (*inmem.Snapshot, error) { + ctx, cancel := context.WithTimeout(ctx, getSnapshotTimeout) + defer cancel() + + b, err := client.GetLatestProtocolStateSnapshot(ctx) + if err != nil { + return nil, fmt.Errorf("failed to get latest finalized protocol state snapshot during pre-initialization: %w", err) + } + + var snapshotEnc inmem.EncodableSnapshot + err = json.Unmarshal(b, &snapshotEnc) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal protocol state snapshot: %w", err) + } + + snapshot := inmem.SnapshotFromEncodable(snapshotEnc) + return snapshot, nil +} + +// GetSnapshotAtEpochAndPhase will get the latest finalized protocol snapshot and check the current epoch and epoch phase. +// If we are past the target epoch and epoch phase we exit the retry mechanism immediately. +// If not check the snapshot at the specified interval until we reach the target epoch and phase. 
+func GetSnapshotAtEpochAndPhase(ctx context.Context, log zerolog.Logger, startupEpoch uint64, startupEpochPhase flow.EpochPhase, retryInterval time.Duration, getSnapshot GetProtocolSnapshot) (protocol.Snapshot, error) { + start := time.Now() + + log = log.With(). + Uint64("target_epoch_counter", startupEpoch). + Str("target_epoch_phase", startupEpochPhase.String()). + Logger() + + log.Info().Msg("starting dynamic startup - waiting until target epoch/phase to start...") + + var snapshot protocol.Snapshot + var err error + + backoff := retry.NewConstant(retryInterval) + err = retry.Do(ctx, backoff, func(ctx context.Context) error { + snapshot, err = getSnapshot(ctx) + if err != nil { + err = fmt.Errorf("failed to get protocol snapshot: %w", err) + log.Error().Err(err).Msg("could not get protocol snapshot") + return retry.RetryableError(err) + } + + // if we encounter any errors interpreting the snapshot something went wrong stop retrying + currEpochCounter, err := snapshot.Epochs().Current().Counter() + if err != nil { + return fmt.Errorf("failed to get the current epoch counter: %w", err) + } + + currEpochPhase, err := snapshot.Phase() + if err != nil { + return fmt.Errorf("failed to get the current epoch phase: %w", err) + } + + // check if we are in or past the target epoch and phase + if currEpochCounter > startupEpoch || (currEpochCounter == startupEpoch && currEpochPhase >= startupEpochPhase) { + log.Info(). + Dur("time-waiting", time.Since(start)). + Uint64("current-epoch", currEpochCounter). + Str("current-epoch-phase", currEpochPhase.String()). + Msg("finished dynamic startup - reached desired epoch and phase") + + return nil + } + + // wait then poll for latest snapshot again + log.Info(). + Dur("time-waiting", time.Since(start)). + Uint64("current-epoch", currEpochCounter). + Str("current-epoch-phase", currEpochPhase.String()). 
+ Msgf("waiting for epoch %d and phase %s", startupEpoch, startupEpochPhase.String()) + + return retry.RetryableError(fmt.Errorf("dynamic startup epoch and epoch phase not reached")) + }) + if err != nil { + return nil, fmt.Errorf("failed to wait for target epoch and phase: %w", err) + } + + return snapshot, nil +} diff --git a/cmd/util/cmd/common/utils.go b/cmd/util/cmd/common/utils.go new file mode 100644 index 00000000000..2f2a8d03e90 --- /dev/null +++ b/cmd/util/cmd/common/utils.go @@ -0,0 +1,130 @@ +package common + +import ( + "encoding/json" + "fmt" + "net" + "os" + "path/filepath" + "strconv" + + "github.com/multiformats/go-multiaddr" + "github.com/rs/zerolog" + + "github.com/onflow/crypto" + "github.com/onflow/flow-go/model/bootstrap" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/network/p2p/utils" + "github.com/onflow/flow-go/utils/io" +) + +func FilesInDir(dir string) ([]string, error) { + exists, err := PathExists(dir) + if err != nil { + return nil, fmt.Errorf("could not check if dir exists: %w", err) + } + + if !exists { + return nil, fmt.Errorf("dir %v does not exist", dir) + } + + var files []string + err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { + if !info.IsDir() { + files = append(files, path) + } + return nil + }) + return files, err +} + +// PathExists +func PathExists(path string) (bool, error) { + _, err := os.Stat(path) + if err == nil { + return true, nil + } + if os.IsNotExist(err) { + return false, nil + } + return false, err +} + +func ReadJSON(path string, target interface{}) error { + dat, err := io.ReadFile(path) + if err != nil { + return fmt.Errorf("cannot read json: %w", err) + } + err = json.Unmarshal(dat, target) + if err != nil { + return fmt.Errorf("cannot unmarshal json in file %s: %w", path, err) + } + return nil +} + +func WriteJSON(path string, out string, data interface{}) error { + bz, err := json.MarshalIndent(data, "", " ") + if err != nil { + return 
fmt.Errorf("cannot marshal json: %w", err) + } + + return WriteText(path, out, bz) +} + +func WriteText(path string, out string, data []byte) error { + path = filepath.Join(out, path) + + err := os.MkdirAll(filepath.Dir(path), 0755) + if err != nil { + return fmt.Errorf("could not create output dir: %w", err) + } + + err = os.WriteFile(path, data, 0644) + if err != nil { + return fmt.Errorf("could not write file: %w", err) + } + return nil +} + +func PubKeyToString(key crypto.PublicKey) string { + return fmt.Sprintf("%x", key.Encode()) +} + +func NodeCountByRole(nodes []bootstrap.NodeInfo) map[flow.Role]uint16 { + roleCounts := map[flow.Role]uint16{ + flow.RoleCollection: 0, + flow.RoleConsensus: 0, + flow.RoleExecution: 0, + flow.RoleVerification: 0, + flow.RoleAccess: 0, + } + for _, node := range nodes { + roleCounts[node.Role] = roleCounts[node.Role] + 1 + } + + return roleCounts +} + +// ValidateAddressFormat validates the address provided by pretty much doing what the network layer would do before +// starting the node +func ValidateAddressFormat(log zerolog.Logger, address string) { + checkErr := func(err error) { + if err != nil { + log.Fatal().Err(err).Str("address", address).Msg("invalid address format.\n" + + `Address needs to be in the format hostname:port or ip:port e.g. 
"flow.com:3569"`) + } + } + + // split address into ip/hostname and port + ip, port, err := net.SplitHostPort(address) + checkErr(err) + + // check that port number is indeed a number + _, err = strconv.Atoi(port) + checkErr(err) + + // create a libp2p address from the ip and port + lp2pAddr := utils.MultiAddressStr(ip, port) + _, err = multiaddr.NewMultiaddr(lp2pAddr) + checkErr(err) +} diff --git a/cmd/utils.go b/cmd/utils.go index a3464bceb7b..713a58375c8 100644 --- a/cmd/utils.go +++ b/cmd/utils.go @@ -11,7 +11,6 @@ import ( "github.com/rs/zerolog" "github.com/onflow/crypto" - "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" From 7d09f47d73fbf7930a38d2e591d783209c7f92a2 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Fri, 22 Mar 2024 10:49:32 -0400 Subject: [PATCH 011/148] generate tx args --- cmd/util/cmd/epochs/cmd/recover.go | 186 +++++++++++++++++++++++++++++ 1 file changed, 186 insertions(+) create mode 100644 cmd/util/cmd/epochs/cmd/recover.go diff --git a/cmd/util/cmd/epochs/cmd/recover.go b/cmd/util/cmd/epochs/cmd/recover.go new file mode 100644 index 00000000000..086534769fc --- /dev/null +++ b/cmd/util/cmd/epochs/cmd/recover.go @@ -0,0 +1,186 @@ +package cmd + +import ( + "context" + "fmt" + "github.com/spf13/cobra" + + "github.com/onflow/cadence" + "github.com/onflow/flow-go/cmd/bootstrap/run" + "github.com/onflow/flow-go/cmd/util/cmd/common" + epochcmdutil "github.com/onflow/flow-go/cmd/util/cmd/epochs/utils" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/state/protocol/inmem" +) + +// generateRecoverEpochTxArgsCmd represents a command to generate the data needed to submit an epoch recovery transaction the network is in EFM (epoch fallback mode). +// EFM can be exited only by a special service event, EpochRecover, which initially originates from a manual service account transaction. 
+// The full epoch data must be generated manually and submitted with this transaction in order for an +// EpochRecover event to be emitted. This command retrieves the current protocol state identities, computes the cluster assignment using those +// identities, generates the cluster QC's and retrieves the DKG key vector of the last successful epoch. +var ( + generateRecoverEpochTxArgsCmd = &cobra.Command{ + Use: "generate-efm-recovery-data", + Short: "Generates recover epoch transaction arguments", + Long: "Generates transaction arguments for the epoch recovery transaction.", + Run: generateRecoverEpochTxArgs, + } + + flagAnAddress string + flagAnPubkey string + flagPartnerWeights string + flagPartnerNodeInfoDir string + flagInternalNodePrivInfoDir string + flagConfig string + flagCollectionClusters int + flagStartView uint64 + flagStakingEndView uint64 + flagEndView uint64 +) + +func init() { + rootCmd.AddCommand(generateRecoverEpochTxArgsCmd) + addGenerateRecoverEpochTxArgsCmdFlags() +} + +func addGenerateRecoverEpochTxArgsCmdFlags() { + generateRecoverEpochTxArgsCmd.Flags().StringVar(&flagBucketNetworkName, "bucket-network-name", "", + "when retrieving the root snapshot from a GCP bucket, the network name portion of the URL (eg. 
\"mainnet-13\")") + generateRecoverEpochTxArgsCmd.Flags().IntVar(&flagCollectionClusters, "collection-clusters", 0, + "number of collection clusters") + // required parameters for network configuration and generation of root node identities + generateRecoverEpochTxArgsCmd.Flags().StringVar(&flagConfig, "config", "", + "path to a JSON file containing multiple node configurations (fields Role, Address, Weight)") + generateRecoverEpochTxArgsCmd.Flags().StringVar(&flagInternalNodePrivInfoDir, "internal-priv-dir", "", "path to directory "+ + "containing the output from the `keygen` command for internal nodes") + generateRecoverEpochTxArgsCmd.Flags().StringVar(&flagPartnerNodeInfoDir, "partner-dir", "", "path to directory "+ + "containing one JSON file starting with node-info.pub..json for every partner node (fields "+ + " in the JSON file: Role, Address, NodeID, NetworkPubKey, StakingPubKey)") + generateRecoverEpochTxArgsCmd.Flags().StringVar(&flagPartnerWeights, "partner-weights", "", "path to a JSON file containing "+ + "a map from partner node's NodeID to their stake") + generateRecoverEpochTxArgsCmd.Flags().Uint64Var(&flagStartView, "start-view", 0, "start view of the recovery epoch") + generateRecoverEpochTxArgsCmd.Flags().Uint64Var(&flagStakingEndView, "staking-end-view", 0, "end view of the staking phase of the recovery epoch") + generateRecoverEpochTxArgsCmd.Flags().Uint64Var(&flagEndView, "end-view", 0, "end view of the recovery epoch") +} + +// generateRecoverEpochTxArgs generates recover epoch transaction arguments from a root protocol state snapshot and writes it to a JSON file +func generateRecoverEpochTxArgs(cmd *cobra.Command, args []string) { + stdout := cmd.OutOrStdout() + + // get flow client with secure client connection to download protocol snapshot from access node + config, err := common.NewFlowClientConfig(flagAnAddress, flagAnPubkey, flow.ZeroID, false) + if err != nil { + log.Fatal().Err(err).Msg("failed to create flow client config") + } + + 
flowClient, err := common.FlowClient(config) + if err != nil { + log.Fatal().Err(err).Msg("failed to create flow client") + } + + snapshot, err := common.GetSnapshot(context.Background(), flowClient) + if err != nil { + log.Fatal().Err(err).Msg("failed") + } + + // extract arguments from recover epoch tx from snapshot + txArgs := extractRecoverEpochArgs(snapshot) + + // encode to JSON + encodedTxArgs, err := epochcmdutil.EncodeArgs(txArgs) + if err != nil { + log.Fatal().Err(err).Msg("could not encode recover epoch transaction arguments") + } + + // write JSON args to stdout + _, err = stdout.Write(encodedTxArgs) + if err != nil { + log.Fatal().Err(err).Msg("could not write jsoncdc encoded arguments") + } +} + +// extractResetEpochArgs extracts the required transaction arguments for the `resetEpoch` transaction +func extractRecoverEpochArgs(snapshot *inmem.Snapshot) []cadence.Value { + epoch := snapshot.Epochs().Current() + ids, err := epoch.InitialIdentities() + if err != nil { + log.Fatal().Err(err).Msg("failed to get initial identities for current epoch") + } + + currentEpochDKG, err := epoch.DKG() + if err != nil { + log.Fatal().Err(err).Msg("failed to get DKG for current epoch") + } + + log.Info().Msg("collecting partner network and staking keys") + partnerNodes := common.ReadPartnerNodeInfos(log, flagPartnerWeights, flagPartnerNodeInfoDir) + log.Info().Msg("") + + log.Info().Msg("generating internal private networking and staking keys") + internalNodes := common.ReadInternalNodeInfos(log, flagInternalNodePrivInfoDir, flagConfig) + log.Info().Msg("") + + log.Info().Msg("computing collection node clusters") + _, clusters, err := common.ConstructClusterAssignment(log, partnerNodes, internalNodes, flagCollectionClusters) + if err != nil { + log.Fatal().Err(err).Msg("unable to generate cluster assignment") + } + log.Info().Msg("") + + epochCounter, err := epoch.Counter() + if err != nil { + log.Fatal().Err(err).Msg("unable to get epoch counter from current epoch") 
+ } + log.Info().Msg("constructing root blocks for collection node clusters") + clusterBlocks := run.GenerateRootClusterBlocks(epochCounter, clusters) + log.Info().Msg("") + + log.Info().Msg("constructing root QCs for collection node clusters") + clusterQCs := common.ConstructRootQCsForClusters(log, clusters, internalNodes, clusterBlocks) + fmt.Sprintf("", clusterQCs) + log.Info().Msg("") + + randomSource, err := epoch.RandomSource() + if err != nil { + log.Fatal().Err(err).Msg("failed to get random source for current epoch") + } + randomSourceCdc, err := cadence.NewString(string(randomSource)) + if err != nil { + log.Fatal().Err(err).Msg("failed to get random source cadence string") + } + + dkgPubKeys := make([]cadence.Value, 0) + nodeIds := make([]cadence.Value, 0) + ids.Map(func(skeleton flow.IdentitySkeleton) flow.IdentitySkeleton { + if skeleton.GetRole() == flow.RoleConsensus { + dkgPubKey, keyShareErr := currentEpochDKG.KeyShare(skeleton.GetNodeID()) + if keyShareErr != nil { + log.Fatal().Err(keyShareErr).Msg(fmt.Sprintf("failed to get dkg pub key share for node: %s", skeleton.GetNodeID())) + } + dkgPubKeyCdc, cdcErr := cadence.NewString(dkgPubKey.String()) + if cdcErr != nil { + log.Fatal().Err(cdcErr).Msg(fmt.Sprintf("failed to get dkg pub key cadence string for node: %s", skeleton.GetNodeID())) + } + dkgPubKeys = append(dkgPubKeys, dkgPubKeyCdc) + } + nodeIdCdc, err := cadence.NewString(skeleton.GetNodeID().String()) + if err != nil { + log.Fatal().Err(err).Msg(fmt.Sprintf("failed to convert node ID to cadence string: %s", skeleton.GetNodeID())) + } + nodeIds = append(nodeIds, nodeIdCdc) + return skeleton + }) + + args := []cadence.Value{ + randomSourceCdc, + cadence.NewUInt64(flagStartView), + cadence.NewUInt64(flagStakingEndView), + cadence.NewUInt64(flagEndView), + cadence.NewArray(dkgPubKeys), + cadence.NewArray(nodeIds), + // clusters + // clusterQcs + } + + return args +} From 84a3d4d00a5ca3de79d18eb6c715b9d4173c68c5 Mon Sep 17 00:00:00 2001 From: 
Andrii Date: Fri, 22 Mar 2024 17:49:51 +0200 Subject: [PATCH 012/148] Added new test with two different observers --- .../access/cohort2/observer_indexer_enabled_extended_test.go | 1 + 1 file changed, 1 insertion(+) create mode 100644 integration/tests/access/cohort2/observer_indexer_enabled_extended_test.go diff --git a/integration/tests/access/cohort2/observer_indexer_enabled_extended_test.go b/integration/tests/access/cohort2/observer_indexer_enabled_extended_test.go new file mode 100644 index 00000000000..0d80cbe8a54 --- /dev/null +++ b/integration/tests/access/cohort2/observer_indexer_enabled_extended_test.go @@ -0,0 +1 @@ +package cohort2 From 37219743db3368a3ee86bcc2c827189ad9c4b97c Mon Sep 17 00:00:00 2001 From: Andrii Date: Fri, 22 Mar 2024 17:49:59 +0200 Subject: [PATCH 013/148] Added new test with two different observers --- .../observer_indexer_enabled_extended_test.go | 516 ++++++++++++++++++ 1 file changed, 516 insertions(+) diff --git a/integration/tests/access/cohort2/observer_indexer_enabled_extended_test.go b/integration/tests/access/cohort2/observer_indexer_enabled_extended_test.go index 0d80cbe8a54..ca45d3cd06d 100644 --- a/integration/tests/access/cohort2/observer_indexer_enabled_extended_test.go +++ b/integration/tests/access/cohort2/observer_indexer_enabled_extended_test.go @@ -1 +1,517 @@ package cohort2 + +import ( + "bytes" + "context" + "fmt" + "net/http" + "testing" + "time" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + sdk "github.com/onflow/flow-go-sdk" + sdkcrypto "github.com/onflow/flow-go-sdk/crypto" + "github.com/onflow/flow-go-sdk/templates" + "github.com/onflow/flow-go/engine/access/rpc/backend" + "github.com/onflow/flow-go/integration/testnet" + "github.com/onflow/flow-go/integration/tests/lib" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" + + accessproto 
"github.com/onflow/flow/protobuf/go/flow/access" + "github.com/onflow/flow/protobuf/go/flow/entities" +) + +func TestObserverIndexerEnabledExtended(t *testing.T) { + suite.Run(t, new(ObserverIndexerEnabledExtendedSuite)) +} + +// ObserverIndexerEnabledExtendedSuite tests the observer with the indexer enabled. +// It uses ObserverSuite as a base to reuse the test cases that need to be run for any observer variation. +type ObserverIndexerEnabledExtendedSuite struct { + ObserverSuite +} + +// SetupTest sets up the test suite by starting the network and preparing the observer client. +// By overriding this function, we can ensure that the observer is started with correct parameters and select +// the RPCs and REST endpoints that are tested. +func (s *ObserverIndexerEnabledExtendedSuite) SetupTest() { + s.localRpc = map[string]struct{}{ + "Ping": {}, + "GetLatestBlockHeader": {}, + "GetBlockHeaderByID": {}, + "GetBlockHeaderByHeight": {}, + "GetLatestBlock": {}, + "GetBlockByID": {}, + "GetBlockByHeight": {}, + "GetLatestProtocolStateSnapshot": {}, + "GetNetworkParameters": {}, + "GetTransactionsByBlockID": {}, + "GetTransaction": {}, + "GetCollectionByID": {}, + "ExecuteScriptAtBlockID": {}, + "ExecuteScriptAtLatestBlock": {}, + "ExecuteScriptAtBlockHeight": {}, + "GetAccount": {}, + "GetAccountAtLatestBlock": {}, + "GetAccountAtBlockHeight": {}, + } + + s.localRest = map[string]struct{}{ + "getBlocksByIDs": {}, + "getBlocksByHeight": {}, + "getBlockPayloadByID": {}, + "getNetworkParameters": {}, + "getNodeVersionInfo": {}, + } + + s.testedRPCs = s.getRPCs + s.testedRestEndpoints = s.getRestEndpoints + + consensusConfigs := []func(config *testnet.NodeConfig){ + // `cruise-ctl-fallback-proposal-duration` is set to 250ms instead to of 100ms + // to purposely slow down the block rate. This is needed since the crypto module + // update providing faster BLS operations. 
+ // TODO: fix the access integration test logic to function without slowing down + // the block rate + testnet.WithAdditionalFlag("--cruise-ctl-fallback-proposal-duration=250ms"), + testnet.WithAdditionalFlagf("--required-verification-seal-approvals=%d", 1), + testnet.WithAdditionalFlagf("--required-construction-seal-approvals=%d", 1), + testnet.WithLogLevel(zerolog.FatalLevel), + } + + nodeConfigs := []testnet.NodeConfig{ + // access node with unstaked nodes supported + testnet.NewNodeConfig(flow.RoleAccess, testnet.WithLogLevel(zerolog.InfoLevel), + testnet.WithAdditionalFlag("--supports-observer=true"), + testnet.WithAdditionalFlagf("--public-network-execution-data-sync-enabled=true"), + testnet.WithAdditionalFlagf("--script-execution-mode=%s", backend.IndexQueryModeExecutionNodesOnly), + testnet.WithAdditionalFlagf("--tx-result-query-mode=%s", backend.IndexQueryModeExecutionNodesOnly), + testnet.WithAdditionalFlag("--event-query-mode=execution-nodes-only"), + ), + + testnet.NewNodeConfig(flow.RoleCollection, testnet.WithLogLevel(zerolog.FatalLevel)), + testnet.NewNodeConfig(flow.RoleCollection, testnet.WithLogLevel(zerolog.FatalLevel)), + testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.FatalLevel)), + testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.FatalLevel)), + testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...), + testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...), + testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...), + testnet.NewNodeConfig(flow.RoleVerification, testnet.WithLogLevel(zerolog.FatalLevel)), + } + + observers := []testnet.ObserverConfig{{ + ContainerName: testnet.PrimaryON, + LogLevel: zerolog.InfoLevel, + AdditionalFlags: []string{ + fmt.Sprintf("--execution-data-dir=%s", testnet.DefaultExecutionDataServiceDir), + fmt.Sprintf("--execution-state-dir=%s", testnet.DefaultExecutionStateDir), + "--execution-data-sync-enabled=true", + 
"--execution-data-indexing-enabled=true", + "--local-service-api-enabled=true", + "--event-query-mode=execution-nodes-only", + }, + }, + { + ContainerName: "observer_2", + LogLevel: zerolog.InfoLevel, + }, + } + + // prepare the network + conf := testnet.NewNetworkConfig("observer_indexing_enabled_extended_test", nodeConfigs, testnet.WithObservers(observers...)) + s.net = testnet.PrepareFlowNetwork(s.T(), conf, flow.Localnet) + + // start the network + ctx, cancel := context.WithCancel(context.Background()) + s.cancel = cancel + + s.net.Start(ctx) +} + +// TestObserverIndexedRPCsHappyPath tests RPCs that are handled by the observer by using a dedicated indexer for the events. +// For now the observer only supports the following RPCs: +// - GetEventsForHeightRange +// - GetEventsForBlockIDs +// To ensure that the observer is handling these RPCs, we stop the upstream access node and verify that the observer client +// returns success for valid requests and errors for invalid ones. +func (s *ObserverIndexerEnabledExtendedSuite) TestObserverIndexedRPCsHappyPath() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + t := s.T() + + // prepare environment to create a new account + serviceAccountClient, err := s.net.ContainerByName(testnet.PrimaryAN).TestnetClient() + require.NoError(t, err) + + latestBlockID, err := serviceAccountClient.GetLatestBlockID(ctx) + require.NoError(t, err) + + // create new account to deploy Counter to + accountPrivateKey := lib.RandomPrivateKey() + + accountKey := sdk.NewAccountKey(). + FromPrivateKey(accountPrivateKey). + SetHashAlgo(sdkcrypto.SHA3_256). 
+ SetWeight(sdk.AccountKeyWeightThreshold) + + serviceAddress := sdk.Address(serviceAccountClient.Chain.ServiceAddress()) + + // Generate the account creation transaction + createAccountTx, err := templates.CreateAccount( + []*sdk.AccountKey{accountKey}, + []templates.Contract{ + { + Name: lib.CounterContract.Name, + Source: lib.CounterContract.ToCadence(), + }, + }, serviceAddress) + require.NoError(t, err) + createAccountTx. + SetReferenceBlockID(sdk.Identifier(latestBlockID)). + SetProposalKey(serviceAddress, 0, serviceAccountClient.GetSeqNumber()). + SetPayer(serviceAddress). + SetComputeLimit(9999) + + // send the create account tx + childCtx, cancel := context.WithTimeout(ctx, 10*time.Second) + err = serviceAccountClient.SignAndSendTransaction(childCtx, createAccountTx) + require.NoError(t, err) + + cancel() + + // wait for account to be created + var accountCreationTxRes *sdk.TransactionResult + unittest.RequireReturnsBefore(t, func() { + accountCreationTxRes, err = serviceAccountClient.WaitForSealed(context.Background(), createAccountTx.ID()) + require.NoError(t, err) + }, 20*time.Second, "has to seal before timeout") + + // obtain the account address + var accountCreatedPayload []byte + var newAccountAddress sdk.Address + for _, event := range accountCreationTxRes.Events { + if event.Type == sdk.EventAccountCreated { + accountCreatedEvent := sdk.AccountCreatedEvent(event) + accountCreatedPayload = accountCreatedEvent.Payload + newAccountAddress = accountCreatedEvent.Address() + break + } + } + require.NotEqual(t, sdk.EmptyAddress, newAccountAddress) + + // now we can query events using observer to data which has to be locally indexed + + // get an observer client + observer, err := s.getObserverClient() + require.NoError(t, err) + + observer_2, err := s.getClient(s.net.ContainerByName("observer_2").Addr(testnet.GRPCPort)) + require.NoError(t, err) + + // wait for data to be synced by observer + require.Eventually(t, func() bool { + _, err := 
observer.GetAccountAtBlockHeight(ctx, &accessproto.GetAccountAtBlockHeightRequest{ + Address: newAccountAddress.Bytes(), + BlockHeight: accountCreationTxRes.BlockHeight, + }) + statusErr, ok := status.FromError(err) + if !ok || err == nil { + return true + } + return statusErr.Code() != codes.OutOfRange + }, 30*time.Second, 1*time.Second) + + // observer_2 + require.Eventually(t, func() bool { + _, err := observer_2.GetAccountAtBlockHeight(ctx, &accessproto.GetAccountAtBlockHeightRequest{ + Address: newAccountAddress.Bytes(), + BlockHeight: accountCreationTxRes.BlockHeight, + }) + statusErr, ok := status.FromError(err) + if !ok || err == nil { + return true + } + return statusErr.Code() != codes.OutOfRange + }, 30*time.Second, 1*time.Second) + + blockWithAccount, err := observer.GetBlockHeaderByID(ctx, &accessproto.GetBlockHeaderByIDRequest{ + Id: accountCreationTxRes.BlockID[:], + }) + require.NoError(t, err) + + blockWithAccount_2, err := observer_2.GetBlockHeaderByID(ctx, &accessproto.GetBlockHeaderByIDRequest{ + Id: accountCreationTxRes.BlockID[:], + }) + require.NoError(t, err) + + // stop the upstream access container + err = s.net.StopContainerByName(ctx, testnet.PrimaryAN) + require.NoError(t, err) + + eventsByBlockID, err := observer.GetEventsForBlockIDs(ctx, &accessproto.GetEventsForBlockIDsRequest{ + Type: sdk.EventAccountCreated, + BlockIds: [][]byte{blockWithAccount.Block.Id}, + EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, + }) + require.NoError(t, err) + + eventsByBlockID_2, err := observer.GetEventsForBlockIDs(ctx, &accessproto.GetEventsForBlockIDsRequest{ + Type: sdk.EventAccountCreated, + BlockIds: [][]byte{blockWithAccount_2.Block.Id}, + EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, + }) + require.NoError(t, err) + + eventsByHeight, err := observer.GetEventsForHeightRange(ctx, &accessproto.GetEventsForHeightRangeRequest{ + Type: sdk.EventAccountCreated, + StartHeight: blockWithAccount.Block.Height, + 
EndHeight: blockWithAccount.Block.Height, + EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, + }) + require.NoError(t, err) + + eventsByHeight_2, err := observer.GetEventsForHeightRange(ctx, &accessproto.GetEventsForHeightRangeRequest{ + Type: sdk.EventAccountCreated, + StartHeight: blockWithAccount_2.Block.Height, + EndHeight: blockWithAccount_2.Block.Height, + EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, + }) + require.NoError(t, err) + + // validate that there is an event that we are looking for + require.Equal(t, eventsByHeight.Results, eventsByBlockID.Results) + found := false + for _, eventsInBlock := range eventsByHeight.Results { + for _, event := range eventsInBlock.Events { + if event.Type == sdk.EventAccountCreated { + if bytes.Equal(event.Payload, accountCreatedPayload) { + found = true + } + } + } + } + require.True(t, found) + + // validate that there is an event that we are looking for + require.Equal(t, eventsByHeight_2.Results, eventsByBlockID_2.Results) + found = false + for _, eventsInBlock := range eventsByHeight_2.Results { + for _, event := range eventsInBlock.Events { + if event.Type == sdk.EventAccountCreated { + if bytes.Equal(event.Payload, accountCreatedPayload) { + found = true + } + } + } + } + require.True(t, found) + +} + +func (s *ObserverIndexerEnabledExtendedSuite) getRPCs() []RPCTest { + return []RPCTest{ + {name: "Ping", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.Ping(ctx, &accessproto.PingRequest{}) + return err + }}, + {name: "GetLatestBlockHeader", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.GetLatestBlockHeader(ctx, &accessproto.GetLatestBlockHeaderRequest{}) + return err + }}, + {name: "GetBlockHeaderByID", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.GetBlockHeaderByID(ctx, &accessproto.GetBlockHeaderByIDRequest{ + Id: make([]byte, 32), + 
}) + return err + }}, + {name: "GetBlockHeaderByHeight", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.GetBlockHeaderByHeight(ctx, &accessproto.GetBlockHeaderByHeightRequest{}) + return err + }}, + {name: "GetLatestBlock", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.GetLatestBlock(ctx, &accessproto.GetLatestBlockRequest{}) + return err + }}, + {name: "GetBlockByID", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.GetBlockByID(ctx, &accessproto.GetBlockByIDRequest{Id: make([]byte, 32)}) + return err + }}, + {name: "GetBlockByHeight", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.GetBlockByHeight(ctx, &accessproto.GetBlockByHeightRequest{}) + return err + }}, + {name: "GetCollectionByID", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.GetCollectionByID(ctx, &accessproto.GetCollectionByIDRequest{Id: make([]byte, 32)}) + return err + }}, + {name: "SendTransaction", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.SendTransaction(ctx, &accessproto.SendTransactionRequest{}) + return err + }}, + {name: "GetTransaction", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.GetTransaction(ctx, &accessproto.GetTransactionRequest{Id: make([]byte, 32)}) + return err + }}, + {name: "GetTransactionResult", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.GetTransactionResult(ctx, &accessproto.GetTransactionRequest{}) + return err + }}, + {name: "GetTransactionResultByIndex", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.GetTransactionResultByIndex(ctx, &accessproto.GetTransactionByIndexRequest{}) + return err + }}, + {name: "GetTransactionResultsByBlockID", 
call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.GetTransactionResultsByBlockID(ctx, &accessproto.GetTransactionsByBlockIDRequest{}) + return err + }}, + {name: "GetTransactionsByBlockID", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.GetTransactionsByBlockID(ctx, &accessproto.GetTransactionsByBlockIDRequest{BlockId: make([]byte, 32)}) + return err + }}, + {name: "GetAccount", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.GetAccount(ctx, &accessproto.GetAccountRequest{ + Address: flow.Localnet.Chain().ServiceAddress().Bytes(), + }) + return err + }}, + {name: "GetAccountAtLatestBlock", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.GetAccountAtLatestBlock(ctx, &accessproto.GetAccountAtLatestBlockRequest{ + Address: flow.Localnet.Chain().ServiceAddress().Bytes(), + }) + return err + }}, + {name: "GetAccountAtBlockHeight", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.GetAccountAtBlockHeight(ctx, &accessproto.GetAccountAtBlockHeightRequest{ + Address: flow.Localnet.Chain().ServiceAddress().Bytes(), + BlockHeight: 0, + }) + return err + }}, + {name: "ExecuteScriptAtLatestBlock", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.ExecuteScriptAtLatestBlock(ctx, &accessproto.ExecuteScriptAtLatestBlockRequest{ + Script: []byte(simpleScript), + Arguments: make([][]byte, 0), + }) + return err + }}, + {name: "ExecuteScriptAtBlockID", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.ExecuteScriptAtBlockID(ctx, &accessproto.ExecuteScriptAtBlockIDRequest{ + BlockId: make([]byte, 32), + Script: []byte("dummy script"), + Arguments: make([][]byte, 0), + }) + return err + }}, + {name: "ExecuteScriptAtBlockHeight", call: func(ctx context.Context, client 
accessproto.AccessAPIClient) error { + _, err := client.ExecuteScriptAtBlockHeight(ctx, &accessproto.ExecuteScriptAtBlockHeightRequest{ + BlockHeight: 0, + Script: []byte(simpleScript), + Arguments: make([][]byte, 0), + }) + return err + }}, + {name: "GetNetworkParameters", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.GetNetworkParameters(ctx, &accessproto.GetNetworkParametersRequest{}) + return err + }}, + {name: "GetLatestProtocolStateSnapshot", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.GetLatestProtocolStateSnapshot(ctx, &accessproto.GetLatestProtocolStateSnapshotRequest{}) + return err + }}, + {name: "GetExecutionResultForBlockID", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.GetExecutionResultForBlockID(ctx, &accessproto.GetExecutionResultForBlockIDRequest{}) + return err + }}, + } +} + +func (s *ObserverIndexerEnabledExtendedSuite) getRestEndpoints() []RestEndpointTest { + transactionId := unittest.IdentifierFixture().String() + account := flow.Localnet.Chain().ServiceAddress().String() + block := unittest.BlockFixture() + executionResult := unittest.ExecutionResultFixture() + collection := unittest.CollectionFixture(2) + eventType := unittest.EventTypeFixture(flow.Localnet) + + return []RestEndpointTest{ + { + name: "getTransactionByID", + method: http.MethodGet, + path: "/transactions/" + transactionId, + }, + { + name: "createTransaction", + method: http.MethodPost, + path: "/transactions", + body: createTx(s.net), + }, + { + name: "getTransactionResultByID", + method: http.MethodGet, + path: fmt.Sprintf("/transaction_results/%s?block_id=%s&collection_id=%s", transactionId, block.ID().String(), collection.ID().String()), + }, + { + name: "getBlocksByIDs", + method: http.MethodGet, + path: "/blocks/" + block.ID().String(), + }, + { + name: "getBlocksByHeight", + method: http.MethodGet, + path: 
"/blocks?height=1", + }, + { + name: "getBlockPayloadByID", + method: http.MethodGet, + path: "/blocks/" + block.ID().String() + "/payload", + }, + { + name: "getExecutionResultByID", + method: http.MethodGet, + path: "/execution_results/" + executionResult.ID().String(), + }, + { + name: "getExecutionResultByBlockID", + method: http.MethodGet, + path: "/execution_results?block_id=" + block.ID().String(), + }, + { + name: "getCollectionByID", + method: http.MethodGet, + path: "/collections/" + collection.ID().String(), + }, + { + name: "executeScript", + method: http.MethodPost, + path: "/scripts", + body: createScript(), + }, + { + name: "getAccount", + method: http.MethodGet, + path: "/accounts/" + account + "?block_height=1", + }, + { + name: "getEvents", + method: http.MethodGet, + path: fmt.Sprintf("/events?type=%s&start_height=%d&end_height=%d", eventType, 0, 3), + }, + { + name: "getNetworkParameters", + method: http.MethodGet, + path: "/network/parameters", + }, + { + name: "getNodeVersionInfo", + method: http.MethodGet, + path: "/node_version_info", + }, + } +} From 2e62a67f22ca73beeb0ecceb18fa6585c24e3fb8 Mon Sep 17 00:00:00 2001 From: Andrii Date: Tue, 26 Mar 2024 12:43:43 +0200 Subject: [PATCH 014/148] Added more calls to test, added access node calls --- .../observer_indexer_enabled_extended_test.go | 417 +++++++----------- 1 file changed, 158 insertions(+), 259 deletions(-) diff --git a/integration/tests/access/cohort2/observer_indexer_enabled_extended_test.go b/integration/tests/access/cohort2/observer_indexer_enabled_extended_test.go index ca45d3cd06d..475768d8b90 100644 --- a/integration/tests/access/cohort2/observer_indexer_enabled_extended_test.go +++ b/integration/tests/access/cohort2/observer_indexer_enabled_extended_test.go @@ -4,7 +4,7 @@ import ( "bytes" "context" "fmt" - "net/http" + "github.com/onflow/flow/protobuf/go/flow/entities" "testing" "time" @@ -24,7 +24,6 @@ import ( "github.com/onflow/flow-go/utils/unittest" accessproto 
"github.com/onflow/flow/protobuf/go/flow/access" - "github.com/onflow/flow/protobuf/go/flow/entities" ) func TestObserverIndexerEnabledExtended(t *testing.T) { @@ -41,38 +40,6 @@ type ObserverIndexerEnabledExtendedSuite struct { // By overriding this function, we can ensure that the observer is started with correct parameters and select // the RPCs and REST endpoints that are tested. func (s *ObserverIndexerEnabledExtendedSuite) SetupTest() { - s.localRpc = map[string]struct{}{ - "Ping": {}, - "GetLatestBlockHeader": {}, - "GetBlockHeaderByID": {}, - "GetBlockHeaderByHeight": {}, - "GetLatestBlock": {}, - "GetBlockByID": {}, - "GetBlockByHeight": {}, - "GetLatestProtocolStateSnapshot": {}, - "GetNetworkParameters": {}, - "GetTransactionsByBlockID": {}, - "GetTransaction": {}, - "GetCollectionByID": {}, - "ExecuteScriptAtBlockID": {}, - "ExecuteScriptAtLatestBlock": {}, - "ExecuteScriptAtBlockHeight": {}, - "GetAccount": {}, - "GetAccountAtLatestBlock": {}, - "GetAccountAtBlockHeight": {}, - } - - s.localRest = map[string]struct{}{ - "getBlocksByIDs": {}, - "getBlocksByHeight": {}, - "getBlockPayloadByID": {}, - "getNetworkParameters": {}, - "getNodeVersionInfo": {}, - } - - s.testedRPCs = s.getRPCs - s.testedRestEndpoints = s.getRestEndpoints - consensusConfigs := []func(config *testnet.NodeConfig){ // `cruise-ctl-fallback-proposal-duration` is set to 250ms instead to of 100ms // to purposely slow down the block rate. This is needed since the crypto module @@ -134,12 +101,6 @@ func (s *ObserverIndexerEnabledExtendedSuite) SetupTest() { s.net.Start(ctx) } -// TestObserverIndexedRPCsHappyPath tests RPCs that are handled by the observer by using a dedicated indexer for the events. 
-// For now the observer only supports the following RPCs: -// - GetEventsForHeightRange -// - GetEventsForBlockIDs -// To ensure that the observer is handling these RPCs, we stop the upstream access node and verify that the observer client -// returns success for valid requests and errors for invalid ones. func (s *ObserverIndexerEnabledExtendedSuite) TestObserverIndexedRPCsHappyPath() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -173,6 +134,7 @@ func (s *ObserverIndexerEnabledExtendedSuite) TestObserverIndexedRPCsHappyPath() }, }, serviceAddress) require.NoError(t, err) + createAccountTx. SetReferenceBlockID(sdk.Identifier(latestBlockID)). SetProposalKey(serviceAddress, 0, serviceAccountClient.GetSeqNumber()). @@ -208,6 +170,9 @@ func (s *ObserverIndexerEnabledExtendedSuite) TestObserverIndexedRPCsHappyPath() // now we can query events using observer to data which has to be locally indexed + access, err := s.getClient(s.net.ContainerByName(testnet.PrimaryAN).Addr(testnet.GRPCPort)) + require.NoError(t, err) + // get an observer client observer, err := s.getObserverClient() require.NoError(t, err) @@ -215,6 +180,18 @@ func (s *ObserverIndexerEnabledExtendedSuite) TestObserverIndexedRPCsHappyPath() observer_2, err := s.getClient(s.net.ContainerByName("observer_2").Addr(testnet.GRPCPort)) require.NoError(t, err) + // wait for data to be synced by observer + require.Eventually(t, func() bool { + _, err := observer.GetAccount(ctx, &accessproto.GetAccountRequest{ + Address: newAccountAddress.Bytes(), + }) + statusErr, ok := status.FromError(err) + if !ok || err == nil { + return true + } + return statusErr.Code() != codes.OutOfRange + }, 30*time.Second, 1*time.Second) + // wait for data to be synced by observer require.Eventually(t, func() bool { _, err := observer.GetAccountAtBlockHeight(ctx, &accessproto.GetAccountAtBlockHeightRequest{ @@ -228,11 +205,10 @@ func (s *ObserverIndexerEnabledExtendedSuite) TestObserverIndexedRPCsHappyPath() 
return statusErr.Code() != codes.OutOfRange }, 30*time.Second, 1*time.Second) - // observer_2 + // wait for data to be synced by observer require.Eventually(t, func() bool { - _, err := observer_2.GetAccountAtBlockHeight(ctx, &accessproto.GetAccountAtBlockHeightRequest{ - Address: newAccountAddress.Bytes(), - BlockHeight: accountCreationTxRes.BlockHeight, + _, err := observer.GetAccountAtLatestBlock(ctx, &accessproto.GetAccountAtLatestBlockRequest{ + Address: newAccountAddress.Bytes(), }) statusErr, ok := status.FromError(err) if !ok || err == nil { @@ -251,8 +227,11 @@ func (s *ObserverIndexerEnabledExtendedSuite) TestObserverIndexedRPCsHappyPath() }) require.NoError(t, err) - // stop the upstream access container - err = s.net.StopContainerByName(ctx, testnet.PrimaryAN) + accessEventsByBlockID, err := access.GetEventsForBlockIDs(ctx, &accessproto.GetEventsForBlockIDsRequest{ + Type: sdk.EventAccountCreated, + BlockIds: [][]byte{blockWithAccount.Block.Id}, + EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, + }) require.NoError(t, err) eventsByBlockID, err := observer.GetEventsForBlockIDs(ctx, &accessproto.GetEventsForBlockIDsRequest{ @@ -262,13 +241,25 @@ func (s *ObserverIndexerEnabledExtendedSuite) TestObserverIndexedRPCsHappyPath() }) require.NoError(t, err) - eventsByBlockID_2, err := observer.GetEventsForBlockIDs(ctx, &accessproto.GetEventsForBlockIDsRequest{ + eventsByBlockID_2, err := observer_2.GetEventsForBlockIDs(ctx, &accessproto.GetEventsForBlockIDsRequest{ Type: sdk.EventAccountCreated, BlockIds: [][]byte{blockWithAccount_2.Block.Id}, EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, }) require.NoError(t, err) + require.Equal(t, eventsByBlockID.Results, accessEventsByBlockID.Results) + require.Equal(t, eventsByBlockID_2.Results, accessEventsByBlockID.Results) + + // GetEventsForHeightRange + accessEventsByHeight, err := access.GetEventsForHeightRange(ctx, &accessproto.GetEventsForHeightRangeRequest{ + Type: 
sdk.EventAccountCreated, + StartHeight: blockWithAccount.Block.Height, + EndHeight: blockWithAccount.Block.Height, + EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, + }) + require.NoError(t, err) + eventsByHeight, err := observer.GetEventsForHeightRange(ctx, &accessproto.GetEventsForHeightRangeRequest{ Type: sdk.EventAccountCreated, StartHeight: blockWithAccount.Block.Height, @@ -277,7 +268,7 @@ func (s *ObserverIndexerEnabledExtendedSuite) TestObserverIndexedRPCsHappyPath() }) require.NoError(t, err) - eventsByHeight_2, err := observer.GetEventsForHeightRange(ctx, &accessproto.GetEventsForHeightRangeRequest{ + eventsByHeight_2, err := observer_2.GetEventsForHeightRange(ctx, &accessproto.GetEventsForHeightRangeRequest{ Type: sdk.EventAccountCreated, StartHeight: blockWithAccount_2.Block.Height, EndHeight: blockWithAccount_2.Block.Height, @@ -285,6 +276,9 @@ func (s *ObserverIndexerEnabledExtendedSuite) TestObserverIndexedRPCsHappyPath() }) require.NoError(t, err) + require.Equal(t, eventsByHeight.Results, accessEventsByHeight.Results) + require.Equal(t, eventsByHeight_2.Results, accessEventsByHeight.Results) + // validate that there is an event that we are looking for require.Equal(t, eventsByHeight.Results, eventsByBlockID.Results) found := false @@ -299,219 +293,124 @@ func (s *ObserverIndexerEnabledExtendedSuite) TestObserverIndexedRPCsHappyPath() } require.True(t, found) - // validate that there is an event that we are looking for - require.Equal(t, eventsByHeight_2.Results, eventsByBlockID_2.Results) - found = false - for _, eventsInBlock := range eventsByHeight_2.Results { - for _, event := range eventsInBlock.Events { - if event.Type == sdk.EventAccountCreated { - if bytes.Equal(event.Payload, accountCreatedPayload) { - found = true - } - } - } - } - require.True(t, found) + // GetAccount + getAccountObserver1Response, err := observer.GetAccount(ctx, &accessproto.GetAccountRequest{ + Address: newAccountAddress.Bytes(), + }) + 
require.NoError(t, err) -} + getAccountObserver2Response, err := observer_2.GetAccount(ctx, &accessproto.GetAccountRequest{ + Address: newAccountAddress.Bytes(), + }) + require.NoError(t, err) -func (s *ObserverIndexerEnabledExtendedSuite) getRPCs() []RPCTest { - return []RPCTest{ - {name: "Ping", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { - _, err := client.Ping(ctx, &accessproto.PingRequest{}) - return err - }}, - {name: "GetLatestBlockHeader", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { - _, err := client.GetLatestBlockHeader(ctx, &accessproto.GetLatestBlockHeaderRequest{}) - return err - }}, - {name: "GetBlockHeaderByID", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { - _, err := client.GetBlockHeaderByID(ctx, &accessproto.GetBlockHeaderByIDRequest{ - Id: make([]byte, 32), - }) - return err - }}, - {name: "GetBlockHeaderByHeight", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { - _, err := client.GetBlockHeaderByHeight(ctx, &accessproto.GetBlockHeaderByHeightRequest{}) - return err - }}, - {name: "GetLatestBlock", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { - _, err := client.GetLatestBlock(ctx, &accessproto.GetLatestBlockRequest{}) - return err - }}, - {name: "GetBlockByID", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { - _, err := client.GetBlockByID(ctx, &accessproto.GetBlockByIDRequest{Id: make([]byte, 32)}) - return err - }}, - {name: "GetBlockByHeight", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { - _, err := client.GetBlockByHeight(ctx, &accessproto.GetBlockByHeightRequest{}) - return err - }}, - {name: "GetCollectionByID", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { - _, err := client.GetCollectionByID(ctx, &accessproto.GetCollectionByIDRequest{Id: make([]byte, 32)}) - return err - }}, - {name: 
"SendTransaction", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { - _, err := client.SendTransaction(ctx, &accessproto.SendTransactionRequest{}) - return err - }}, - {name: "GetTransaction", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { - _, err := client.GetTransaction(ctx, &accessproto.GetTransactionRequest{Id: make([]byte, 32)}) - return err - }}, - {name: "GetTransactionResult", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { - _, err := client.GetTransactionResult(ctx, &accessproto.GetTransactionRequest{}) - return err - }}, - {name: "GetTransactionResultByIndex", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { - _, err := client.GetTransactionResultByIndex(ctx, &accessproto.GetTransactionByIndexRequest{}) - return err - }}, - {name: "GetTransactionResultsByBlockID", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { - _, err := client.GetTransactionResultsByBlockID(ctx, &accessproto.GetTransactionsByBlockIDRequest{}) - return err - }}, - {name: "GetTransactionsByBlockID", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { - _, err := client.GetTransactionsByBlockID(ctx, &accessproto.GetTransactionsByBlockIDRequest{BlockId: make([]byte, 32)}) - return err - }}, - {name: "GetAccount", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { - _, err := client.GetAccount(ctx, &accessproto.GetAccountRequest{ - Address: flow.Localnet.Chain().ServiceAddress().Bytes(), - }) - return err - }}, - {name: "GetAccountAtLatestBlock", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { - _, err := client.GetAccountAtLatestBlock(ctx, &accessproto.GetAccountAtLatestBlockRequest{ - Address: flow.Localnet.Chain().ServiceAddress().Bytes(), - }) - return err - }}, - {name: "GetAccountAtBlockHeight", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { 
- _, err := client.GetAccountAtBlockHeight(ctx, &accessproto.GetAccountAtBlockHeightRequest{ - Address: flow.Localnet.Chain().ServiceAddress().Bytes(), - BlockHeight: 0, - }) - return err - }}, - {name: "ExecuteScriptAtLatestBlock", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { - _, err := client.ExecuteScriptAtLatestBlock(ctx, &accessproto.ExecuteScriptAtLatestBlockRequest{ - Script: []byte(simpleScript), - Arguments: make([][]byte, 0), - }) - return err - }}, - {name: "ExecuteScriptAtBlockID", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { - _, err := client.ExecuteScriptAtBlockID(ctx, &accessproto.ExecuteScriptAtBlockIDRequest{ - BlockId: make([]byte, 32), - Script: []byte("dummy script"), - Arguments: make([][]byte, 0), - }) - return err - }}, - {name: "ExecuteScriptAtBlockHeight", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { - _, err := client.ExecuteScriptAtBlockHeight(ctx, &accessproto.ExecuteScriptAtBlockHeightRequest{ - BlockHeight: 0, - Script: []byte(simpleScript), - Arguments: make([][]byte, 0), - }) - return err - }}, - {name: "GetNetworkParameters", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { - _, err := client.GetNetworkParameters(ctx, &accessproto.GetNetworkParametersRequest{}) - return err - }}, - {name: "GetLatestProtocolStateSnapshot", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { - _, err := client.GetLatestProtocolStateSnapshot(ctx, &accessproto.GetLatestProtocolStateSnapshotRequest{}) - return err - }}, - {name: "GetExecutionResultForBlockID", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { - _, err := client.GetExecutionResultForBlockID(ctx, &accessproto.GetExecutionResultForBlockIDRequest{}) - return err - }}, - } -} + getAccountAccessResponse, err := access.GetAccount(ctx, &accessproto.GetAccountRequest{ + Address: newAccountAddress.Bytes(), + }) + 
require.NoError(t, err) -func (s *ObserverIndexerEnabledExtendedSuite) getRestEndpoints() []RestEndpointTest { - transactionId := unittest.IdentifierFixture().String() - account := flow.Localnet.Chain().ServiceAddress().String() - block := unittest.BlockFixture() - executionResult := unittest.ExecutionResultFixture() - collection := unittest.CollectionFixture(2) - eventType := unittest.EventTypeFixture(flow.Localnet) + require.Equal(t, getAccountAccessResponse.Account, getAccountObserver2Response.Account) + require.Equal(t, getAccountAccessResponse.Account, getAccountObserver1Response.Account) + + // GetAccountAtBlockHeight + getAccountAtBlockHeightObserver1Response, err := observer.GetAccountAtBlockHeight(ctx, &accessproto.GetAccountAtBlockHeightRequest{ + Address: newAccountAddress.Bytes(), + BlockHeight: accountCreationTxRes.BlockHeight, + }) + require.NoError(t, err) + + getAccountAtBlockHeightObserver2Response, err := observer_2.GetAccountAtBlockHeight(ctx, &accessproto.GetAccountAtBlockHeightRequest{ + Address: newAccountAddress.Bytes(), + BlockHeight: accountCreationTxRes.BlockHeight, + }) + require.NoError(t, err) + + getAccountAtBlockHeightAccessResponse, err := access.GetAccountAtBlockHeight(ctx, &accessproto.GetAccountAtBlockHeightRequest{ + Address: newAccountAddress.Bytes(), + BlockHeight: accountCreationTxRes.BlockHeight, + }) + require.NoError(t, err) + + require.Equal(t, getAccountAtBlockHeightObserver2Response.Account, getAccountAtBlockHeightAccessResponse.Account) + require.Equal(t, getAccountAtBlockHeightObserver1Response.Account, getAccountAtBlockHeightAccessResponse.Account) + + //GetAccountAtLatestBlock + getAccountAtLatestBlockObserver1Response, err := observer.GetAccountAtLatestBlock(ctx, &accessproto.GetAccountAtLatestBlockRequest{ + Address: newAccountAddress.Bytes(), + }) + require.NoError(t, err) + + getAccountAtLatestBlockObserver2Response, err := observer_2.GetAccountAtLatestBlock(ctx, &accessproto.GetAccountAtLatestBlockRequest{ + 
Address: newAccountAddress.Bytes(), + }) + require.NoError(t, err) + + getAccountAtLatestBlockAccessResponse, err := access.GetAccountAtLatestBlock(ctx, &accessproto.GetAccountAtLatestBlockRequest{ + Address: newAccountAddress.Bytes(), + }) + require.NoError(t, err) + + require.Equal(t, getAccountAtLatestBlockObserver2Response.Account, getAccountAtLatestBlockAccessResponse.Account) + require.Equal(t, getAccountAtLatestBlockObserver1Response.Account, getAccountAtLatestBlockAccessResponse.Account) + + // GetSystemTransaction + getSystemTransactionObserver1Response, err := observer.GetSystemTransaction(ctx, &accessproto.GetSystemTransactionRequest{ + BlockId: blockWithAccount.Block.Id, + }) + require.NoError(t, err) + + getSystemTransactionObserver2Response, err := observer_2.GetSystemTransaction(ctx, &accessproto.GetSystemTransactionRequest{ + BlockId: blockWithAccount.Block.Id, + }) + require.NoError(t, err) + + getSystemTransactionAccessResponse, err := access.GetSystemTransaction(ctx, &accessproto.GetSystemTransactionRequest{ + BlockId: blockWithAccount.Block.Id, + }) + require.NoError(t, err) + + require.Equal(t, getSystemTransactionObserver2Response.Transaction, getSystemTransactionAccessResponse.Transaction) + require.Equal(t, getSystemTransactionObserver1Response.Transaction, getSystemTransactionAccessResponse.Transaction) + + // GetSystemTransactionResult + getSystemTransactionResultObserver2Response, err := observer_2.GetSystemTransactionResult(ctx, &accessproto.GetSystemTransactionResultRequest{ + BlockId: blockWithAccount.Block.Id, + EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, + }) + require.NoError(t, err) + + getSystemTransactionResultAccessResponse, err := access.GetSystemTransactionResult(ctx, &accessproto.GetSystemTransactionResultRequest{ + BlockId: blockWithAccount.Block.Id, + EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, + }) + require.NoError(t, err) + + //getSystemTransactionResultObserver1Response, err := 
observer.GetSystemTransactionResult(ctx, &accessproto.GetSystemTransactionResultRequest{ + // BlockId: blockWithAccount.Block.Id, + // EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, + //}) + //require.NoError(t, err) + + require.Equal(t, getSystemTransactionResultObserver2Response.Events, getSystemTransactionResultAccessResponse.Events) + //require.Equal(t, getSystemTransactionResultObserver1Response.Events, getSystemTransactionResultAccessResponse.Events) + + // GetExecutionResultByID + getExecutionResultByIDObserver1Response, err := observer.GetExecutionResultByID(ctx, &accessproto.GetExecutionResultByIDRequest{ + Id: blockWithAccount.Block.Id, + }) + require.NoError(t, err) + + getExecutionResultByIDObserver2Response, err := observer_2.GetExecutionResultByID(ctx, &accessproto.GetExecutionResultByIDRequest{ + Id: blockWithAccount.Block.Id, + }) + require.NoError(t, err) + + getExecutionResultByIDAccessResponse, err := access.GetExecutionResultByID(ctx, &accessproto.GetExecutionResultByIDRequest{ + Id: blockWithAccount.Block.Id, + }) + require.NoError(t, err) + + require.Equal(t, getExecutionResultByIDAccessResponse.ExecutionResult, getExecutionResultByIDObserver2Response.ExecutionResult) + require.Equal(t, getExecutionResultByIDAccessResponse.ExecutionResult, getExecutionResultByIDObserver1Response.ExecutionResult) - return []RestEndpointTest{ - { - name: "getTransactionByID", - method: http.MethodGet, - path: "/transactions/" + transactionId, - }, - { - name: "createTransaction", - method: http.MethodPost, - path: "/transactions", - body: createTx(s.net), - }, - { - name: "getTransactionResultByID", - method: http.MethodGet, - path: fmt.Sprintf("/transaction_results/%s?block_id=%s&collection_id=%s", transactionId, block.ID().String(), collection.ID().String()), - }, - { - name: "getBlocksByIDs", - method: http.MethodGet, - path: "/blocks/" + block.ID().String(), - }, - { - name: "getBlocksByHeight", - method: http.MethodGet, - path: 
"/blocks?height=1", - }, - { - name: "getBlockPayloadByID", - method: http.MethodGet, - path: "/blocks/" + block.ID().String() + "/payload", - }, - { - name: "getExecutionResultByID", - method: http.MethodGet, - path: "/execution_results/" + executionResult.ID().String(), - }, - { - name: "getExecutionResultByBlockID", - method: http.MethodGet, - path: "/execution_results?block_id=" + block.ID().String(), - }, - { - name: "getCollectionByID", - method: http.MethodGet, - path: "/collections/" + collection.ID().String(), - }, - { - name: "executeScript", - method: http.MethodPost, - path: "/scripts", - body: createScript(), - }, - { - name: "getAccount", - method: http.MethodGet, - path: "/accounts/" + account + "?block_height=1", - }, - { - name: "getEvents", - method: http.MethodGet, - path: fmt.Sprintf("/events?type=%s&start_height=%d&end_height=%d", eventType, 0, 3), - }, - { - name: "getNetworkParameters", - method: http.MethodGet, - path: "/network/parameters", - }, - { - name: "getNodeVersionInfo", - method: http.MethodGet, - path: "/node_version_info", - }, - } } From d0e70db6e1631c78ac01830ac2a8e47857aece9a Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Thu, 1 Feb 2024 08:39:11 -0800 Subject: [PATCH 015/148] add ingestion throttle --- engine/execution/ingestion/throttle.go | 301 +++++++++++++++++++++++++ 1 file changed, 301 insertions(+) create mode 100644 engine/execution/ingestion/throttle.go diff --git a/engine/execution/ingestion/throttle.go b/engine/execution/ingestion/throttle.go new file mode 100644 index 00000000000..5e6612e8c9b --- /dev/null +++ b/engine/execution/ingestion/throttle.go @@ -0,0 +1,301 @@ +package ingestion + +import ( + "context" + "fmt" + "sync" + + "github.com/onflow/flow-go/engine/execution/state" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/component" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/state/protocol" + 
"github.com/onflow/flow-go/storage" + "github.com/rs/zerolog" +) + +// CatchUpThreshold is the number of blocks that if the execution is far behind +// the finalization then we will only lazy load the next unexecuted finalized +// blocks until the execution has caught up +const CatchUpThreshold = 500 + +func NewThrottleEngine( + blocks storage.Blocks, + handler BlockHandler, + log zerolog.Logger, + state protocol.State, + execState state.ExecutionState, + headers storage.Headers, + catchupThreshold int, +) (*component.ComponentManager, error) { + throttle, err := NewThrottle(log, state, execState, headers, catchupThreshold) + if err != nil { + return nil, fmt.Errorf("could not create throttle: %w", err) + } + + e := component.NewComponentManagerBuilder(). + AddWorker(func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { + processables := make(chan flow.Identifier, 1) + + go func() { + err := forwardProcessableToHandler(ctx, blocks, handler, processables) + if err != nil { + ctx.Throw(err) + } + }() + + log.Info().Msg("initializing throttle engine") + + err = throttle.Init(processables) + if err != nil { + ctx.Throw(err) + } + + log.Info().Msgf("throttle engine initialized") + + ready() + }). + Build() + return e, nil +} + +func forwardProcessableToHandler( + ctx context.Context, + blocks storage.Blocks, + handler BlockHandler, + processables <-chan flow.Identifier, +) error { + for { + select { + case <-ctx.Done(): + return nil + case blockID := <-processables: + block, err := blocks.ByID(blockID) + if err != nil { + return fmt.Errorf("could not get block: %w", err) + } + + err = handler.OnBlock(block) + if err != nil { + return fmt.Errorf("could not process block: %w", err) + } + } + } +} + +// Throttle is a helper struct that helps throttle the unexecuted blocks to be sent +// to the block queue for execution. 
+// It is useful for case when execution is falling far behind the finalization, in which case +// we want to throttle the blocks to be sent to the block queue for fetching data to execute +// them. Without throttle, the block queue will be flooded with blocks, and the network +// will be flooded with requests fetching collections, and the EN might quickly run out of memory. +type Throttle struct { + // config + threshold int // catch up threshold + + // state + mu sync.Mutex + executed uint64 + finalized uint64 + inited bool + + // notifier + processables chan<- flow.Identifier + + // dependencies + log zerolog.Logger + state protocol.State + headers storage.Headers +} + +type BlockHandler interface { + OnBlock(block *flow.Block) error +} + +func NewThrottle( + log zerolog.Logger, + state protocol.State, + execState state.ExecutionState, + headers storage.Headers, + catchupThreshold int, +) (*Throttle, error) { + finalizedHead, err := state.Final().Head() + if err != nil { + return nil, fmt.Errorf("could not get finalized head: %w", err) + } + + finalized := finalizedHead.Height + // TODO: implement GetHighestFinalizedExecuted for execution state when storehouse + // is not used + executed := execState.GetHighestFinalizedExecuted() + + if executed > finalized { + return nil, fmt.Errorf("executed finalized %v is greater than finalized %v", executed, finalized) + } + + return &Throttle{ + threshold: catchupThreshold, + executed: executed, + finalized: finalized, + + log: log.With().Str("component", "throttle").Logger(), + state: state, + headers: headers, + }, nil +} + +func (c *Throttle) Init(processables chan<- flow.Identifier) error { + c.mu.Lock() + defer c.mu.Unlock() + if c.inited { + return fmt.Errorf("throttle already inited") + } + + c.inited = true + + var unexecuted []flow.Identifier + var err error + if caughtUp(c.executed, c.finalized, c.threshold) { + unexecuted, err = findAllUnexecutedBlocks(c.state, c.headers, c.executed, c.finalized) + if err != nil 
{ + return err + } + } else { + unexecuted, err = findFinalized(c.state, c.headers, c.executed, c.executed+500) + if err != nil { + return err + } + } + + for _, id := range unexecuted { + c.processables <- id + } + + return nil +} + +func (c *Throttle) OnBlockExecuted(executed uint64, _ flow.Identifier) error { + c.mu.Lock() + defer c.mu.Unlock() + + if !c.inited { + return fmt.Errorf("throttle not inited") + } + + // we have already caught up, ignore + if c.caughtUp() { + return nil + } + + // the execution is still far behind from finalization + c.executed = executed + if !c.caughtUp() { + return nil + } + + c.log.Info().Uint64("executed", executed).Uint64("finalized", c.finalized). + Msgf("execution has caught up, processing remaining unexecuted blocks") + + // if the execution have just caught up close enough to the latest finalized blocks, + // then process all unexecuted blocks, including finalized unexecuted and pending unexecuted + unexecuted, err := findAllUnexecutedBlocks(c.state, c.headers, c.executed, c.finalized) + if err != nil { + return fmt.Errorf("could not find unexecuted blocks for processing: %w", err) + } + + c.log.Info().Int("unexecuted", len(unexecuted)).Msgf("forwarding unexecuted blocks") + + for _, id := range unexecuted { + c.processables <- id + } + + c.log.Info().Msgf("all unexecuted blocks have been processed") + + return nil +} + +func (c *Throttle) BlockProcessable(block *flow.Header, qc *flow.QuorumCertificate) { + c.mu.Lock() + defer c.mu.Unlock() + + if !c.inited { + return + } + + // ignore the block if has not caught up. 
+ if !c.caughtUp() { + return + } + + // if has caught up, then process the block + c.processables <- qc.BlockID +} + +func (c *Throttle) OnBlockFinalized(lastFinalized *flow.Header) { + c.mu.Lock() + defer c.mu.Unlock() + if !c.inited { + return + } + + if c.caughtUp() { + return + } + + if lastFinalized.Height <= c.finalized { + return + } + + c.finalized = lastFinalized.Height +} + +func (c *Throttle) caughtUp() bool { + return caughtUp(c.executed, c.finalized, c.threshold) +} + +func caughtUp(executed, finalized uint64, threshold int) bool { + return finalized <= executed+uint64(threshold) +} + +func findFinalized(state protocol.State, headers storage.Headers, lastExecuted, finalizedHeight uint64) ([]flow.Identifier, error) { + // get finalized height + finalized := state.AtHeight(finalizedHeight) + final, err := finalized.Head() + if err != nil { + return nil, fmt.Errorf("could not get finalized block: %w", err) + } + + // dynamically bootstrapped execution node will have highest finalized executed as sealed root, + // which is lower than finalized root. so we will reload blocks from + // [sealedRoot.Height + 1, finalizedRoot.Height] and execute them on startup. 
+ unexecutedFinalized := make([]flow.Identifier, 0) + + // starting from the first unexecuted block, go through each unexecuted and finalized block + // reload its block to execution queues + // loading finalized blocks + for height := lastExecuted + 1; height <= final.Height; height++ { + finalizedID, err := headers.BlockIDByHeight(height) + if err != nil { + return nil, fmt.Errorf("could not get header at height: %v, %w", height, err) + } + + unexecutedFinalized = append(unexecutedFinalized, finalizedID) + } + return unexecutedFinalized, nil +} + +func findAllUnexecutedBlocks(state protocol.State, headers storage.Headers, lastExecuted, finalizedHeight uint64) ([]flow.Identifier, error) { + unexecutedFinalized, err := findFinalized(state, headers, lastExecuted, finalizedHeight) + if err != nil { + return nil, fmt.Errorf("could not find finalized unexecuted blocks: %w", err) + } + + // loaded all pending blocks + pendings, err := state.AtHeight(finalizedHeight).Descendants() + if err != nil { + return nil, fmt.Errorf("could not get descendants of finalized block: %w", err) + } + + unexecuted := append(unexecutedFinalized, pendings...) 
+ return unexecuted, nil +} From d5aec198b4c59774d09796059d16c573bc099a61 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Thu, 1 Feb 2024 08:55:55 -0800 Subject: [PATCH 016/148] increase buffer size --- engine/execution/ingestion/throttle.go | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/engine/execution/ingestion/throttle.go b/engine/execution/ingestion/throttle.go index 5e6612e8c9b..05ee256ba73 100644 --- a/engine/execution/ingestion/throttle.go +++ b/engine/execution/ingestion/throttle.go @@ -20,9 +20,8 @@ import ( const CatchUpThreshold = 500 func NewThrottleEngine( - blocks storage.Blocks, - handler BlockHandler, log zerolog.Logger, + handler BlockHandler, state protocol.State, execState state.ExecutionState, headers storage.Headers, @@ -35,10 +34,19 @@ func NewThrottleEngine( e := component.NewComponentManagerBuilder(). AddWorker(func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { - processables := make(chan flow.Identifier, 1) + // TODO: config the buffer size + // since the handler.OnBlock method could be blocking, we need to make sure + // the channel has enough buffer space to hold the unprocessed blocks. + // if the channel is full, then it will block the follower engine from + // delivering new blocks until the channel is not full, which could be + // useful because we probably don't want to process too many blocks if + // the execution is not fast enough or even stopped. 
+ // TODO: wrap the channel so that we can report acurate metrics about the + // buffer size + processables := make(chan flow.Identifier, 10000) go func() { - err := forwardProcessableToHandler(ctx, blocks, handler, processables) + err := forwardProcessableToHandler(ctx, headers, handler, processables) if err != nil { ctx.Throw(err) } @@ -61,7 +69,7 @@ func NewThrottleEngine( func forwardProcessableToHandler( ctx context.Context, - blocks storage.Blocks, + headers storage.Headers, handler BlockHandler, processables <-chan flow.Identifier, ) error { @@ -70,7 +78,7 @@ func forwardProcessableToHandler( case <-ctx.Done(): return nil case blockID := <-processables: - block, err := blocks.ByID(blockID) + block, err := headers.ByBlockID(blockID) if err != nil { return fmt.Errorf("could not get block: %w", err) } @@ -109,7 +117,7 @@ type Throttle struct { } type BlockHandler interface { - OnBlock(block *flow.Block) error + OnBlock(block *flow.Header) error } func NewThrottle( From 0604cc9cd1d515295f04382fd13f5e23209fdc7c Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Thu, 1 Feb 2024 17:08:51 -0800 Subject: [PATCH 017/148] remove throttle engine --- engine/execution/ingestion/throttle.go | 75 -------------------------- 1 file changed, 75 deletions(-) diff --git a/engine/execution/ingestion/throttle.go b/engine/execution/ingestion/throttle.go index 05ee256ba73..fab5d98f8f0 100644 --- a/engine/execution/ingestion/throttle.go +++ b/engine/execution/ingestion/throttle.go @@ -1,14 +1,11 @@ package ingestion import ( - "context" "fmt" "sync" "github.com/onflow/flow-go/engine/execution/state" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module/component" - "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/storage" "github.com/rs/zerolog" @@ -19,78 +16,6 @@ import ( // blocks until the execution has caught up const CatchUpThreshold = 500 -func NewThrottleEngine( - log 
zerolog.Logger, - handler BlockHandler, - state protocol.State, - execState state.ExecutionState, - headers storage.Headers, - catchupThreshold int, -) (*component.ComponentManager, error) { - throttle, err := NewThrottle(log, state, execState, headers, catchupThreshold) - if err != nil { - return nil, fmt.Errorf("could not create throttle: %w", err) - } - - e := component.NewComponentManagerBuilder(). - AddWorker(func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { - // TODO: config the buffer size - // since the handler.OnBlock method could be blocking, we need to make sure - // the channel has enough buffer space to hold the unprocessed blocks. - // if the channel is full, then it will block the follower engine from - // delivering new blocks until the channel is not full, which could be - // useful because we probably don't want to process too many blocks if - // the execution is not fast enough or even stopped. - // TODO: wrap the channel so that we can report acurate metrics about the - // buffer size - processables := make(chan flow.Identifier, 10000) - - go func() { - err := forwardProcessableToHandler(ctx, headers, handler, processables) - if err != nil { - ctx.Throw(err) - } - }() - - log.Info().Msg("initializing throttle engine") - - err = throttle.Init(processables) - if err != nil { - ctx.Throw(err) - } - - log.Info().Msgf("throttle engine initialized") - - ready() - }). 
- Build() - return e, nil -} - -func forwardProcessableToHandler( - ctx context.Context, - headers storage.Headers, - handler BlockHandler, - processables <-chan flow.Identifier, -) error { - for { - select { - case <-ctx.Done(): - return nil - case blockID := <-processables: - block, err := headers.ByBlockID(blockID) - if err != nil { - return fmt.Errorf("could not get block: %w", err) - } - - err = handler.OnBlock(block) - if err != nil { - return fmt.Errorf("could not process block: %w", err) - } - } - } -} - // Throttle is a helper struct that helps throttle the unexecuted blocks to be sent // to the block queue for execution. // It is useful for case when execution is falling far behind the finalization, in which case From 04694252669dc3b38e2de593ed26fcc66d0d3f4b Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Thu, 1 Feb 2024 17:09:14 -0800 Subject: [PATCH 018/148] lint --- engine/execution/ingestion/throttle.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/engine/execution/ingestion/throttle.go b/engine/execution/ingestion/throttle.go index fab5d98f8f0..38464a5e6fd 100644 --- a/engine/execution/ingestion/throttle.go +++ b/engine/execution/ingestion/throttle.go @@ -4,11 +4,12 @@ import ( "fmt" "sync" + "github.com/rs/zerolog" + "github.com/onflow/flow-go/engine/execution/state" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/storage" - "github.com/rs/zerolog" ) // CatchUpThreshold is the number of blocks that if the execution is far behind From 465a3d025d0324f12929d182880cd6562e32976b Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Fri, 2 Feb 2024 10:47:38 -0800 Subject: [PATCH 019/148] rename throttle --- engine/execution/ingestion/throttle.go | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/engine/execution/ingestion/throttle.go b/engine/execution/ingestion/throttle.go index 38464a5e6fd..b98157dbda2 100644 
--- a/engine/execution/ingestion/throttle.go +++ b/engine/execution/ingestion/throttle.go @@ -17,13 +17,13 @@ import ( // blocks until the execution has caught up const CatchUpThreshold = 500 -// Throttle is a helper struct that helps throttle the unexecuted blocks to be sent +// BlockThrottle is a helper struct that throttles the unexecuted blocks to be sent // to the block queue for execution. // It is useful for case when execution is falling far behind the finalization, in which case // we want to throttle the blocks to be sent to the block queue for fetching data to execute // them. Without throttle, the block queue will be flooded with blocks, and the network // will be flooded with requests fetching collections, and the EN might quickly run out of memory. -type Throttle struct { +type BlockThrottle struct { // config threshold int // catch up threshold @@ -46,13 +46,13 @@ type BlockHandler interface { OnBlock(block *flow.Header) error } -func NewThrottle( +func NewBlockThrottle( log zerolog.Logger, state protocol.State, execState state.ExecutionState, headers storage.Headers, catchupThreshold int, -) (*Throttle, error) { +) (*BlockThrottle, error) { finalizedHead, err := state.Final().Head() if err != nil { return nil, fmt.Errorf("could not get finalized head: %w", err) @@ -67,7 +67,7 @@ func NewThrottle( return nil, fmt.Errorf("executed finalized %v is greater than finalized %v", executed, finalized) } - return &Throttle{ + return &BlockThrottle{ threshold: catchupThreshold, executed: executed, finalized: finalized, @@ -78,7 +78,7 @@ func NewThrottle( }, nil } -func (c *Throttle) Init(processables chan<- flow.Identifier) error { +func (c *BlockThrottle) Init(processables chan<- flow.Identifier) error { c.mu.Lock() defer c.mu.Unlock() if c.inited { @@ -108,7 +108,7 @@ func (c *Throttle) Init(processables chan<- flow.Identifier) error { return nil } -func (c *Throttle) OnBlockExecuted(executed uint64, _ flow.Identifier) error { +func (c *BlockThrottle) 
OnBlockExecuted(executed uint64, _ flow.Identifier) error { c.mu.Lock() defer c.mu.Unlock() @@ -148,7 +148,7 @@ func (c *Throttle) OnBlockExecuted(executed uint64, _ flow.Identifier) error { return nil } -func (c *Throttle) BlockProcessable(block *flow.Header, qc *flow.QuorumCertificate) { +func (c *BlockThrottle) BlockProcessable(block *flow.Header, qc *flow.QuorumCertificate) { c.mu.Lock() defer c.mu.Unlock() @@ -165,7 +165,7 @@ func (c *Throttle) BlockProcessable(block *flow.Header, qc *flow.QuorumCertifica c.processables <- qc.BlockID } -func (c *Throttle) OnBlockFinalized(lastFinalized *flow.Header) { +func (c *BlockThrottle) OnBlockFinalized(lastFinalized *flow.Header) { c.mu.Lock() defer c.mu.Unlock() if !c.inited { @@ -183,7 +183,7 @@ func (c *Throttle) OnBlockFinalized(lastFinalized *flow.Header) { c.finalized = lastFinalized.Height } -func (c *Throttle) caughtUp() bool { +func (c *BlockThrottle) caughtUp() bool { return caughtUp(c.executed, c.finalized, c.threshold) } From 669e087d1b259861d691ddeedbff7ae88076caa4 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Fri, 2 Feb 2024 10:58:47 -0800 Subject: [PATCH 020/148] update throttle --- engine/execution/ingestion/throttle.go | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/engine/execution/ingestion/throttle.go b/engine/execution/ingestion/throttle.go index b98157dbda2..16ca08797e9 100644 --- a/engine/execution/ingestion/throttle.go +++ b/engine/execution/ingestion/throttle.go @@ -42,10 +42,6 @@ type BlockThrottle struct { headers storage.Headers } -type BlockHandler interface { - OnBlock(block *flow.Header) error -} - func NewBlockThrottle( log zerolog.Logger, state protocol.State, @@ -108,7 +104,7 @@ func (c *BlockThrottle) Init(processables chan<- flow.Identifier) error { return nil } -func (c *BlockThrottle) OnBlockExecuted(executed uint64, _ flow.Identifier) error { +func (c *BlockThrottle) OnBlockExecuted(_ flow.Identifier, executed uint64) error { 
c.mu.Lock() defer c.mu.Unlock() @@ -148,21 +144,22 @@ func (c *BlockThrottle) OnBlockExecuted(executed uint64, _ flow.Identifier) erro return nil } -func (c *BlockThrottle) BlockProcessable(block *flow.Header, qc *flow.QuorumCertificate) { +func (c *BlockThrottle) OnBlock(blockID flow.Identifier) error { c.mu.Lock() defer c.mu.Unlock() if !c.inited { - return + return fmt.Errorf("throttle not inited") } // ignore the block if has not caught up. if !c.caughtUp() { - return + return nil } // if has caught up, then process the block - c.processables <- qc.BlockID + c.processables <- blockID + return nil } func (c *BlockThrottle) OnBlockFinalized(lastFinalized *flow.Header) { From 863115b61278043633702567bd0040b088ddca0e Mon Sep 17 00:00:00 2001 From: Andrii Date: Wed, 27 Mar 2024 13:37:09 +0200 Subject: [PATCH 021/148] Linted --- .../access/cohort2/observer_indexer_enabled_extended_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integration/tests/access/cohort2/observer_indexer_enabled_extended_test.go b/integration/tests/access/cohort2/observer_indexer_enabled_extended_test.go index 475768d8b90..e91d3668739 100644 --- a/integration/tests/access/cohort2/observer_indexer_enabled_extended_test.go +++ b/integration/tests/access/cohort2/observer_indexer_enabled_extended_test.go @@ -4,7 +4,6 @@ import ( "bytes" "context" "fmt" - "github.com/onflow/flow/protobuf/go/flow/entities" "testing" "time" @@ -24,6 +23,7 @@ import ( "github.com/onflow/flow-go/utils/unittest" accessproto "github.com/onflow/flow/protobuf/go/flow/access" + "github.com/onflow/flow/protobuf/go/flow/entities" ) func TestObserverIndexerEnabledExtended(t *testing.T) { From f4edafd5463f0ffba6311d1324b9ceadcebeceed Mon Sep 17 00:00:00 2001 From: Andrii Date: Thu, 28 Mar 2024 18:33:46 +0200 Subject: [PATCH 022/148] Added more api calls to test --- access/handler.go | 4 +- engine/access/apiproxy/access_api_proxy.go | 6 - .../observer_indexer_enabled_extended_test.go | 604 
+++++++++++++----- .../cohort2/observer_indexer_enabled_test.go | 4 +- 4 files changed, 458 insertions(+), 160 deletions(-) diff --git a/access/handler.go b/access/handler.go index 71e48511aca..aeb5c03d2ec 100644 --- a/access/handler.go +++ b/access/handler.go @@ -708,9 +708,9 @@ func (h *Handler) GetExecutionResultForBlockID(ctx context.Context, req *access. func (h *Handler) GetExecutionResultByID(ctx context.Context, req *access.GetExecutionResultByIDRequest) (*access.ExecutionResultByIDResponse, error) { metadata := h.buildMetadataResponse() - blockID := convert.MessageToIdentifier(req.GetId()) + resultId := convert.MessageToIdentifier(req.GetId()) - result, err := h.api.GetExecutionResultByID(ctx, blockID) + result, err := h.api.GetExecutionResultByID(ctx, resultId) if err != nil { return nil, err } diff --git a/engine/access/apiproxy/access_api_proxy.go b/engine/access/apiproxy/access_api_proxy.go index d57f1681700..1ee0cb3fe7e 100644 --- a/engine/access/apiproxy/access_api_proxy.go +++ b/engine/access/apiproxy/access_api_proxy.go @@ -197,12 +197,6 @@ func (h *FlowAccessAPIRouter) GetSystemTransaction(context context.Context, req } func (h *FlowAccessAPIRouter) GetSystemTransactionResult(context context.Context, req *access.GetSystemTransactionResultRequest) (*access.TransactionResultResponse, error) { - if h.useIndex { - res, err := h.local.GetSystemTransactionResult(context, req) - h.log(LocalApiService, "GetSystemTransactionResult", err) - return res, err - } - res, err := h.upstream.GetSystemTransactionResult(context, req) h.log(UpstreamApiService, "GetSystemTransactionResult", err) return res, err diff --git a/integration/tests/access/cohort2/observer_indexer_enabled_extended_test.go b/integration/tests/access/cohort2/observer_indexer_enabled_extended_test.go index e91d3668739..72f714874df 100644 --- a/integration/tests/access/cohort2/observer_indexer_enabled_extended_test.go +++ 
b/integration/tests/access/cohort2/observer_indexer_enabled_extended_test.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "fmt" + "github.com/onflow/flow-go/engine/common/rpc/convert" "testing" "time" @@ -168,33 +169,23 @@ func (s *ObserverIndexerEnabledExtendedSuite) TestObserverIndexedRPCsHappyPath() } require.NotEqual(t, sdk.EmptyAddress, newAccountAddress) - // now we can query events using observer to data which has to be locally indexed + // now we can query events using observerLocal to data which has to be locally indexed - access, err := s.getClient(s.net.ContainerByName(testnet.PrimaryAN).Addr(testnet.GRPCPort)) + // get an access node client + accessNode, err := s.getClient(s.net.ContainerByName(testnet.PrimaryAN).Addr(testnet.GRPCPort)) require.NoError(t, err) - // get an observer client - observer, err := s.getObserverClient() + // get an observer with indexer enabled client + observerLocal, err := s.getObserverClient() require.NoError(t, err) - observer_2, err := s.getClient(s.net.ContainerByName("observer_2").Addr(testnet.GRPCPort)) + // get an upstream observer client + observerUpstream, err := s.getClient(s.net.ContainerByName("observer_2").Addr(testnet.GRPCPort)) require.NoError(t, err) - // wait for data to be synced by observer + // wait for data to be synced by observerLocal require.Eventually(t, func() bool { - _, err := observer.GetAccount(ctx, &accessproto.GetAccountRequest{ - Address: newAccountAddress.Bytes(), - }) - statusErr, ok := status.FromError(err) - if !ok || err == nil { - return true - } - return statusErr.Code() != codes.OutOfRange - }, 30*time.Second, 1*time.Second) - - // wait for data to be synced by observer - require.Eventually(t, func() bool { - _, err := observer.GetAccountAtBlockHeight(ctx, &accessproto.GetAccountAtBlockHeightRequest{ + _, err := observerLocal.GetAccountAtBlockHeight(ctx, &accessproto.GetAccountAtBlockHeightRequest{ Address: newAccountAddress.Bytes(), BlockHeight: accountCreationTxRes.BlockHeight, }) @@ 
-205,212 +196,523 @@ func (s *ObserverIndexerEnabledExtendedSuite) TestObserverIndexedRPCsHappyPath() return statusErr.Code() != codes.OutOfRange }, 30*time.Second, 1*time.Second) - // wait for data to be synced by observer - require.Eventually(t, func() bool { - _, err := observer.GetAccountAtLatestBlock(ctx, &accessproto.GetAccountAtLatestBlockRequest{ - Address: newAccountAddress.Bytes(), - }) - statusErr, ok := status.FromError(err) - if !ok || err == nil { - return true - } - return statusErr.Code() != codes.OutOfRange - }, 30*time.Second, 1*time.Second) + log := unittest.LoggerForTest(s.Suite.T(), zerolog.InfoLevel) + log.Info().Msg("================> onverted.Payload.Results") - blockWithAccount, err := observer.GetBlockHeaderByID(ctx, &accessproto.GetBlockHeaderByIDRequest{ - Id: accountCreationTxRes.BlockID[:], + blockWithAccount, err := observerLocal.GetBlockByID(ctx, &accessproto.GetBlockByIDRequest{ + Id: accountCreationTxRes.BlockID[:], + FullBlockResponse: true, }) require.NoError(t, err) - blockWithAccount_2, err := observer_2.GetBlockHeaderByID(ctx, &accessproto.GetBlockHeaderByIDRequest{ - Id: accountCreationTxRes.BlockID[:], - }) + // GetEventsForBlockIDs + eventsByBlockID := s.TestGetEventsForBlockIDsObserverRPC(ctx, observerLocal, observerUpstream, accessNode, [][]byte{blockWithAccount.Block.Id}) + + // GetEventsForHeightRange + eventsByHeight := s.TestGetEventsForHeightRangeObserverRPC(ctx, observerLocal, observerUpstream, accessNode, blockWithAccount.Block.Height, blockWithAccount.Block.Height) + + // validate that there is an event that we are looking for + require.Equal(t, eventsByHeight.Results, eventsByBlockID.Results) + + var txIndex uint32 + found := false + for _, eventsInBlock := range eventsByHeight.Results { + for _, event := range eventsInBlock.Events { + if event.Type == sdk.EventAccountCreated { + if bytes.Equal(event.Payload, accountCreatedPayload) { + found = true + txIndex = event.TransactionIndex + } + } + } + } + 
require.True(t, found) + + // GetSystemTransaction + s.TestGetSystemTransactionObserverRPC(ctx, observerLocal, observerUpstream, accessNode, blockWithAccount.Block.Id) + + converted, err := convert.MessageToBlock(blockWithAccount.Block) require.NoError(t, err) - accessEventsByBlockID, err := access.GetEventsForBlockIDs(ctx, &accessproto.GetEventsForBlockIDsRequest{ + resultId := converted.Payload.Results[0].ID() + + // GetExecutionResultByID + s.TestGetExecutionResultByIDObserverRPC(ctx, observerLocal, observerUpstream, accessNode, convert.IdentifierToMessage(resultId)) + + //GetTransaction + s.TestGetTransactionObserverRPC(ctx, observerLocal, observerUpstream, accessNode, accountCreationTxRes.TransactionID.Bytes(), blockWithAccount.Block.Id, nil) + + // GetTransactionResult + s.TestGetTransactionResultObserverRPC(ctx, observerLocal, observerUpstream, accessNode, accountCreationTxRes.TransactionID.Bytes(), blockWithAccount.Block.Id, accountCreationTxRes.CollectionID.Bytes()) + + //GetTransactionResultByIndex + s.TestGetTransactionResultsByIndexIDObserverRPC(ctx, observerLocal, observerUpstream, accessNode, blockWithAccount.Block.Id, txIndex) + + // GetTransactionResultsByBlockID + s.TestGetTransactionResultsByBlockIDObserverRPC(ctx, observerLocal, observerUpstream, accessNode, blockWithAccount.Block.Id) + + // GetTransactionsByBlockID + s.TestGetTransactionsByBlockIDObserverRPC(ctx, observerLocal, observerUpstream, accessNode, blockWithAccount.Block.Id) + + // GetCollectionByID + s.TestGetCollectionByIDObserverRPC(ctx, observerLocal, observerUpstream, accessNode, accountCreationTxRes.CollectionID.Bytes()) + + // ExecuteScriptAtBlockHeight + s.TestExecuteScriptAtBlockHeightObserverRPC(ctx, observerLocal, observerUpstream, accessNode, blockWithAccount.Block.Height, []byte(simpleScript)) + + // ExecuteScriptAtBlockID + s.TestExecuteScriptAtBlockIDObserverRPC(ctx, observerLocal, observerUpstream, accessNode, blockWithAccount.Block.Id, []byte(simpleScript)) + + // 
GetAccountAtBlockHeight + s.TestGetAccountAtBlockHeightObserverRPC(ctx, observerLocal, observerUpstream, accessNode, newAccountAddress.Bytes(), accountCreationTxRes.BlockHeight) + + // GetAccount + //getAccountObserver1Response, err := observerLocal.GetAccount(ctx, &accessproto.GetAccountRequest{ + // Address: newAccountAddress.Bytes(), + //}) + //require.NoError(t, err) + // + //getAccountObserver2Response, err := observerUpstream.GetAccount(ctx, &accessproto.GetAccountRequest{ + // Address: newAccountAddress.Bytes(), + //}) + //require.NoError(t, err) + // + //getAccountAccessResponse, err := accessNode.GetAccount(ctx, &accessproto.GetAccountRequest{ + // Address: newAccountAddress.Bytes(), + //}) + //require.NoError(t, err) + // + //require.Equal(t, getAccountAccessResponse.Account, getAccountObserver2Response.Account) + //require.Equal(t, getAccountAccessResponse.Account, getAccountObserver1Response.Account) + + //GetAccountAtLatestBlock + //getAccountAtLatestBlockObserver1Response, err := observerLocal.GetAccountAtLatestBlock(ctx, &accessproto.GetAccountAtLatestBlockRequest{ + // Address: newAccountAddress.Bytes(), + //}) + //require.NoError(t, err) + // + //getAccountAtLatestBlockObserver2Response, err := observerUpstream.GetAccountAtLatestBlock(ctx, &accessproto.GetAccountAtLatestBlockRequest{ + // Address: newAccountAddress.Bytes(), + //}) + //require.NoError(t, err) + // + //getAccountAtLatestBlockAccessResponse, err := accessNode.GetAccountAtLatestBlock(ctx, &accessproto.GetAccountAtLatestBlockRequest{ + // Address: newAccountAddress.Bytes(), + //}) + //require.NoError(t, err) + // + //require.Equal(t, getAccountAtLatestBlockObserver2Response.Account, getAccountAtLatestBlockAccessResponse.Account) + //require.Equal(t, getAccountAtLatestBlockObserver1Response.Account, getAccountAtLatestBlockAccessResponse.Account) +} + +func (s *ObserverIndexerEnabledExtendedSuite) TestGetEventsForBlockIDsObserverRPC( + ctx context.Context, + observerLocal 
accessproto.AccessAPIClient, + observerUpstream accessproto.AccessAPIClient, + accessNode accessproto.AccessAPIClient, + blockIds [][]byte, +) *accessproto.EventsResponse { + observerLocalResponse, err := observerLocal.GetEventsForBlockIDs(ctx, &accessproto.GetEventsForBlockIDsRequest{ Type: sdk.EventAccountCreated, - BlockIds: [][]byte{blockWithAccount.Block.Id}, + BlockIds: blockIds, EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, }) - require.NoError(t, err) + require.NoError(s.T(), err) - eventsByBlockID, err := observer.GetEventsForBlockIDs(ctx, &accessproto.GetEventsForBlockIDsRequest{ + observerUpstreamResponse, err := observerUpstream.GetEventsForBlockIDs(ctx, &accessproto.GetEventsForBlockIDsRequest{ Type: sdk.EventAccountCreated, - BlockIds: [][]byte{blockWithAccount.Block.Id}, + BlockIds: blockIds, EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, }) - require.NoError(t, err) + require.NoError(s.T(), err) - eventsByBlockID_2, err := observer_2.GetEventsForBlockIDs(ctx, &accessproto.GetEventsForBlockIDsRequest{ + accessNodeResponse, err := accessNode.GetEventsForBlockIDs(ctx, &accessproto.GetEventsForBlockIDsRequest{ Type: sdk.EventAccountCreated, - BlockIds: [][]byte{blockWithAccount_2.Block.Id}, + BlockIds: blockIds, EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, }) - require.NoError(t, err) + require.NoError(s.T(), err) - require.Equal(t, eventsByBlockID.Results, accessEventsByBlockID.Results) - require.Equal(t, eventsByBlockID_2.Results, accessEventsByBlockID.Results) + require.Equal(s.T(), accessNodeResponse.Results, observerLocalResponse.Results) + require.Equal(s.T(), accessNodeResponse.Results, observerUpstreamResponse.Results) - // GetEventsForHeightRange - accessEventsByHeight, err := access.GetEventsForHeightRange(ctx, &accessproto.GetEventsForHeightRangeRequest{ + return observerLocalResponse +} + +func (s *ObserverIndexerEnabledExtendedSuite) TestGetEventsForHeightRangeObserverRPC( + ctx 
context.Context, + observerLocal accessproto.AccessAPIClient, + observerUpstream accessproto.AccessAPIClient, + accessNode accessproto.AccessAPIClient, + startHeight uint64, + endHeight uint64, +) *accessproto.EventsResponse { + observerLocalResponse, err := observerLocal.GetEventsForHeightRange(ctx, &accessproto.GetEventsForHeightRangeRequest{ Type: sdk.EventAccountCreated, - StartHeight: blockWithAccount.Block.Height, - EndHeight: blockWithAccount.Block.Height, + StartHeight: startHeight, + EndHeight: endHeight, EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, }) - require.NoError(t, err) + require.NoError(s.T(), err) - eventsByHeight, err := observer.GetEventsForHeightRange(ctx, &accessproto.GetEventsForHeightRangeRequest{ + observerUpstreamResponse, err := observerUpstream.GetEventsForHeightRange(ctx, &accessproto.GetEventsForHeightRangeRequest{ Type: sdk.EventAccountCreated, - StartHeight: blockWithAccount.Block.Height, - EndHeight: blockWithAccount.Block.Height, + StartHeight: startHeight, + EndHeight: endHeight, EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, }) - require.NoError(t, err) + require.NoError(s.T(), err) - eventsByHeight_2, err := observer_2.GetEventsForHeightRange(ctx, &accessproto.GetEventsForHeightRangeRequest{ + accessNodeResponse, err := accessNode.GetEventsForHeightRange(ctx, &accessproto.GetEventsForHeightRangeRequest{ Type: sdk.EventAccountCreated, - StartHeight: blockWithAccount_2.Block.Height, - EndHeight: blockWithAccount_2.Block.Height, + StartHeight: startHeight, + EndHeight: endHeight, EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, }) - require.NoError(t, err) + require.NoError(s.T(), err) - require.Equal(t, eventsByHeight.Results, accessEventsByHeight.Results) - require.Equal(t, eventsByHeight_2.Results, accessEventsByHeight.Results) + require.Equal(s.T(), accessNodeResponse.Results, observerLocalResponse.Results) + require.Equal(s.T(), accessNodeResponse.Results, 
observerUpstreamResponse.Results) - // validate that there is an event that we are looking for - require.Equal(t, eventsByHeight.Results, eventsByBlockID.Results) - found := false - for _, eventsInBlock := range eventsByHeight.Results { - for _, event := range eventsInBlock.Events { - if event.Type == sdk.EventAccountCreated { - if bytes.Equal(event.Payload, accountCreatedPayload) { - found = true - } - } - } - } - require.True(t, found) + return observerLocalResponse +} - // GetAccount - getAccountObserver1Response, err := observer.GetAccount(ctx, &accessproto.GetAccountRequest{ - Address: newAccountAddress.Bytes(), +func (s *ObserverIndexerEnabledExtendedSuite) TestGetAccountAtBlockHeightObserverRPC( + ctx context.Context, + observerLocal accessproto.AccessAPIClient, + observerUpstream accessproto.AccessAPIClient, + accessNode accessproto.AccessAPIClient, + accountAddress []byte, + blockHeight uint64, +) { + + observerLocalResponse, err := observerLocal.GetAccountAtBlockHeight(ctx, &accessproto.GetAccountAtBlockHeightRequest{ + Address: accountAddress, + BlockHeight: blockHeight, }) - require.NoError(t, err) + require.NoError(s.T(), err) - getAccountObserver2Response, err := observer_2.GetAccount(ctx, &accessproto.GetAccountRequest{ - Address: newAccountAddress.Bytes(), + observerUpstreamResponse, err := observerUpstream.GetAccountAtBlockHeight(ctx, &accessproto.GetAccountAtBlockHeightRequest{ + Address: accountAddress, + BlockHeight: blockHeight, }) - require.NoError(t, err) + require.NoError(s.T(), err) - getAccountAccessResponse, err := access.GetAccount(ctx, &accessproto.GetAccountRequest{ - Address: newAccountAddress.Bytes(), + accessNodeResponse, err := accessNode.GetAccountAtBlockHeight(ctx, &accessproto.GetAccountAtBlockHeightRequest{ + Address: accountAddress, + BlockHeight: blockHeight, }) - require.NoError(t, err) + require.NoError(s.T(), err) - require.Equal(t, getAccountAccessResponse.Account, getAccountObserver2Response.Account) - require.Equal(t, 
getAccountAccessResponse.Account, getAccountObserver1Response.Account) + require.Equal(s.T(), accessNodeResponse.Account, observerLocalResponse.Account) + require.Equal(s.T(), accessNodeResponse.Account, observerUpstreamResponse.Account) +} - // GetAccountAtBlockHeight - getAccountAtBlockHeightObserver1Response, err := observer.GetAccountAtBlockHeight(ctx, &accessproto.GetAccountAtBlockHeightRequest{ - Address: newAccountAddress.Bytes(), - BlockHeight: accountCreationTxRes.BlockHeight, +func (s *ObserverIndexerEnabledExtendedSuite) TestGetSystemTransactionObserverRPC( + ctx context.Context, + observerLocal accessproto.AccessAPIClient, + observerUpstream accessproto.AccessAPIClient, + accessNode accessproto.AccessAPIClient, + blockId []byte, +) { + + observerLocalResponse, err := observerLocal.GetSystemTransaction(ctx, &accessproto.GetSystemTransactionRequest{ + BlockId: blockId, }) - require.NoError(t, err) + require.NoError(s.T(), err) - getAccountAtBlockHeightObserver2Response, err := observer_2.GetAccountAtBlockHeight(ctx, &accessproto.GetAccountAtBlockHeightRequest{ - Address: newAccountAddress.Bytes(), - BlockHeight: accountCreationTxRes.BlockHeight, + observerUpstreamResponse, err := observerUpstream.GetSystemTransaction(ctx, &accessproto.GetSystemTransactionRequest{ + BlockId: blockId, }) - require.NoError(t, err) + require.NoError(s.T(), err) - getAccountAtBlockHeightAccessResponse, err := access.GetAccountAtBlockHeight(ctx, &accessproto.GetAccountAtBlockHeightRequest{ - Address: newAccountAddress.Bytes(), - BlockHeight: accountCreationTxRes.BlockHeight, + accessNodeResponse, err := accessNode.GetSystemTransaction(ctx, &accessproto.GetSystemTransactionRequest{ + BlockId: blockId, }) - require.NoError(t, err) + require.NoError(s.T(), err) - require.Equal(t, getAccountAtBlockHeightObserver2Response.Account, getAccountAtBlockHeightAccessResponse.Account) - require.Equal(t, getAccountAtBlockHeightObserver1Response.Account, 
getAccountAtBlockHeightAccessResponse.Account) + require.Equal(s.T(), accessNodeResponse.Transaction, observerLocalResponse.Transaction) + require.Equal(s.T(), accessNodeResponse.Transaction, observerUpstreamResponse.Transaction) +} - //GetAccountAtLatestBlock - getAccountAtLatestBlockObserver1Response, err := observer.GetAccountAtLatestBlock(ctx, &accessproto.GetAccountAtLatestBlockRequest{ - Address: newAccountAddress.Bytes(), +func (s *ObserverIndexerEnabledExtendedSuite) TestGetExecutionResultByIDObserverRPC( + ctx context.Context, + observerLocal accessproto.AccessAPIClient, + observerUpstream accessproto.AccessAPIClient, + accessNode accessproto.AccessAPIClient, + id []byte, +) { + + observerLocalResponse, err := observerLocal.GetExecutionResultByID(ctx, &accessproto.GetExecutionResultByIDRequest{ + Id: id, }) - require.NoError(t, err) + require.NoError(s.T(), err) - getAccountAtLatestBlockObserver2Response, err := observer_2.GetAccountAtLatestBlock(ctx, &accessproto.GetAccountAtLatestBlockRequest{ - Address: newAccountAddress.Bytes(), + observerUpstreamResponse, err := observerUpstream.GetExecutionResultByID(ctx, &accessproto.GetExecutionResultByIDRequest{ + Id: id, }) - require.NoError(t, err) + require.NoError(s.T(), err) - getAccountAtLatestBlockAccessResponse, err := access.GetAccountAtLatestBlock(ctx, &accessproto.GetAccountAtLatestBlockRequest{ - Address: newAccountAddress.Bytes(), + accessNodeResponse, err := accessNode.GetExecutionResultByID(ctx, &accessproto.GetExecutionResultByIDRequest{ + Id: id, }) - require.NoError(t, err) + require.NoError(s.T(), err) - require.Equal(t, getAccountAtLatestBlockObserver2Response.Account, getAccountAtLatestBlockAccessResponse.Account) - require.Equal(t, getAccountAtLatestBlockObserver1Response.Account, getAccountAtLatestBlockAccessResponse.Account) + require.Equal(s.T(), accessNodeResponse.ExecutionResult, observerLocalResponse.ExecutionResult) + require.Equal(s.T(), accessNodeResponse.ExecutionResult, 
observerUpstreamResponse.ExecutionResult) +} - // GetSystemTransaction - getSystemTransactionObserver1Response, err := observer.GetSystemTransaction(ctx, &accessproto.GetSystemTransactionRequest{ - BlockId: blockWithAccount.Block.Id, +func (s *ObserverIndexerEnabledExtendedSuite) TestGetTransactionObserverRPC( + ctx context.Context, + observerLocal accessproto.AccessAPIClient, + observerUpstream accessproto.AccessAPIClient, + accessNode accessproto.AccessAPIClient, + id []byte, + blockId []byte, + collectionId []byte, +) { + + observerLocalResponse, err := observerLocal.GetTransaction(ctx, &accessproto.GetTransactionRequest{ + Id: id, + BlockId: blockId, + CollectionId: collectionId, }) - require.NoError(t, err) + require.NoError(s.T(), err) - getSystemTransactionObserver2Response, err := observer_2.GetSystemTransaction(ctx, &accessproto.GetSystemTransactionRequest{ - BlockId: blockWithAccount.Block.Id, + observerUpstreamResponse, err := observerUpstream.GetTransaction(ctx, &accessproto.GetTransactionRequest{ + Id: id, + BlockId: blockId, + CollectionId: collectionId, }) - require.NoError(t, err) + require.NoError(s.T(), err) - getSystemTransactionAccessResponse, err := access.GetSystemTransaction(ctx, &accessproto.GetSystemTransactionRequest{ - BlockId: blockWithAccount.Block.Id, + accessNodeResponse, err := accessNode.GetTransaction(ctx, &accessproto.GetTransactionRequest{ + Id: id, + BlockId: blockId, + CollectionId: collectionId, }) - require.NoError(t, err) + require.NoError(s.T(), err) - require.Equal(t, getSystemTransactionObserver2Response.Transaction, getSystemTransactionAccessResponse.Transaction) - require.Equal(t, getSystemTransactionObserver1Response.Transaction, getSystemTransactionAccessResponse.Transaction) + require.Equal(s.T(), accessNodeResponse.Transaction, observerLocalResponse.Transaction) + require.Equal(s.T(), accessNodeResponse.Transaction, observerUpstreamResponse.Transaction) +} + +func (s *ObserverIndexerEnabledExtendedSuite) 
TestGetTransactionResultObserverRPC( + ctx context.Context, + observerLocal accessproto.AccessAPIClient, + observerUpstream accessproto.AccessAPIClient, + accessNode accessproto.AccessAPIClient, + id []byte, + blockId []byte, + collectionId []byte, +) { + + observerLocalResponse, err := observerLocal.GetTransactionResult(ctx, &accessproto.GetTransactionRequest{ + Id: id, + BlockId: blockId, + CollectionId: collectionId, + }) + require.NoError(s.T(), err) + + observerUpstreamResponse, err := observerUpstream.GetTransactionResult(ctx, &accessproto.GetTransactionRequest{ + Id: id, + BlockId: blockId, + CollectionId: collectionId, + }) + require.NoError(s.T(), err) - // GetSystemTransactionResult - getSystemTransactionResultObserver2Response, err := observer_2.GetSystemTransactionResult(ctx, &accessproto.GetSystemTransactionResultRequest{ - BlockId: blockWithAccount.Block.Id, + accessNodeResponse, err := accessNode.GetTransactionResult(ctx, &accessproto.GetTransactionRequest{ + Id: id, + BlockId: blockId, + CollectionId: collectionId, + }) + require.NoError(s.T(), err) + + require.Equal(s.T(), accessNodeResponse.Events, observerLocalResponse.Events) + require.Equal(s.T(), accessNodeResponse.Events, observerUpstreamResponse.Events) +} + +func (s *ObserverIndexerEnabledExtendedSuite) TestGetTransactionResultsByBlockIDObserverRPC( + ctx context.Context, + observerLocal accessproto.AccessAPIClient, + observerUpstream accessproto.AccessAPIClient, + accessNode accessproto.AccessAPIClient, + blockId []byte, +) { + + observerLocalResponse, err := observerLocal.GetTransactionResultsByBlockID(ctx, &accessproto.GetTransactionsByBlockIDRequest{ + BlockId: blockId, EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, }) - require.NoError(t, err) + require.NoError(s.T(), err) - getSystemTransactionResultAccessResponse, err := access.GetSystemTransactionResult(ctx, &accessproto.GetSystemTransactionResultRequest{ - BlockId: blockWithAccount.Block.Id, + 
observerUpstreamResponse, err := observerUpstream.GetTransactionResultsByBlockID(ctx, &accessproto.GetTransactionsByBlockIDRequest{ + BlockId: blockId, EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, }) - require.NoError(t, err) + require.NoError(s.T(), err) - //getSystemTransactionResultObserver1Response, err := observer.GetSystemTransactionResult(ctx, &accessproto.GetSystemTransactionResultRequest{ - // BlockId: blockWithAccount.Block.Id, - // EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, - //}) - //require.NoError(t, err) + accessNodeResponse, err := accessNode.GetTransactionResultsByBlockID(ctx, &accessproto.GetTransactionsByBlockIDRequest{ + BlockId: blockId, + EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, + }) + require.NoError(s.T(), err) - require.Equal(t, getSystemTransactionResultObserver2Response.Events, getSystemTransactionResultAccessResponse.Events) - //require.Equal(t, getSystemTransactionResultObserver1Response.Events, getSystemTransactionResultAccessResponse.Events) + require.Equal(s.T(), accessNodeResponse.TransactionResults, observerLocalResponse.TransactionResults) + require.Equal(s.T(), accessNodeResponse.TransactionResults, observerUpstreamResponse.TransactionResults) +} - // GetExecutionResultByID - getExecutionResultByIDObserver1Response, err := observer.GetExecutionResultByID(ctx, &accessproto.GetExecutionResultByIDRequest{ - Id: blockWithAccount.Block.Id, +func (s *ObserverIndexerEnabledExtendedSuite) TestGetTransactionResultsByIndexIDObserverRPC( + ctx context.Context, + observerLocal accessproto.AccessAPIClient, + observerUpstream accessproto.AccessAPIClient, + accessNode accessproto.AccessAPIClient, + blockId []byte, + index uint32, +) { + observerLocalResponse, err := observerLocal.GetTransactionResultByIndex(ctx, &accessproto.GetTransactionByIndexRequest{ + BlockId: blockId, + Index: index, + EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, }) - require.NoError(t, 
err) + require.NoError(s.T(), err) - getExecutionResultByIDObserver2Response, err := observer_2.GetExecutionResultByID(ctx, &accessproto.GetExecutionResultByIDRequest{ - Id: blockWithAccount.Block.Id, + observerUpstreamResponse, err := observerUpstream.GetTransactionResultByIndex(ctx, &accessproto.GetTransactionByIndexRequest{ + BlockId: blockId, + Index: index, + EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, }) - require.NoError(t, err) + require.NoError(s.T(), err) - getExecutionResultByIDAccessResponse, err := access.GetExecutionResultByID(ctx, &accessproto.GetExecutionResultByIDRequest{ - Id: blockWithAccount.Block.Id, + accessNodeResponse, err := accessNode.GetTransactionResultByIndex(ctx, &accessproto.GetTransactionByIndexRequest{ + BlockId: blockId, + Index: index, + EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, }) - require.NoError(t, err) + require.NoError(s.T(), err) + + require.Equal(s.T(), accessNodeResponse.Events, observerLocalResponse.Events) + require.Equal(s.T(), accessNodeResponse.Events, observerUpstreamResponse.Events) +} + +func (s *ObserverIndexerEnabledExtendedSuite) TestGetTransactionsByBlockIDObserverRPC( + ctx context.Context, + observerLocal accessproto.AccessAPIClient, + observerUpstream accessproto.AccessAPIClient, + accessNode accessproto.AccessAPIClient, + blockId []byte, +) { + + observerLocalResponse, err := observerLocal.GetTransactionsByBlockID(ctx, &accessproto.GetTransactionsByBlockIDRequest{ + BlockId: blockId, + }) + require.NoError(s.T(), err) + + observerUpstreamResponse, err := observerUpstream.GetTransactionsByBlockID(ctx, &accessproto.GetTransactionsByBlockIDRequest{ + BlockId: blockId, + }) + require.NoError(s.T(), err) + + accessNodeResponse, err := accessNode.GetTransactionsByBlockID(ctx, &accessproto.GetTransactionsByBlockIDRequest{ + BlockId: blockId, + }) + require.NoError(s.T(), err) + + require.Equal(s.T(), accessNodeResponse.Transactions, observerLocalResponse.Transactions) + 
require.Equal(s.T(), accessNodeResponse.Transactions, observerUpstreamResponse.Transactions) +} + +func (s *ObserverIndexerEnabledExtendedSuite) TestGetCollectionByIDObserverRPC( + ctx context.Context, + observerLocal accessproto.AccessAPIClient, + observerUpstream accessproto.AccessAPIClient, + accessNode accessproto.AccessAPIClient, + collectionId []byte, +) { - require.Equal(t, getExecutionResultByIDAccessResponse.ExecutionResult, getExecutionResultByIDObserver2Response.ExecutionResult) - require.Equal(t, getExecutionResultByIDAccessResponse.ExecutionResult, getExecutionResultByIDObserver1Response.ExecutionResult) + observerLocalResponse, err := observerLocal.GetCollectionByID(ctx, &accessproto.GetCollectionByIDRequest{ + Id: collectionId, + }) + require.NoError(s.T(), err) + + observerUpstreamResponse, err := observerUpstream.GetCollectionByID(ctx, &accessproto.GetCollectionByIDRequest{ + Id: collectionId, + }) + require.NoError(s.T(), err) + + accessNodeResponse, err := accessNode.GetCollectionByID(ctx, &accessproto.GetCollectionByIDRequest{ + Id: collectionId, + }) + require.NoError(s.T(), err) + + require.Equal(s.T(), accessNodeResponse.Collection, observerLocalResponse.Collection) + require.Equal(s.T(), accessNodeResponse.Collection, observerUpstreamResponse.Collection) +} + +func (s *ObserverIndexerEnabledExtendedSuite) TestExecuteScriptAtBlockHeightObserverRPC( + ctx context.Context, + observerLocal accessproto.AccessAPIClient, + observerUpstream accessproto.AccessAPIClient, + accessNode accessproto.AccessAPIClient, + blockHeight uint64, + script []byte, +) { + + observerLocalResponse, err := observerLocal.ExecuteScriptAtBlockHeight(ctx, &accessproto.ExecuteScriptAtBlockHeightRequest{ + BlockHeight: blockHeight, + Script: script, + Arguments: make([][]byte, 0), + }) + require.NoError(s.T(), err) + + observerUpstreamResponse, err := observerUpstream.ExecuteScriptAtBlockHeight(ctx, &accessproto.ExecuteScriptAtBlockHeightRequest{ + BlockHeight: blockHeight, 
+ Script: script, + Arguments: make([][]byte, 0), + }) + require.NoError(s.T(), err) + + accessNodeResponse, err := accessNode.ExecuteScriptAtBlockHeight(ctx, &accessproto.ExecuteScriptAtBlockHeightRequest{ + BlockHeight: blockHeight, + Script: script, + Arguments: make([][]byte, 0), + }) + require.NoError(s.T(), err) + + require.Equal(s.T(), accessNodeResponse.Value, observerLocalResponse.Value) + require.Equal(s.T(), accessNodeResponse.Value, observerUpstreamResponse.Value) +} + +func (s *ObserverIndexerEnabledExtendedSuite) TestExecuteScriptAtBlockIDObserverRPC( + ctx context.Context, + observerLocal accessproto.AccessAPIClient, + observerUpstream accessproto.AccessAPIClient, + accessNode accessproto.AccessAPIClient, + blockId []byte, + script []byte, +) { + + observerLocalResponse, err := observerLocal.ExecuteScriptAtBlockID(ctx, &accessproto.ExecuteScriptAtBlockIDRequest{ + BlockId: blockId, + Script: script, + Arguments: make([][]byte, 0), + }) + require.NoError(s.T(), err) + + observerUpstreamResponse, err := observerUpstream.ExecuteScriptAtBlockID(ctx, &accessproto.ExecuteScriptAtBlockIDRequest{ + BlockId: blockId, + Script: script, + Arguments: make([][]byte, 0), + }) + require.NoError(s.T(), err) + + accessNodeResponse, err := accessNode.ExecuteScriptAtBlockID(ctx, &accessproto.ExecuteScriptAtBlockIDRequest{ + BlockId: blockId, + Script: script, + Arguments: make([][]byte, 0), + }) + require.NoError(s.T(), err) + require.Equal(s.T(), accessNodeResponse.Value, observerLocalResponse.Value) + require.Equal(s.T(), accessNodeResponse.Value, observerUpstreamResponse.Value) } diff --git a/integration/tests/access/cohort2/observer_indexer_enabled_test.go b/integration/tests/access/cohort2/observer_indexer_enabled_test.go index e8d68c20e90..b8f0f34787d 100644 --- a/integration/tests/access/cohort2/observer_indexer_enabled_test.go +++ b/integration/tests/access/cohort2/observer_indexer_enabled_test.go @@ -23,12 +23,14 @@ import ( 
"github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/utils/unittest" + "github.com/onflow/cadence" accessproto "github.com/onflow/flow/protobuf/go/flow/access" "github.com/onflow/flow/protobuf/go/flow/entities" ) var ( - simpleScript = `pub fun main(): Int { return 42; }` + simpleScript = `pub fun main(): Int { return 42; }` + simpleScriptResult = cadence.NewInt(42) ) func TestObserverIndexerEnabled(t *testing.T) { From ffbd640cdd722e7aa508b0ee0297a5e8bd50d2f5 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Thu, 28 Mar 2024 21:16:04 -0400 Subject: [PATCH 023/148] add happy path tests --- cmd/bootstrap/cmd/final_list.go | 8 +- cmd/bootstrap/cmd/finalize.go | 31 +--- cmd/bootstrap/cmd/partner_infos.go | 4 +- cmd/util/cmd/common/clusters.go | 41 +++++- cmd/util/cmd/common/node_info.go | 15 +- cmd/util/cmd/common/utils.go | 31 +++- cmd/util/cmd/epochs/cmd/recover.go | 66 +++++---- cmd/util/cmd/epochs/cmd/recover_test.go | 49 +++++++ cmd/util/cmd/epochs/cmd/reset_test.go | 25 +--- utils/unittest/service_events_fixtures.go | 167 ++++++++++++---------- 10 files changed, 269 insertions(+), 168 deletions(-) create mode 100644 cmd/util/cmd/epochs/cmd/recover_test.go diff --git a/cmd/bootstrap/cmd/final_list.go b/cmd/bootstrap/cmd/final_list.go index 52db64980f9..f1a1e5b1901 100644 --- a/cmd/bootstrap/cmd/final_list.go +++ b/cmd/bootstrap/cmd/final_list.go @@ -242,7 +242,7 @@ func assembleInternalNodesWithoutWeight() []model.NodeInfo { common.ValidateAddressFormat(log, internal.Address) // validate every single internal node - nodeID := ValidateNodeID(internal.NodeID) + nodeID := common.ValidateNodeID(log, internal.NodeID) node := model.NewPrivateNodeInfo( nodeID, internal.Role, @@ -279,9 +279,9 @@ func createPublicNodeInfo(nodes []model.NodeInfoPub) []model.NodeInfo { common.ValidateAddressFormat(log, n.Address) // validate every single partner node - nodeID := ValidateNodeID(n.NodeID) - networkPubKey := ValidateNetworkPubKey(n.NetworkPubKey) - stakingPubKey 
:= ValidateStakingPubKey(n.StakingPubKey) + nodeID := common.ValidateNodeID(log, n.NodeID) + networkPubKey := common.ValidateNetworkPubKey(log, n.NetworkPubKey) + stakingPubKey := common.ValidateStakingPubKey(log, n.StakingPubKey) // all nodes should have equal weight node := model.NewPublicNodeInfo( diff --git a/cmd/bootstrap/cmd/finalize.go b/cmd/bootstrap/cmd/finalize.go index ee4cce5e8c5..e4bf67fca96 100644 --- a/cmd/bootstrap/cmd/finalize.go +++ b/cmd/bootstrap/cmd/finalize.go @@ -20,7 +20,6 @@ import ( "github.com/onflow/flow-go/fvm" model "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/dkg" - "github.com/onflow/flow-go/model/encodable" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/epochs" "github.com/onflow/flow-go/state/protocol" @@ -46,10 +45,7 @@ var ( flagGenesisTokenSupply string ) -// PartnerWeights is the format of the JSON file specifying partner node weights. -type PartnerWeights map[flow.Identifier]uint64 - -// finalizeCmd represents the finalize command +// finalizeCmd represents the finalize command` var finalizeCmd = &cobra.Command{ Use: "finalize", Short: "Finalize the bootstrapping process", @@ -357,31 +353,6 @@ func readDKGData() dkg.DKGData { // Validation utility methods ------------------------------------------------ -func ValidateNodeID(nodeID flow.Identifier) flow.Identifier { - if nodeID == flow.ZeroID { - log.Fatal().Msg("NodeID must not be zero") - } - return nodeID -} - -func ValidateNetworkPubKey(key encodable.NetworkPubKey) encodable.NetworkPubKey { - if key.PublicKey == nil { - log.Fatal().Msg("NetworkPubKey must not be nil") - } - return key -} - -func ValidateStakingPubKey(key encodable.StakingPubKey) encodable.StakingPubKey { - if key.PublicKey == nil { - log.Fatal().Msg("StakingPubKey must not be nil") - } - return key -} - -func ValidateWeight(weight uint64) (uint64, bool) { - return weight, weight > 0 -} - // loadRootProtocolSnapshot loads the root protocol snapshot 
from disk func loadRootProtocolSnapshot(path string) (*inmem.Snapshot, error) { data, err := io.ReadFile(filepath.Join(flagOutdir, path)) diff --git a/cmd/bootstrap/cmd/partner_infos.go b/cmd/bootstrap/cmd/partner_infos.go index d60fb7ac97e..68bee2a8430 100644 --- a/cmd/bootstrap/cmd/partner_infos.go +++ b/cmd/bootstrap/cmd/partner_infos.go @@ -64,7 +64,7 @@ func populatePartnerInfosRun(_ *cobra.Command, _ []string) { flowClient := getFlowClient() - partnerWeights := make(PartnerWeights) + partnerWeights := make(common.PartnerWeights) skippedNodes := 0 numOfPartnerNodesByRole := map[flow.Role]int{ flow.RoleCollection: 0, @@ -210,7 +210,7 @@ func writeNodePubInfoFile(info *bootstrap.NodeInfoPub) { } // writePartnerWeightsFile writes the partner weights file -func writePartnerWeightsFile(partnerWeights PartnerWeights) { +func writePartnerWeightsFile(partnerWeights common.PartnerWeights) { err := common.WriteJSON(bootstrap.FileNamePartnerWeights, flagOutdir, partnerWeights) if err != nil { log.Fatal().Err(err).Msg("failed to write json") diff --git a/cmd/util/cmd/common/clusters.go b/cmd/util/cmd/common/clusters.go index b8055acc2a1..4fe4c4347c8 100644 --- a/cmd/util/cmd/common/clusters.go +++ b/cmd/util/cmd/common/clusters.go @@ -2,9 +2,10 @@ package common import ( "errors" - + "fmt" "github.com/rs/zerolog" + "github.com/onflow/cadence" "github.com/onflow/flow-go/cmd/bootstrap/run" "github.com/onflow/flow-go/model/bootstrap" model "github.com/onflow/flow-go/model/bootstrap" @@ -13,6 +14,7 @@ import ( "github.com/onflow/flow-go/model/flow/assignment" "github.com/onflow/flow-go/model/flow/factory" "github.com/onflow/flow-go/model/flow/filter" + "github.com/onflow/flow-go/module/signature" ) // ConstructClusterAssignment random cluster assignment with internal and partner nodes. 
@@ -104,6 +106,43 @@ func ConstructRootQCsForClusters(log zerolog.Logger, clusterList flow.ClusterLis return qcs } +// ConvertClusterAssignmentsCdc converts golang cluster assignments type to cadence array of arrays. +func ConvertClusterAssignmentsCdc(assignments flow.AssignmentList) cadence.Array { + assignmentsCdc := make([]cadence.Value, len(assignments)) + for i, asmt := range assignments { + fmt.Println(asmt.Len()) + vals := make([]cadence.Value, asmt.Len()) + for j, k := range asmt { + fmt.Println(k.String()) + vals[j] = cadence.String(k.String()) + } + assignmentsCdc[i] = cadence.NewArray(vals).WithType(cadence.NewVariableSizedArrayType(cadence.StringType{})) + } + + return cadence.NewArray(assignmentsCdc).WithType(cadence.NewVariableSizedArrayType(cadence.NewVariableSizedArrayType(cadence.StringType{}))) +} + +// ConvertClusterQcsCdc converts golang cluster qcs type to cadence struct. +func ConvertClusterQcsCdc(qcs []*flow.QuorumCertificate, clusterList flow.ClusterList) ([]*flow.ClusterQCVoteData, error) { + voteData := make([]*flow.ClusterQCVoteData, len(qcs)) + for i, qc := range qcs { + c, ok := clusterList.ByIndex(uint(i)) + if !ok { + return nil, fmt.Errorf("could not get cluster list for cluster index %v", i) + } + voterIds, err := signature.DecodeSignerIndicesToIdentifiers(c.NodeIDs(), qc.SignerIndices) + if err != nil { + return nil, fmt.Errorf("could not decode signer indices: %w", err) + } + voteData[i] = &flow.ClusterQCVoteData{ + SigData: qc.SigData, + VoterIDs: voterIds, + } + } + + return voteData, nil +} + // Filters a list of nodes to include only nodes that will sign the QC for the // given cluster. The resulting list of nodes is only nodes that are in the // given cluster AND are not partner nodes (ie. we have the private keys). 
diff --git a/cmd/util/cmd/common/node_info.go b/cmd/util/cmd/common/node_info.go index 39aadafb578..2df112f2817 100644 --- a/cmd/util/cmd/common/node_info.go +++ b/cmd/util/cmd/common/node_info.go @@ -5,7 +5,6 @@ import ( "github.com/rs/zerolog" - "github.com/onflow/flow-go/cmd/bootstrap/cmd" "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/flow" ) @@ -16,7 +15,7 @@ func ReadPartnerNodeInfos(log zerolog.Logger, partnerWeightsPath, partnerNodeInf partners := ReadPartnerNodes(log, partnerNodeInfoDir) log.Info().Msgf("read %d partner node configuration files", len(partners)) - var weights cmd.PartnerWeights + var weights PartnerWeights err := ReadJSON(partnerWeightsPath, &weights) if err != nil { log.Fatal().Err(err).Msg("failed to read partner weights json") @@ -26,10 +25,10 @@ func ReadPartnerNodeInfos(log zerolog.Logger, partnerWeightsPath, partnerNodeInf var nodes []bootstrap.NodeInfo for _, partner := range partners { // validate every single partner node - nodeID := cmd.ValidateNodeID(partner.NodeID) - networkPubKey := cmd.ValidateNetworkPubKey(partner.NetworkPubKey) - stakingPubKey := cmd.ValidateStakingPubKey(partner.StakingPubKey) - weight, valid := cmd.ValidateWeight(weights[partner.NodeID]) + nodeID := ValidateNodeID(log, partner.NodeID) + networkPubKey := ValidateNetworkPubKey(log, partner.NetworkPubKey) + stakingPubKey := ValidateStakingPubKey(log, partner.StakingPubKey) + weight, valid := ValidateWeight(weights[partner.NodeID]) if !valid { log.Error().Msgf("weights: %v", weights) log.Fatal().Msgf("partner node id %x has no weight", nodeID) @@ -91,8 +90,8 @@ func ReadInternalNodeInfos(log zerolog.Logger, internalNodePrivInfoDir, internal ValidateAddressFormat(log, internal.Address) // validate every single internal node - nodeID := cmd.ValidateNodeID(internal.NodeID) - weight, valid := cmd.ValidateWeight(weights[internal.Address]) + nodeID := ValidateNodeID(log, internal.NodeID) + weight, valid := 
ValidateWeight(weights[internal.Address]) if !valid { log.Error().Msgf("weights: %v", weights) log.Fatal().Msgf("internal node %v has no weight. Did you forget to update the node address?", internal) diff --git a/cmd/util/cmd/common/utils.go b/cmd/util/cmd/common/utils.go index 2f2a8d03e90..a162feb4e65 100644 --- a/cmd/util/cmd/common/utils.go +++ b/cmd/util/cmd/common/utils.go @@ -8,11 +8,12 @@ import ( "path/filepath" "strconv" - "github.com/multiformats/go-multiaddr" "github.com/rs/zerolog" + "github.com/multiformats/go-multiaddr" "github.com/onflow/crypto" "github.com/onflow/flow-go/model/bootstrap" + "github.com/onflow/flow-go/model/encodable" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/network/p2p/utils" "github.com/onflow/flow-go/utils/io" @@ -128,3 +129,31 @@ func ValidateAddressFormat(log zerolog.Logger, address string) { _, err = multiaddr.NewMultiaddr(lp2pAddr) checkErr(err) } + +func ValidateNodeID(lg zerolog.Logger, nodeID flow.Identifier) flow.Identifier { + if nodeID == flow.ZeroID { + lg.Fatal().Msg("NodeID must not be zero") + } + return nodeID +} + +func ValidateNetworkPubKey(lg zerolog.Logger, key encodable.NetworkPubKey) encodable.NetworkPubKey { + if key.PublicKey == nil { + lg.Fatal().Msg("NetworkPubKey must not be nil") + } + return key +} + +func ValidateStakingPubKey(lg zerolog.Logger, key encodable.StakingPubKey) encodable.StakingPubKey { + if key.PublicKey == nil { + lg.Fatal().Msg("StakingPubKey must not be nil") + } + return key +} + +func ValidateWeight(weight uint64) (uint64, bool) { + return weight, weight > 0 +} + +// PartnerWeights is the format of the JSON file specifying partner node weights. 
+type PartnerWeights map[flow.Identifier]uint64 diff --git a/cmd/util/cmd/epochs/cmd/recover.go b/cmd/util/cmd/epochs/cmd/recover.go index 086534769fc..405d46473ee 100644 --- a/cmd/util/cmd/epochs/cmd/recover.go +++ b/cmd/util/cmd/epochs/cmd/recover.go @@ -2,6 +2,7 @@ package cmd import ( "context" + "encoding/hex" "fmt" "github.com/spf13/cobra" @@ -20,10 +21,10 @@ import ( // identities, generates the cluster QC's and retrieves the DKG key vector of the last successful epoch. var ( generateRecoverEpochTxArgsCmd = &cobra.Command{ - Use: "generate-efm-recovery-data", + Use: "recover-epoch-tx-args", Short: "Generates recover epoch transaction arguments", Long: "Generates transaction arguments for the epoch recovery transaction.", - Run: generateRecoverEpochTxArgs, + Run: generateRecoverEpochTxArgs(getSnapshot), } flagAnAddress string @@ -31,7 +32,7 @@ var ( flagPartnerWeights string flagPartnerNodeInfoDir string flagInternalNodePrivInfoDir string - flagConfig string + flagNodeConfigJson string flagCollectionClusters int flagStartView uint64 flagStakingEndView uint64 @@ -44,12 +45,10 @@ func init() { } func addGenerateRecoverEpochTxArgsCmdFlags() { - generateRecoverEpochTxArgsCmd.Flags().StringVar(&flagBucketNetworkName, "bucket-network-name", "", - "when retrieving the root snapshot from a GCP bucket, the network name portion of the URL (eg. 
\"mainnet-13\")") generateRecoverEpochTxArgsCmd.Flags().IntVar(&flagCollectionClusters, "collection-clusters", 0, "number of collection clusters") // required parameters for network configuration and generation of root node identities - generateRecoverEpochTxArgsCmd.Flags().StringVar(&flagConfig, "config", "", + generateRecoverEpochTxArgsCmd.Flags().StringVar(&flagNodeConfigJson, "node-config", "", "path to a JSON file containing multiple node configurations (fields Role, Address, Weight)") generateRecoverEpochTxArgsCmd.Flags().StringVar(&flagInternalNodePrivInfoDir, "internal-priv-dir", "", "path to directory "+ "containing the output from the `keygen` command for internal nodes") @@ -63,10 +62,7 @@ func addGenerateRecoverEpochTxArgsCmdFlags() { generateRecoverEpochTxArgsCmd.Flags().Uint64Var(&flagEndView, "end-view", 0, "end view of the recovery epoch") } -// generateRecoverEpochTxArgs generates recover epoch transaction arguments from a root protocol state snapshot and writes it to a JSON file -func generateRecoverEpochTxArgs(cmd *cobra.Command, args []string) { - stdout := cmd.OutOrStdout() - +func getSnapshot() *inmem.Snapshot { // get flow client with secure client connection to download protocol snapshot from access node config, err := common.NewFlowClientConfig(flagAnAddress, flagAnPubkey, flow.ZeroID, false) if err != nil { @@ -83,19 +79,28 @@ func generateRecoverEpochTxArgs(cmd *cobra.Command, args []string) { log.Fatal().Err(err).Msg("failed") } - // extract arguments from recover epoch tx from snapshot - txArgs := extractRecoverEpochArgs(snapshot) + return snapshot +} - // encode to JSON - encodedTxArgs, err := epochcmdutil.EncodeArgs(txArgs) - if err != nil { - log.Fatal().Err(err).Msg("could not encode recover epoch transaction arguments") - } +// generateRecoverEpochTxArgs generates recover epoch transaction arguments from a root protocol state snapshot and writes it to a JSON file +func generateRecoverEpochTxArgs(getSnapshot func() *inmem.Snapshot) 
func(cmd *cobra.Command, args []string) { + return func(cmd *cobra.Command, args []string) { + stdout := cmd.OutOrStdout() - // write JSON args to stdout - _, err = stdout.Write(encodedTxArgs) - if err != nil { - log.Fatal().Err(err).Msg("could not write jsoncdc encoded arguments") + // extract arguments from recover epoch tx from snapshot + txArgs := extractRecoverEpochArgs(getSnapshot()) + + // encode to JSON + encodedTxArgs, err := epochcmdutil.EncodeArgs(txArgs) + if err != nil { + log.Fatal().Err(err).Msg("could not encode recover epoch transaction arguments") + } + + // write JSON args to stdout + _, err = stdout.Write(encodedTxArgs) + if err != nil { + log.Fatal().Err(err).Msg("could not write jsoncdc encoded arguments") + } } } @@ -116,8 +121,8 @@ func extractRecoverEpochArgs(snapshot *inmem.Snapshot) []cadence.Value { partnerNodes := common.ReadPartnerNodeInfos(log, flagPartnerWeights, flagPartnerNodeInfoDir) log.Info().Msg("") - log.Info().Msg("generating internal private networking and staking keys") - internalNodes := common.ReadInternalNodeInfos(log, flagInternalNodePrivInfoDir, flagConfig) + log.Info().Msg("collecting internal node network and staking keys") + internalNodes := common.ReadInternalNodeInfos(log, flagInternalNodePrivInfoDir, flagNodeConfigJson) log.Info().Msg("") log.Info().Msg("computing collection node clusters") @@ -125,6 +130,7 @@ func extractRecoverEpochArgs(snapshot *inmem.Snapshot) []cadence.Value { if err != nil { log.Fatal().Err(err).Msg("unable to generate cluster assignment") } + log.Info().Msg("") epochCounter, err := epoch.Counter() @@ -137,14 +143,13 @@ func extractRecoverEpochArgs(snapshot *inmem.Snapshot) []cadence.Value { log.Info().Msg("constructing root QCs for collection node clusters") clusterQCs := common.ConstructRootQCsForClusters(log, clusters, internalNodes, clusterBlocks) - fmt.Sprintf("", clusterQCs) log.Info().Msg("") randomSource, err := epoch.RandomSource() if err != nil { log.Fatal().Err(err).Msg("failed 
to get random source for current epoch") } - randomSourceCdc, err := cadence.NewString(string(randomSource)) + randomSourceCdc, err := cadence.NewString(hex.EncodeToString(randomSource)) if err != nil { log.Fatal().Err(err).Msg("failed to get random source cadence string") } @@ -171,6 +176,14 @@ func extractRecoverEpochArgs(snapshot *inmem.Snapshot) []cadence.Value { return skeleton }) + // @TODO: cluster qcs are converted into flow.ClusterQCVoteData types, + // we need a corresponding type in cadence on the FlowClusterQC contract + // to store this struct. + _, err = common.ConvertClusterQcsCdc(clusterQCs, clusters) + if err != nil { + log.Fatal().Err(err).Msg("failed to convert cluster qcs to cadence type") + } + args := []cadence.Value{ randomSourceCdc, cadence.NewUInt64(flagStartView), @@ -178,8 +191,7 @@ func extractRecoverEpochArgs(snapshot *inmem.Snapshot) []cadence.Value { cadence.NewUInt64(flagEndView), cadence.NewArray(dkgPubKeys), cadence.NewArray(nodeIds), - // clusters - // clusterQcs + //common.ConvertClusterAssignmentsCdc(assignments), } return args diff --git a/cmd/util/cmd/epochs/cmd/recover_test.go b/cmd/util/cmd/epochs/cmd/recover_test.go new file mode 100644 index 00000000000..aa6efa9b17e --- /dev/null +++ b/cmd/util/cmd/epochs/cmd/recover_test.go @@ -0,0 +1,49 @@ +package cmd + +import ( + "bytes" + "encoding/json" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/cmd/bootstrap/utils" + "github.com/onflow/flow-go/state/protocol/inmem" + "github.com/onflow/flow-go/utils/unittest" +) + +// TestRecoverEpochHappyPath ensures recover epoch transaction arguments are generated as expected. +func TestRecoverEpochHappyPath(t *testing.T) { + // tests that given the root snapshot, the command + // writes the expected arguments to stdout. 
+ utils.RunWithSporkBootstrapDir(t, func(bootDir, partnerDir, partnerWeights, internalPrivDir, configPath string) { + // create a root snapshot + rootSnapshot := unittest.RootSnapshotFixture(unittest.IdentityListFixture(10, unittest.WithAllRoles())) + + snapshotFn := func() *inmem.Snapshot { return rootSnapshot } + + // run command with overwritten stdout + stdout := bytes.NewBuffer(nil) + generateRecoverEpochTxArgsCmd.SetOut(stdout) + + flagPartnerWeights = partnerWeights + flagPartnerNodeInfoDir = partnerDir + flagInternalNodePrivInfoDir = internalPrivDir + flagNodeConfigJson = configPath + flagCollectionClusters = 2 + flagStartView = 1000 + flagStakingEndView = 2000 + flagEndView = 4000 + + generateRecoverEpochTxArgs(snapshotFn)(generateRecoverEpochTxArgsCmd, nil) + + // read output from stdout + var outputTxArgs []interface{} + err := json.NewDecoder(stdout).Decode(&outputTxArgs) + require.NoError(t, err) + // compare to expected values + expectedArgs := extractRecoverEpochArgs(rootSnapshot) + + unittest.VerifyCdcArguments(t, expectedArgs, outputTxArgs) + }) +} diff --git a/cmd/util/cmd/epochs/cmd/reset_test.go b/cmd/util/cmd/epochs/cmd/reset_test.go index 25983e5cf61..30e7d0178f2 100644 --- a/cmd/util/cmd/epochs/cmd/reset_test.go +++ b/cmd/util/cmd/epochs/cmd/reset_test.go @@ -11,9 +11,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/onflow/cadence" - jsoncdc "github.com/onflow/cadence/encoding/json" - "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/state/protocol/inmem" "github.com/onflow/flow-go/utils/unittest" @@ -50,7 +47,7 @@ func TestReset_LocalSnapshot(t *testing.T) { // compare to expected values expectedArgs := extractResetEpochArgs(rootSnapshot) - verifyArguments(t, expectedArgs, outputTxArgs) + unittest.VerifyCdcArguments(t, expectedArgs, outputTxArgs) }) }) @@ -98,7 +95,7 @@ func TestReset_BucketSnapshot(t *testing.T) { rootSnapshot, err := 
getSnapshotFromBucket(fmt.Sprintf(rootSnapshotBucketURL, flagBucketNetworkName)) require.NoError(t, err) expectedArgs := extractResetEpochArgs(rootSnapshot) - verifyArguments(t, expectedArgs, outputTxArgs) + unittest.VerifyCdcArguments(t, expectedArgs, outputTxArgs) }) // should output arguments to stdout, including specified payout @@ -120,7 +117,7 @@ func TestReset_BucketSnapshot(t *testing.T) { rootSnapshot, err := getSnapshotFromBucket(fmt.Sprintf(rootSnapshotBucketURL, flagBucketNetworkName)) require.NoError(t, err) expectedArgs := extractResetEpochArgs(rootSnapshot) - verifyArguments(t, expectedArgs, outputTxArgs) + unittest.VerifyCdcArguments(t, expectedArgs, outputTxArgs) }) // with a missing snapshot, should log an error @@ -139,22 +136,6 @@ func TestReset_BucketSnapshot(t *testing.T) { }) } -func verifyArguments(t *testing.T, expected []cadence.Value, actual []interface{}) { - - for index, arg := range actual { - - // marshal to bytes - bz, err := json.Marshal(arg) - require.NoError(t, err) - - // parse cadence value - decoded, err := jsoncdc.Decode(nil, bz) - require.NoError(t, err) - - assert.Equal(t, expected[index], decoded) - } -} - func writeRootSnapshot(bootDir string, snapshot *inmem.Snapshot) error { rootSnapshotPath := filepath.Join(bootDir, bootstrap.PathRootProtocolStateSnapshot) return writeJSON(rootSnapshotPath, snapshot.Encodable()) diff --git a/utils/unittest/service_events_fixtures.go b/utils/unittest/service_events_fixtures.go index 0bd6f77e87c..7f4ad955d56 100644 --- a/utils/unittest/service_events_fixtures.go +++ b/utils/unittest/service_events_fixtures.go @@ -3,12 +3,17 @@ package unittest import ( "crypto/rand" "encoding/hex" + "encoding/json" + "testing" + + json2 "github.com/onflow/cadence/encoding/json" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/onflow/cadence" "github.com/onflow/cadence/encoding/ccf" "github.com/onflow/cadence/runtime/common" "github.com/onflow/crypto" - 
"github.com/onflow/flow-go/fvm/systemcontracts" "github.com/onflow/flow-go/model/flow" ) @@ -537,7 +542,7 @@ func createEpochNodes() cadence.Array { func createEpochCollectors() cadence.Array { - clusterType := newFlowClusterQCClusterStructType() + clusterType := NewFlowClusterQCClusterStructType() voteType := newFlowClusterQCVoteStructType() @@ -699,76 +704,6 @@ func createVersionBeaconEvent() cadence.Event { }).WithType(NewNodeVersionBeaconVersionBeaconEventType()) } -func newFlowClusterQCVoteStructType() cadence.Type { - - // A.01cf0e2f2f715450.FlowClusterQC.Vote - - address, _ := common.HexToAddress("01cf0e2f2f715450") - location := common.NewAddressLocation(nil, address, "FlowClusterQC") - - return &cadence.StructType{ - Location: location, - QualifiedIdentifier: "FlowClusterQC.Vote", - Fields: []cadence.Field{ - { - Identifier: "nodeID", - Type: cadence.StringType{}, - }, - { - Identifier: "signature", - Type: cadence.NewOptionalType(cadence.StringType{}), - }, - { - Identifier: "message", - Type: cadence.NewOptionalType(cadence.StringType{}), - }, - { - Identifier: "clusterIndex", - Type: cadence.UInt16Type{}, - }, - { - Identifier: "weight", - Type: cadence.UInt64Type{}, - }, - }, - } -} - -func newFlowClusterQCClusterStructType() *cadence.StructType { - - // A.01cf0e2f2f715450.FlowClusterQC.Cluster - - address, _ := common.HexToAddress("01cf0e2f2f715450") - location := common.NewAddressLocation(nil, address, "FlowClusterQC") - - return &cadence.StructType{ - Location: location, - QualifiedIdentifier: "FlowClusterQC.Cluster", - Fields: []cadence.Field{ - { - Identifier: "index", - Type: cadence.UInt16Type{}, - }, - { - Identifier: "nodeWeights", - Type: cadence.NewDictionaryType(cadence.StringType{}, cadence.UInt64Type{}), - }, - { - Identifier: "totalWeight", - Type: cadence.UInt64Type{}, - }, - { - Identifier: "generatedVotes", - Type: cadence.NewDictionaryType(cadence.StringType{}, newFlowClusterQCVoteStructType()), - }, - { - Identifier: 
"uniqueVoteMessageTotalWeights", - Type: cadence.NewDictionaryType(cadence.StringType{}, cadence.UInt64Type{}), - }, - }, - } -} - func newFlowIDTableStakingNodeInfoStructType() *cadence.StructType { // A.01cf0e2f2f715450.FlowIDTableStaking.NodeInfo @@ -869,7 +804,7 @@ func newFlowEpochEpochSetupEventType() *cadence.EventType { }, { Identifier: "collectorClusters", - Type: cadence.NewVariableSizedArrayType(newFlowClusterQCClusterStructType()), + Type: cadence.NewVariableSizedArrayType(NewFlowClusterQCClusterStructType()), }, { Identifier: "randomSource", @@ -1098,3 +1033,89 @@ var VersionBeaconFixtureCCF = func() []byte { } return b }() + +func newFlowClusterQCVoteStructType() *cadence.StructType { + + // A.01cf0e2f2f715450.FlowClusterQC.Vote + + address, _ := common.HexToAddress("01cf0e2f2f715450") + location := common.NewAddressLocation(nil, address, "FlowClusterQC") + + return &cadence.StructType{ + Location: location, + QualifiedIdentifier: "FlowClusterQC.Vote", + Fields: []cadence.Field{ + { + Identifier: "nodeID", + Type: cadence.StringType{}, + }, + { + Identifier: "signature", + Type: cadence.NewOptionalType(cadence.StringType{}), + }, + { + Identifier: "message", + Type: cadence.NewOptionalType(cadence.StringType{}), + }, + { + Identifier: "clusterIndex", + Type: cadence.UInt16Type{}, + }, + { + Identifier: "weight", + Type: cadence.UInt64Type{}, + }, + }, + } +} + +func VerifyCdcArguments(t *testing.T, expected []cadence.Value, actual []interface{}) { + + for index, arg := range actual { + + // marshal to bytes + bz, err := json.Marshal(arg) + require.NoError(t, err) + + // parse cadence value + decoded, err := json2.Decode(nil, bz) + require.NoError(t, err) + + assert.Equal(t, expected[index], decoded) + } +} + +func NewFlowClusterQCClusterStructType() *cadence.StructType { + + // A.01cf0e2f2f715450.FlowClusterQC.Cluster + + address, _ := common.HexToAddress("01cf0e2f2f715450") + location := common.NewAddressLocation(nil, address, "FlowClusterQC") + + 
return &cadence.StructType{ + Location: location, + QualifiedIdentifier: "FlowClusterQC.Cluster", + Fields: []cadence.Field{ + { + Identifier: "index", + Type: cadence.UInt16Type{}, + }, + { + Identifier: "nodeWeights", + Type: cadence.NewDictionaryType(cadence.StringType{}, cadence.UInt64Type{}), + }, + { + Identifier: "totalWeight", + Type: cadence.UInt64Type{}, + }, + { + Identifier: "generatedVotes", + Type: cadence.NewDictionaryType(cadence.StringType{}, newFlowClusterQCVoteStructType()), + }, + { + Identifier: "uniqueVoteMessageTotalWeights", + Type: cadence.NewDictionaryType(cadence.StringType{}, cadence.UInt64Type{}), + }, + }, + } +} From 1ffab45c7dac108cb1b9b97957565d310628ffbd Mon Sep 17 00:00:00 2001 From: Andrii Date: Fri, 29 Mar 2024 10:56:25 +0200 Subject: [PATCH 024/148] Added godoc for test --- .../cohort2/observer_indexer_enabled_extended_test.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/integration/tests/access/cohort2/observer_indexer_enabled_extended_test.go b/integration/tests/access/cohort2/observer_indexer_enabled_extended_test.go index 72f714874df..fdac11809bb 100644 --- a/integration/tests/access/cohort2/observer_indexer_enabled_extended_test.go +++ b/integration/tests/access/cohort2/observer_indexer_enabled_extended_test.go @@ -31,15 +31,16 @@ func TestObserverIndexerEnabledExtended(t *testing.T) { suite.Run(t, new(ObserverIndexerEnabledExtendedSuite)) } -// ObserverIndexerEnabledExtendedSuite tests the observer with the indexer enabled. +// ObserverIndexerEnabledExtendedSuite tests the observer with the indexer enabled, +// observer configured to proxy requests to an access node and access node itself. All responses are compared +// to ensure all of the endpoints are working as expected. // It uses ObserverSuite as a base to reuse the test cases that need to be run for any observer variation. 
type ObserverIndexerEnabledExtendedSuite struct { ObserverSuite } -// SetupTest sets up the test suite by starting the network and preparing the observer client. -// By overriding this function, we can ensure that the observer is started with correct parameters and select -// the RPCs and REST endpoints that are tested. +// SetupTest sets up the test suite by starting the network and preparing both observer clients and access client to be +// call all endpoints and compare responses. func (s *ObserverIndexerEnabledExtendedSuite) SetupTest() { consensusConfigs := []func(config *testnet.NodeConfig){ // `cruise-ctl-fallback-proposal-duration` is set to 250ms instead to of 100ms From 257eb601f64a12781f53544a4438d79927041319 Mon Sep 17 00:00:00 2001 From: Andrii Date: Fri, 29 Mar 2024 15:56:54 +0200 Subject: [PATCH 025/148] Moved everything to one test --- .../observer_indexer_enabled_extended_test.go | 719 ------------------ .../cohort2/observer_indexer_enabled_test.go | 632 ++++++++++++++- 2 files changed, 614 insertions(+), 737 deletions(-) delete mode 100644 integration/tests/access/cohort2/observer_indexer_enabled_extended_test.go diff --git a/integration/tests/access/cohort2/observer_indexer_enabled_extended_test.go b/integration/tests/access/cohort2/observer_indexer_enabled_extended_test.go deleted file mode 100644 index fdac11809bb..00000000000 --- a/integration/tests/access/cohort2/observer_indexer_enabled_extended_test.go +++ /dev/null @@ -1,719 +0,0 @@ -package cohort2 - -import ( - "bytes" - "context" - "fmt" - "github.com/onflow/flow-go/engine/common/rpc/convert" - "testing" - "time" - - "github.com/rs/zerolog" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - - sdk "github.com/onflow/flow-go-sdk" - sdkcrypto "github.com/onflow/flow-go-sdk/crypto" - "github.com/onflow/flow-go-sdk/templates" - "github.com/onflow/flow-go/engine/access/rpc/backend" - 
"github.com/onflow/flow-go/integration/testnet" - "github.com/onflow/flow-go/integration/tests/lib" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/utils/unittest" - - accessproto "github.com/onflow/flow/protobuf/go/flow/access" - "github.com/onflow/flow/protobuf/go/flow/entities" -) - -func TestObserverIndexerEnabledExtended(t *testing.T) { - suite.Run(t, new(ObserverIndexerEnabledExtendedSuite)) -} - -// ObserverIndexerEnabledExtendedSuite tests the observer with the indexer enabled, -// observer configured to proxy requests to an access node and access node itself. All responses are compared -// to ensure all of the endpoints are working as expected. -// It uses ObserverSuite as a base to reuse the test cases that need to be run for any observer variation. -type ObserverIndexerEnabledExtendedSuite struct { - ObserverSuite -} - -// SetupTest sets up the test suite by starting the network and preparing both observer clients and access client to be -// call all endpoints and compare responses. -func (s *ObserverIndexerEnabledExtendedSuite) SetupTest() { - consensusConfigs := []func(config *testnet.NodeConfig){ - // `cruise-ctl-fallback-proposal-duration` is set to 250ms instead to of 100ms - // to purposely slow down the block rate. This is needed since the crypto module - // update providing faster BLS operations. 
- // TODO: fix the access integration test logic to function without slowing down - // the block rate - testnet.WithAdditionalFlag("--cruise-ctl-fallback-proposal-duration=250ms"), - testnet.WithAdditionalFlagf("--required-verification-seal-approvals=%d", 1), - testnet.WithAdditionalFlagf("--required-construction-seal-approvals=%d", 1), - testnet.WithLogLevel(zerolog.FatalLevel), - } - - nodeConfigs := []testnet.NodeConfig{ - // access node with unstaked nodes supported - testnet.NewNodeConfig(flow.RoleAccess, testnet.WithLogLevel(zerolog.InfoLevel), - testnet.WithAdditionalFlag("--supports-observer=true"), - testnet.WithAdditionalFlagf("--public-network-execution-data-sync-enabled=true"), - testnet.WithAdditionalFlagf("--script-execution-mode=%s", backend.IndexQueryModeExecutionNodesOnly), - testnet.WithAdditionalFlagf("--tx-result-query-mode=%s", backend.IndexQueryModeExecutionNodesOnly), - testnet.WithAdditionalFlag("--event-query-mode=execution-nodes-only"), - ), - - testnet.NewNodeConfig(flow.RoleCollection, testnet.WithLogLevel(zerolog.FatalLevel)), - testnet.NewNodeConfig(flow.RoleCollection, testnet.WithLogLevel(zerolog.FatalLevel)), - testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.FatalLevel)), - testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.FatalLevel)), - testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...), - testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...), - testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...), - testnet.NewNodeConfig(flow.RoleVerification, testnet.WithLogLevel(zerolog.FatalLevel)), - } - - observers := []testnet.ObserverConfig{{ - ContainerName: testnet.PrimaryON, - LogLevel: zerolog.InfoLevel, - AdditionalFlags: []string{ - fmt.Sprintf("--execution-data-dir=%s", testnet.DefaultExecutionDataServiceDir), - fmt.Sprintf("--execution-state-dir=%s", testnet.DefaultExecutionStateDir), - "--execution-data-sync-enabled=true", - 
"--execution-data-indexing-enabled=true", - "--local-service-api-enabled=true", - "--event-query-mode=execution-nodes-only", - }, - }, - { - ContainerName: "observer_2", - LogLevel: zerolog.InfoLevel, - }, - } - - // prepare the network - conf := testnet.NewNetworkConfig("observer_indexing_enabled_extended_test", nodeConfigs, testnet.WithObservers(observers...)) - s.net = testnet.PrepareFlowNetwork(s.T(), conf, flow.Localnet) - - // start the network - ctx, cancel := context.WithCancel(context.Background()) - s.cancel = cancel - - s.net.Start(ctx) -} - -func (s *ObserverIndexerEnabledExtendedSuite) TestObserverIndexedRPCsHappyPath() { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - t := s.T() - - // prepare environment to create a new account - serviceAccountClient, err := s.net.ContainerByName(testnet.PrimaryAN).TestnetClient() - require.NoError(t, err) - - latestBlockID, err := serviceAccountClient.GetLatestBlockID(ctx) - require.NoError(t, err) - - // create new account to deploy Counter to - accountPrivateKey := lib.RandomPrivateKey() - - accountKey := sdk.NewAccountKey(). - FromPrivateKey(accountPrivateKey). - SetHashAlgo(sdkcrypto.SHA3_256). - SetWeight(sdk.AccountKeyWeightThreshold) - - serviceAddress := sdk.Address(serviceAccountClient.Chain.ServiceAddress()) - - // Generate the account creation transaction - createAccountTx, err := templates.CreateAccount( - []*sdk.AccountKey{accountKey}, - []templates.Contract{ - { - Name: lib.CounterContract.Name, - Source: lib.CounterContract.ToCadence(), - }, - }, serviceAddress) - require.NoError(t, err) - - createAccountTx. - SetReferenceBlockID(sdk.Identifier(latestBlockID)). - SetProposalKey(serviceAddress, 0, serviceAccountClient.GetSeqNumber()). - SetPayer(serviceAddress). 
- SetComputeLimit(9999) - - // send the create account tx - childCtx, cancel := context.WithTimeout(ctx, 10*time.Second) - err = serviceAccountClient.SignAndSendTransaction(childCtx, createAccountTx) - require.NoError(t, err) - - cancel() - - // wait for account to be created - var accountCreationTxRes *sdk.TransactionResult - unittest.RequireReturnsBefore(t, func() { - accountCreationTxRes, err = serviceAccountClient.WaitForSealed(context.Background(), createAccountTx.ID()) - require.NoError(t, err) - }, 20*time.Second, "has to seal before timeout") - - // obtain the account address - var accountCreatedPayload []byte - var newAccountAddress sdk.Address - for _, event := range accountCreationTxRes.Events { - if event.Type == sdk.EventAccountCreated { - accountCreatedEvent := sdk.AccountCreatedEvent(event) - accountCreatedPayload = accountCreatedEvent.Payload - newAccountAddress = accountCreatedEvent.Address() - break - } - } - require.NotEqual(t, sdk.EmptyAddress, newAccountAddress) - - // now we can query events using observerLocal to data which has to be locally indexed - - // get an access node client - accessNode, err := s.getClient(s.net.ContainerByName(testnet.PrimaryAN).Addr(testnet.GRPCPort)) - require.NoError(t, err) - - // get an observer with indexer enabled client - observerLocal, err := s.getObserverClient() - require.NoError(t, err) - - // get an upstream observer client - observerUpstream, err := s.getClient(s.net.ContainerByName("observer_2").Addr(testnet.GRPCPort)) - require.NoError(t, err) - - // wait for data to be synced by observerLocal - require.Eventually(t, func() bool { - _, err := observerLocal.GetAccountAtBlockHeight(ctx, &accessproto.GetAccountAtBlockHeightRequest{ - Address: newAccountAddress.Bytes(), - BlockHeight: accountCreationTxRes.BlockHeight, - }) - statusErr, ok := status.FromError(err) - if !ok || err == nil { - return true - } - return statusErr.Code() != codes.OutOfRange - }, 30*time.Second, 1*time.Second) - - log := 
unittest.LoggerForTest(s.Suite.T(), zerolog.InfoLevel) - log.Info().Msg("================> onverted.Payload.Results") - - blockWithAccount, err := observerLocal.GetBlockByID(ctx, &accessproto.GetBlockByIDRequest{ - Id: accountCreationTxRes.BlockID[:], - FullBlockResponse: true, - }) - require.NoError(t, err) - - // GetEventsForBlockIDs - eventsByBlockID := s.TestGetEventsForBlockIDsObserverRPC(ctx, observerLocal, observerUpstream, accessNode, [][]byte{blockWithAccount.Block.Id}) - - // GetEventsForHeightRange - eventsByHeight := s.TestGetEventsForHeightRangeObserverRPC(ctx, observerLocal, observerUpstream, accessNode, blockWithAccount.Block.Height, blockWithAccount.Block.Height) - - // validate that there is an event that we are looking for - require.Equal(t, eventsByHeight.Results, eventsByBlockID.Results) - - var txIndex uint32 - found := false - for _, eventsInBlock := range eventsByHeight.Results { - for _, event := range eventsInBlock.Events { - if event.Type == sdk.EventAccountCreated { - if bytes.Equal(event.Payload, accountCreatedPayload) { - found = true - txIndex = event.TransactionIndex - } - } - } - } - require.True(t, found) - - // GetSystemTransaction - s.TestGetSystemTransactionObserverRPC(ctx, observerLocal, observerUpstream, accessNode, blockWithAccount.Block.Id) - - converted, err := convert.MessageToBlock(blockWithAccount.Block) - require.NoError(t, err) - - resultId := converted.Payload.Results[0].ID() - - // GetExecutionResultByID - s.TestGetExecutionResultByIDObserverRPC(ctx, observerLocal, observerUpstream, accessNode, convert.IdentifierToMessage(resultId)) - - //GetTransaction - s.TestGetTransactionObserverRPC(ctx, observerLocal, observerUpstream, accessNode, accountCreationTxRes.TransactionID.Bytes(), blockWithAccount.Block.Id, nil) - - // GetTransactionResult - s.TestGetTransactionResultObserverRPC(ctx, observerLocal, observerUpstream, accessNode, accountCreationTxRes.TransactionID.Bytes(), blockWithAccount.Block.Id, 
accountCreationTxRes.CollectionID.Bytes()) - - //GetTransactionResultByIndex - s.TestGetTransactionResultsByIndexIDObserverRPC(ctx, observerLocal, observerUpstream, accessNode, blockWithAccount.Block.Id, txIndex) - - // GetTransactionResultsByBlockID - s.TestGetTransactionResultsByBlockIDObserverRPC(ctx, observerLocal, observerUpstream, accessNode, blockWithAccount.Block.Id) - - // GetTransactionsByBlockID - s.TestGetTransactionsByBlockIDObserverRPC(ctx, observerLocal, observerUpstream, accessNode, blockWithAccount.Block.Id) - - // GetCollectionByID - s.TestGetCollectionByIDObserverRPC(ctx, observerLocal, observerUpstream, accessNode, accountCreationTxRes.CollectionID.Bytes()) - - // ExecuteScriptAtBlockHeight - s.TestExecuteScriptAtBlockHeightObserverRPC(ctx, observerLocal, observerUpstream, accessNode, blockWithAccount.Block.Height, []byte(simpleScript)) - - // ExecuteScriptAtBlockID - s.TestExecuteScriptAtBlockIDObserverRPC(ctx, observerLocal, observerUpstream, accessNode, blockWithAccount.Block.Id, []byte(simpleScript)) - - // GetAccountAtBlockHeight - s.TestGetAccountAtBlockHeightObserverRPC(ctx, observerLocal, observerUpstream, accessNode, newAccountAddress.Bytes(), accountCreationTxRes.BlockHeight) - - // GetAccount - //getAccountObserver1Response, err := observerLocal.GetAccount(ctx, &accessproto.GetAccountRequest{ - // Address: newAccountAddress.Bytes(), - //}) - //require.NoError(t, err) - // - //getAccountObserver2Response, err := observerUpstream.GetAccount(ctx, &accessproto.GetAccountRequest{ - // Address: newAccountAddress.Bytes(), - //}) - //require.NoError(t, err) - // - //getAccountAccessResponse, err := accessNode.GetAccount(ctx, &accessproto.GetAccountRequest{ - // Address: newAccountAddress.Bytes(), - //}) - //require.NoError(t, err) - // - //require.Equal(t, getAccountAccessResponse.Account, getAccountObserver2Response.Account) - //require.Equal(t, getAccountAccessResponse.Account, getAccountObserver1Response.Account) - - 
//GetAccountAtLatestBlock - //getAccountAtLatestBlockObserver1Response, err := observerLocal.GetAccountAtLatestBlock(ctx, &accessproto.GetAccountAtLatestBlockRequest{ - // Address: newAccountAddress.Bytes(), - //}) - //require.NoError(t, err) - // - //getAccountAtLatestBlockObserver2Response, err := observerUpstream.GetAccountAtLatestBlock(ctx, &accessproto.GetAccountAtLatestBlockRequest{ - // Address: newAccountAddress.Bytes(), - //}) - //require.NoError(t, err) - // - //getAccountAtLatestBlockAccessResponse, err := accessNode.GetAccountAtLatestBlock(ctx, &accessproto.GetAccountAtLatestBlockRequest{ - // Address: newAccountAddress.Bytes(), - //}) - //require.NoError(t, err) - // - //require.Equal(t, getAccountAtLatestBlockObserver2Response.Account, getAccountAtLatestBlockAccessResponse.Account) - //require.Equal(t, getAccountAtLatestBlockObserver1Response.Account, getAccountAtLatestBlockAccessResponse.Account) -} - -func (s *ObserverIndexerEnabledExtendedSuite) TestGetEventsForBlockIDsObserverRPC( - ctx context.Context, - observerLocal accessproto.AccessAPIClient, - observerUpstream accessproto.AccessAPIClient, - accessNode accessproto.AccessAPIClient, - blockIds [][]byte, -) *accessproto.EventsResponse { - observerLocalResponse, err := observerLocal.GetEventsForBlockIDs(ctx, &accessproto.GetEventsForBlockIDsRequest{ - Type: sdk.EventAccountCreated, - BlockIds: blockIds, - EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, - }) - require.NoError(s.T(), err) - - observerUpstreamResponse, err := observerUpstream.GetEventsForBlockIDs(ctx, &accessproto.GetEventsForBlockIDsRequest{ - Type: sdk.EventAccountCreated, - BlockIds: blockIds, - EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, - }) - require.NoError(s.T(), err) - - accessNodeResponse, err := accessNode.GetEventsForBlockIDs(ctx, &accessproto.GetEventsForBlockIDsRequest{ - Type: sdk.EventAccountCreated, - BlockIds: blockIds, - EventEncodingVersion: 
entities.EventEncodingVersion_JSON_CDC_V0, - }) - require.NoError(s.T(), err) - - require.Equal(s.T(), accessNodeResponse.Results, observerLocalResponse.Results) - require.Equal(s.T(), accessNodeResponse.Results, observerUpstreamResponse.Results) - - return observerLocalResponse -} - -func (s *ObserverIndexerEnabledExtendedSuite) TestGetEventsForHeightRangeObserverRPC( - ctx context.Context, - observerLocal accessproto.AccessAPIClient, - observerUpstream accessproto.AccessAPIClient, - accessNode accessproto.AccessAPIClient, - startHeight uint64, - endHeight uint64, -) *accessproto.EventsResponse { - observerLocalResponse, err := observerLocal.GetEventsForHeightRange(ctx, &accessproto.GetEventsForHeightRangeRequest{ - Type: sdk.EventAccountCreated, - StartHeight: startHeight, - EndHeight: endHeight, - EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, - }) - require.NoError(s.T(), err) - - observerUpstreamResponse, err := observerUpstream.GetEventsForHeightRange(ctx, &accessproto.GetEventsForHeightRangeRequest{ - Type: sdk.EventAccountCreated, - StartHeight: startHeight, - EndHeight: endHeight, - EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, - }) - require.NoError(s.T(), err) - - accessNodeResponse, err := accessNode.GetEventsForHeightRange(ctx, &accessproto.GetEventsForHeightRangeRequest{ - Type: sdk.EventAccountCreated, - StartHeight: startHeight, - EndHeight: endHeight, - EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, - }) - require.NoError(s.T(), err) - - require.Equal(s.T(), accessNodeResponse.Results, observerLocalResponse.Results) - require.Equal(s.T(), accessNodeResponse.Results, observerUpstreamResponse.Results) - - return observerLocalResponse -} - -func (s *ObserverIndexerEnabledExtendedSuite) TestGetAccountAtBlockHeightObserverRPC( - ctx context.Context, - observerLocal accessproto.AccessAPIClient, - observerUpstream accessproto.AccessAPIClient, - accessNode accessproto.AccessAPIClient, - accountAddress 
[]byte, - blockHeight uint64, -) { - - observerLocalResponse, err := observerLocal.GetAccountAtBlockHeight(ctx, &accessproto.GetAccountAtBlockHeightRequest{ - Address: accountAddress, - BlockHeight: blockHeight, - }) - require.NoError(s.T(), err) - - observerUpstreamResponse, err := observerUpstream.GetAccountAtBlockHeight(ctx, &accessproto.GetAccountAtBlockHeightRequest{ - Address: accountAddress, - BlockHeight: blockHeight, - }) - require.NoError(s.T(), err) - - accessNodeResponse, err := accessNode.GetAccountAtBlockHeight(ctx, &accessproto.GetAccountAtBlockHeightRequest{ - Address: accountAddress, - BlockHeight: blockHeight, - }) - require.NoError(s.T(), err) - - require.Equal(s.T(), accessNodeResponse.Account, observerLocalResponse.Account) - require.Equal(s.T(), accessNodeResponse.Account, observerUpstreamResponse.Account) -} - -func (s *ObserverIndexerEnabledExtendedSuite) TestGetSystemTransactionObserverRPC( - ctx context.Context, - observerLocal accessproto.AccessAPIClient, - observerUpstream accessproto.AccessAPIClient, - accessNode accessproto.AccessAPIClient, - blockId []byte, -) { - - observerLocalResponse, err := observerLocal.GetSystemTransaction(ctx, &accessproto.GetSystemTransactionRequest{ - BlockId: blockId, - }) - require.NoError(s.T(), err) - - observerUpstreamResponse, err := observerUpstream.GetSystemTransaction(ctx, &accessproto.GetSystemTransactionRequest{ - BlockId: blockId, - }) - require.NoError(s.T(), err) - - accessNodeResponse, err := accessNode.GetSystemTransaction(ctx, &accessproto.GetSystemTransactionRequest{ - BlockId: blockId, - }) - require.NoError(s.T(), err) - - require.Equal(s.T(), accessNodeResponse.Transaction, observerLocalResponse.Transaction) - require.Equal(s.T(), accessNodeResponse.Transaction, observerUpstreamResponse.Transaction) -} - -func (s *ObserverIndexerEnabledExtendedSuite) TestGetExecutionResultByIDObserverRPC( - ctx context.Context, - observerLocal accessproto.AccessAPIClient, - observerUpstream 
accessproto.AccessAPIClient, - accessNode accessproto.AccessAPIClient, - id []byte, -) { - - observerLocalResponse, err := observerLocal.GetExecutionResultByID(ctx, &accessproto.GetExecutionResultByIDRequest{ - Id: id, - }) - require.NoError(s.T(), err) - - observerUpstreamResponse, err := observerUpstream.GetExecutionResultByID(ctx, &accessproto.GetExecutionResultByIDRequest{ - Id: id, - }) - require.NoError(s.T(), err) - - accessNodeResponse, err := accessNode.GetExecutionResultByID(ctx, &accessproto.GetExecutionResultByIDRequest{ - Id: id, - }) - require.NoError(s.T(), err) - - require.Equal(s.T(), accessNodeResponse.ExecutionResult, observerLocalResponse.ExecutionResult) - require.Equal(s.T(), accessNodeResponse.ExecutionResult, observerUpstreamResponse.ExecutionResult) -} - -func (s *ObserverIndexerEnabledExtendedSuite) TestGetTransactionObserverRPC( - ctx context.Context, - observerLocal accessproto.AccessAPIClient, - observerUpstream accessproto.AccessAPIClient, - accessNode accessproto.AccessAPIClient, - id []byte, - blockId []byte, - collectionId []byte, -) { - - observerLocalResponse, err := observerLocal.GetTransaction(ctx, &accessproto.GetTransactionRequest{ - Id: id, - BlockId: blockId, - CollectionId: collectionId, - }) - require.NoError(s.T(), err) - - observerUpstreamResponse, err := observerUpstream.GetTransaction(ctx, &accessproto.GetTransactionRequest{ - Id: id, - BlockId: blockId, - CollectionId: collectionId, - }) - require.NoError(s.T(), err) - - accessNodeResponse, err := accessNode.GetTransaction(ctx, &accessproto.GetTransactionRequest{ - Id: id, - BlockId: blockId, - CollectionId: collectionId, - }) - require.NoError(s.T(), err) - - require.Equal(s.T(), accessNodeResponse.Transaction, observerLocalResponse.Transaction) - require.Equal(s.T(), accessNodeResponse.Transaction, observerUpstreamResponse.Transaction) -} - -func (s *ObserverIndexerEnabledExtendedSuite) TestGetTransactionResultObserverRPC( - ctx context.Context, - observerLocal 
accessproto.AccessAPIClient, - observerUpstream accessproto.AccessAPIClient, - accessNode accessproto.AccessAPIClient, - id []byte, - blockId []byte, - collectionId []byte, -) { - - observerLocalResponse, err := observerLocal.GetTransactionResult(ctx, &accessproto.GetTransactionRequest{ - Id: id, - BlockId: blockId, - CollectionId: collectionId, - }) - require.NoError(s.T(), err) - - observerUpstreamResponse, err := observerUpstream.GetTransactionResult(ctx, &accessproto.GetTransactionRequest{ - Id: id, - BlockId: blockId, - CollectionId: collectionId, - }) - require.NoError(s.T(), err) - - accessNodeResponse, err := accessNode.GetTransactionResult(ctx, &accessproto.GetTransactionRequest{ - Id: id, - BlockId: blockId, - CollectionId: collectionId, - }) - require.NoError(s.T(), err) - - require.Equal(s.T(), accessNodeResponse.Events, observerLocalResponse.Events) - require.Equal(s.T(), accessNodeResponse.Events, observerUpstreamResponse.Events) -} - -func (s *ObserverIndexerEnabledExtendedSuite) TestGetTransactionResultsByBlockIDObserverRPC( - ctx context.Context, - observerLocal accessproto.AccessAPIClient, - observerUpstream accessproto.AccessAPIClient, - accessNode accessproto.AccessAPIClient, - blockId []byte, -) { - - observerLocalResponse, err := observerLocal.GetTransactionResultsByBlockID(ctx, &accessproto.GetTransactionsByBlockIDRequest{ - BlockId: blockId, - EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, - }) - require.NoError(s.T(), err) - - observerUpstreamResponse, err := observerUpstream.GetTransactionResultsByBlockID(ctx, &accessproto.GetTransactionsByBlockIDRequest{ - BlockId: blockId, - EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, - }) - require.NoError(s.T(), err) - - accessNodeResponse, err := accessNode.GetTransactionResultsByBlockID(ctx, &accessproto.GetTransactionsByBlockIDRequest{ - BlockId: blockId, - EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, - }) - require.NoError(s.T(), err) - - 
require.Equal(s.T(), accessNodeResponse.TransactionResults, observerLocalResponse.TransactionResults) - require.Equal(s.T(), accessNodeResponse.TransactionResults, observerUpstreamResponse.TransactionResults) -} - -func (s *ObserverIndexerEnabledExtendedSuite) TestGetTransactionResultsByIndexIDObserverRPC( - ctx context.Context, - observerLocal accessproto.AccessAPIClient, - observerUpstream accessproto.AccessAPIClient, - accessNode accessproto.AccessAPIClient, - blockId []byte, - index uint32, -) { - observerLocalResponse, err := observerLocal.GetTransactionResultByIndex(ctx, &accessproto.GetTransactionByIndexRequest{ - BlockId: blockId, - Index: index, - EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, - }) - require.NoError(s.T(), err) - - observerUpstreamResponse, err := observerUpstream.GetTransactionResultByIndex(ctx, &accessproto.GetTransactionByIndexRequest{ - BlockId: blockId, - Index: index, - EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, - }) - require.NoError(s.T(), err) - - accessNodeResponse, err := accessNode.GetTransactionResultByIndex(ctx, &accessproto.GetTransactionByIndexRequest{ - BlockId: blockId, - Index: index, - EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, - }) - require.NoError(s.T(), err) - - require.Equal(s.T(), accessNodeResponse.Events, observerLocalResponse.Events) - require.Equal(s.T(), accessNodeResponse.Events, observerUpstreamResponse.Events) -} - -func (s *ObserverIndexerEnabledExtendedSuite) TestGetTransactionsByBlockIDObserverRPC( - ctx context.Context, - observerLocal accessproto.AccessAPIClient, - observerUpstream accessproto.AccessAPIClient, - accessNode accessproto.AccessAPIClient, - blockId []byte, -) { - - observerLocalResponse, err := observerLocal.GetTransactionsByBlockID(ctx, &accessproto.GetTransactionsByBlockIDRequest{ - BlockId: blockId, - }) - require.NoError(s.T(), err) - - observerUpstreamResponse, err := observerUpstream.GetTransactionsByBlockID(ctx, 
&accessproto.GetTransactionsByBlockIDRequest{ - BlockId: blockId, - }) - require.NoError(s.T(), err) - - accessNodeResponse, err := accessNode.GetTransactionsByBlockID(ctx, &accessproto.GetTransactionsByBlockIDRequest{ - BlockId: blockId, - }) - require.NoError(s.T(), err) - - require.Equal(s.T(), accessNodeResponse.Transactions, observerLocalResponse.Transactions) - require.Equal(s.T(), accessNodeResponse.Transactions, observerUpstreamResponse.Transactions) -} - -func (s *ObserverIndexerEnabledExtendedSuite) TestGetCollectionByIDObserverRPC( - ctx context.Context, - observerLocal accessproto.AccessAPIClient, - observerUpstream accessproto.AccessAPIClient, - accessNode accessproto.AccessAPIClient, - collectionId []byte, -) { - - observerLocalResponse, err := observerLocal.GetCollectionByID(ctx, &accessproto.GetCollectionByIDRequest{ - Id: collectionId, - }) - require.NoError(s.T(), err) - - observerUpstreamResponse, err := observerUpstream.GetCollectionByID(ctx, &accessproto.GetCollectionByIDRequest{ - Id: collectionId, - }) - require.NoError(s.T(), err) - - accessNodeResponse, err := accessNode.GetCollectionByID(ctx, &accessproto.GetCollectionByIDRequest{ - Id: collectionId, - }) - require.NoError(s.T(), err) - - require.Equal(s.T(), accessNodeResponse.Collection, observerLocalResponse.Collection) - require.Equal(s.T(), accessNodeResponse.Collection, observerUpstreamResponse.Collection) -} - -func (s *ObserverIndexerEnabledExtendedSuite) TestExecuteScriptAtBlockHeightObserverRPC( - ctx context.Context, - observerLocal accessproto.AccessAPIClient, - observerUpstream accessproto.AccessAPIClient, - accessNode accessproto.AccessAPIClient, - blockHeight uint64, - script []byte, -) { - - observerLocalResponse, err := observerLocal.ExecuteScriptAtBlockHeight(ctx, &accessproto.ExecuteScriptAtBlockHeightRequest{ - BlockHeight: blockHeight, - Script: script, - Arguments: make([][]byte, 0), - }) - require.NoError(s.T(), err) - - observerUpstreamResponse, err := 
observerUpstream.ExecuteScriptAtBlockHeight(ctx, &accessproto.ExecuteScriptAtBlockHeightRequest{ - BlockHeight: blockHeight, - Script: script, - Arguments: make([][]byte, 0), - }) - require.NoError(s.T(), err) - - accessNodeResponse, err := accessNode.ExecuteScriptAtBlockHeight(ctx, &accessproto.ExecuteScriptAtBlockHeightRequest{ - BlockHeight: blockHeight, - Script: script, - Arguments: make([][]byte, 0), - }) - require.NoError(s.T(), err) - - require.Equal(s.T(), accessNodeResponse.Value, observerLocalResponse.Value) - require.Equal(s.T(), accessNodeResponse.Value, observerUpstreamResponse.Value) -} - -func (s *ObserverIndexerEnabledExtendedSuite) TestExecuteScriptAtBlockIDObserverRPC( - ctx context.Context, - observerLocal accessproto.AccessAPIClient, - observerUpstream accessproto.AccessAPIClient, - accessNode accessproto.AccessAPIClient, - blockId []byte, - script []byte, -) { - - observerLocalResponse, err := observerLocal.ExecuteScriptAtBlockID(ctx, &accessproto.ExecuteScriptAtBlockIDRequest{ - BlockId: blockId, - Script: script, - Arguments: make([][]byte, 0), - }) - require.NoError(s.T(), err) - - observerUpstreamResponse, err := observerUpstream.ExecuteScriptAtBlockID(ctx, &accessproto.ExecuteScriptAtBlockIDRequest{ - BlockId: blockId, - Script: script, - Arguments: make([][]byte, 0), - }) - require.NoError(s.T(), err) - - accessNodeResponse, err := accessNode.ExecuteScriptAtBlockID(ctx, &accessproto.ExecuteScriptAtBlockIDRequest{ - BlockId: blockId, - Script: script, - Arguments: make([][]byte, 0), - }) - require.NoError(s.T(), err) - - require.Equal(s.T(), accessNodeResponse.Value, observerLocalResponse.Value) - require.Equal(s.T(), accessNodeResponse.Value, observerUpstreamResponse.Value) -} diff --git a/integration/tests/access/cohort2/observer_indexer_enabled_test.go b/integration/tests/access/cohort2/observer_indexer_enabled_test.go index b8f0f34787d..a80e10af988 100644 --- a/integration/tests/access/cohort2/observer_indexer_enabled_test.go +++ 
b/integration/tests/access/cohort2/observer_indexer_enabled_test.go @@ -18,19 +18,18 @@ import ( sdkcrypto "github.com/onflow/flow-go-sdk/crypto" "github.com/onflow/flow-go-sdk/templates" "github.com/onflow/flow-go/engine/access/rpc/backend" + "github.com/onflow/flow-go/engine/common/rpc/convert" "github.com/onflow/flow-go/integration/testnet" "github.com/onflow/flow-go/integration/tests/lib" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/utils/unittest" - "github.com/onflow/cadence" accessproto "github.com/onflow/flow/protobuf/go/flow/access" "github.com/onflow/flow/protobuf/go/flow/entities" ) var ( - simpleScript = `pub fun main(): Int { return 42; }` - simpleScriptResult = cadence.NewInt(42) + simpleScript = `pub fun main(): Int { return 42; }` ) func TestObserverIndexerEnabled(t *testing.T) { @@ -43,8 +42,8 @@ type ObserverIndexerEnabledSuite struct { ObserverSuite } -// SetupTest sets up the test suite by starting the network and preparing the observer client. -// By overriding this function, we can ensure that the observer is started with correct parameters and select +// SetupTest sets up the test suite by starting the network and preparing the observers client. +// By overriding this function, we can ensure that the observers are started with correct parameters and select // the RPCs and REST endpoints that are tested. 
func (s *ObserverIndexerEnabledSuite) SetupTest() { s.localRpc = map[string]struct{}{ @@ -111,17 +110,23 @@ func (s *ObserverIndexerEnabledSuite) SetupTest() { testnet.NewNodeConfig(flow.RoleVerification, testnet.WithLogLevel(zerolog.FatalLevel)), } - observers := []testnet.ObserverConfig{{ - LogLevel: zerolog.InfoLevel, - AdditionalFlags: []string{ - fmt.Sprintf("--execution-data-dir=%s", testnet.DefaultExecutionDataServiceDir), - fmt.Sprintf("--execution-state-dir=%s", testnet.DefaultExecutionStateDir), - "--execution-data-sync-enabled=true", - "--execution-data-indexing-enabled=true", - "--local-service-api-enabled=true", - "--event-query-mode=execution-nodes-only", + observers := []testnet.ObserverConfig{ + { + LogLevel: zerolog.InfoLevel, + AdditionalFlags: []string{ + fmt.Sprintf("--execution-data-dir=%s", testnet.DefaultExecutionDataServiceDir), + fmt.Sprintf("--execution-state-dir=%s", testnet.DefaultExecutionStateDir), + "--execution-data-sync-enabled=true", + "--execution-data-indexing-enabled=true", + "--local-service-api-enabled=true", + "--event-query-mode=execution-nodes-only", + }, }, - }} + { + ContainerName: "observer_2", + LogLevel: zerolog.InfoLevel, + }, + } // prepare the network conf := testnet.NewNetworkConfig("observer_indexing_enabled_test", nodeConfigs, testnet.WithObservers(observers...)) @@ -135,9 +140,6 @@ func (s *ObserverIndexerEnabledSuite) SetupTest() { } // TestObserverIndexedRPCsHappyPath tests RPCs that are handled by the observer by using a dedicated indexer for the events. -// For now the observer only supports the following RPCs: -// - GetEventsForHeightRange -// - GetEventsForBlockIDs // To ensure that the observer is handling these RPCs, we stop the upstream access node and verify that the observer client // returns success for valid requests and errors for invalid ones. 
func (s *ObserverIndexerEnabledSuite) TestObserverIndexedRPCsHappyPath() { @@ -265,6 +267,188 @@ func (s *ObserverIndexerEnabledSuite) TestObserverIndexedRPCsHappyPath() { } +// TestAllObserverIndexedRPCsHappyPath tests the observer with the indexer enabled, +// observer configured to proxy requests to an access node and access node itself. All responses are compared +// to ensure all of the endpoints are working as expected. +// For now the observer only supports the following RPCs: +// -GetAccountAtBlockHeight +// -GetEventsForHeightRange +// -GetEventsForBlockIDs +// -GetSystemTransaction +// -GetTransactionsByBlockID +// -GetTransactionResultsByBlockID +// -ExecuteScriptAtBlockID +// -ExecuteScriptAtBlockHeight +// -GetExecutionResultByID +// -GetCollectionByID +// -GetTransaction +// -GetTransactionResult +// -GetTransactionResultByIndex +func (s *ObserverIndexerEnabledSuite) TestAllObserverIndexedRPCsHappyPath() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + t := s.T() + + // prepare environment to create a new account + serviceAccountClient, err := s.net.ContainerByName(testnet.PrimaryAN).TestnetClient() + require.NoError(t, err) + + latestBlockID, err := serviceAccountClient.GetLatestBlockID(ctx) + require.NoError(t, err) + + // create new account to deploy Counter to + accountPrivateKey := lib.RandomPrivateKey() + + accountKey := sdk.NewAccountKey(). + FromPrivateKey(accountPrivateKey). + SetHashAlgo(sdkcrypto.SHA3_256). + SetWeight(sdk.AccountKeyWeightThreshold) + + serviceAddress := sdk.Address(serviceAccountClient.Chain.ServiceAddress()) + + // Generate the account creation transaction + createAccountTx, err := templates.CreateAccount( + []*sdk.AccountKey{accountKey}, + []templates.Contract{ + { + Name: lib.CounterContract.Name, + Source: lib.CounterContract.ToCadence(), + }, + }, serviceAddress) + require.NoError(t, err) + + createAccountTx. + SetReferenceBlockID(sdk.Identifier(latestBlockID)). 
+ SetProposalKey(serviceAddress, 0, serviceAccountClient.GetSeqNumber()). + SetPayer(serviceAddress). + SetComputeLimit(9999) + + // send the create account tx + childCtx, cancel := context.WithTimeout(ctx, 10*time.Second) + err = serviceAccountClient.SignAndSendTransaction(childCtx, createAccountTx) + require.NoError(t, err) + + cancel() + + // wait for account to be created + var accountCreationTxRes *sdk.TransactionResult + unittest.RequireReturnsBefore(t, func() { + accountCreationTxRes, err = serviceAccountClient.WaitForSealed(context.Background(), createAccountTx.ID()) + require.NoError(t, err) + }, 20*time.Second, "has to seal before timeout") + + // obtain the account address + var accountCreatedPayload []byte + var newAccountAddress sdk.Address + for _, event := range accountCreationTxRes.Events { + if event.Type == sdk.EventAccountCreated { + accountCreatedEvent := sdk.AccountCreatedEvent(event) + accountCreatedPayload = accountCreatedEvent.Payload + newAccountAddress = accountCreatedEvent.Address() + break + } + } + require.NotEqual(t, sdk.EmptyAddress, newAccountAddress) + + // now we can query events using observerLocal to data which has to be locally indexed + + // get an access node client + accessNode, err := s.getClient(s.net.ContainerByName(testnet.PrimaryAN).Addr(testnet.GRPCPort)) + require.NoError(t, err) + + // get an observer with indexer enabled client + observerLocal, err := s.getObserverClient() + require.NoError(t, err) + + // get an upstream observer client + observerUpstream, err := s.getClient(s.net.ContainerByName("observer_2").Addr(testnet.GRPCPort)) + require.NoError(t, err) + + // wait for data to be synced by observerLocal + require.Eventually(t, func() bool { + _, err := observerLocal.GetAccountAtBlockHeight(ctx, &accessproto.GetAccountAtBlockHeightRequest{ + Address: newAccountAddress.Bytes(), + BlockHeight: accountCreationTxRes.BlockHeight, + }) + statusErr, ok := status.FromError(err) + if !ok || err == nil { + return true + } 
+ return statusErr.Code() != codes.OutOfRange + }, 30*time.Second, 1*time.Second) + + log := unittest.LoggerForTest(s.Suite.T(), zerolog.InfoLevel) + log.Info().Msg("================> onverted.Payload.Results") + + blockWithAccount, err := observerLocal.GetBlockByID(ctx, &accessproto.GetBlockByIDRequest{ + Id: accountCreationTxRes.BlockID[:], + FullBlockResponse: true, + }) + require.NoError(t, err) + + // GetEventsForBlockIDs + eventsByBlockID := s.checkGetEventsForBlockIDsRPC(ctx, observerLocal, observerUpstream, accessNode, [][]byte{blockWithAccount.Block.Id}) + + // GetEventsForHeightRange + eventsByHeight := s.checkGetEventsForHeightRangeRPC(ctx, observerLocal, observerUpstream, accessNode, blockWithAccount.Block.Height, blockWithAccount.Block.Height) + + // validate that there is an event that we are looking for + require.Equal(t, eventsByHeight.Results, eventsByBlockID.Results) + + var txIndex uint32 + found := false + for _, eventsInBlock := range eventsByHeight.Results { + for _, event := range eventsInBlock.Events { + if event.Type == sdk.EventAccountCreated { + if bytes.Equal(event.Payload, accountCreatedPayload) { + found = true + txIndex = event.TransactionIndex + } + } + } + } + require.True(t, found) + + // GetSystemTransaction + s.checkGetSystemTransactionRPC(ctx, observerLocal, observerUpstream, accessNode, blockWithAccount.Block.Id) + + converted, err := convert.MessageToBlock(blockWithAccount.Block) + require.NoError(t, err) + + resultId := converted.Payload.Results[0].ID() + + // GetExecutionResultByID + s.checkGetExecutionResultByIDRPC(ctx, observerLocal, observerUpstream, accessNode, convert.IdentifierToMessage(resultId)) + + //GetTransaction + s.checkGetTransactionRPC(ctx, observerLocal, observerUpstream, accessNode, accountCreationTxRes.TransactionID.Bytes(), blockWithAccount.Block.Id, nil) + + // GetTransactionResult + s.checkGetTransactionResultRPC(ctx, observerLocal, observerUpstream, accessNode, 
accountCreationTxRes.TransactionID.Bytes(), blockWithAccount.Block.Id, accountCreationTxRes.CollectionID.Bytes()) + + //GetTransactionResultByIndex + s.checkGetTransactionResultsByIndexIDRPC(ctx, observerLocal, observerUpstream, accessNode, blockWithAccount.Block.Id, txIndex) + + // GetTransactionResultsByBlockID + s.checkGetTransactionResultsByBlockIDRPC(ctx, observerLocal, observerUpstream, accessNode, blockWithAccount.Block.Id) + + // GetTransactionsByBlockID + s.checkGetTransactionsByBlockIDRPC(ctx, observerLocal, observerUpstream, accessNode, blockWithAccount.Block.Id) + + // GetCollectionByID + s.checkGetCollectionByIDRPC(ctx, observerLocal, observerUpstream, accessNode, accountCreationTxRes.CollectionID.Bytes()) + + // ExecuteScriptAtBlockHeight + s.checkExecuteScriptAtBlockHeightRPC(ctx, observerLocal, observerUpstream, accessNode, blockWithAccount.Block.Height, []byte(simpleScript)) + + // ExecuteScriptAtBlockID + s.checkExecuteScriptAtBlockIDRPC(ctx, observerLocal, observerUpstream, accessNode, blockWithAccount.Block.Id, []byte(simpleScript)) + + // GetAccountAtBlockHeight + s.checkGetAccountAtBlockHeightRPC(ctx, observerLocal, observerUpstream, accessNode, newAccountAddress.Bytes(), accountCreationTxRes.BlockHeight) +} + func (s *ObserverIndexerEnabledSuite) getRPCs() []RPCTest { return []RPCTest{ {name: "Ping", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { @@ -465,3 +649,415 @@ func (s *ObserverIndexerEnabledSuite) getRestEndpoints() []RestEndpointTest { }, } } + +func (s *ObserverIndexerEnabledSuite) checkGetEventsForBlockIDsRPC( + ctx context.Context, + observerLocal accessproto.AccessAPIClient, + observerUpstream accessproto.AccessAPIClient, + accessNode accessproto.AccessAPIClient, + blockIds [][]byte, +) *accessproto.EventsResponse { + observerLocalResponse, err := observerLocal.GetEventsForBlockIDs(ctx, &accessproto.GetEventsForBlockIDsRequest{ + Type: sdk.EventAccountCreated, + BlockIds: blockIds, + 
EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, + }) + require.NoError(s.T(), err) + + observerUpstreamResponse, err := observerUpstream.GetEventsForBlockIDs(ctx, &accessproto.GetEventsForBlockIDsRequest{ + Type: sdk.EventAccountCreated, + BlockIds: blockIds, + EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, + }) + require.NoError(s.T(), err) + + accessNodeResponse, err := accessNode.GetEventsForBlockIDs(ctx, &accessproto.GetEventsForBlockIDsRequest{ + Type: sdk.EventAccountCreated, + BlockIds: blockIds, + EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, + }) + require.NoError(s.T(), err) + + require.Equal(s.T(), accessNodeResponse.Results, observerLocalResponse.Results) + require.Equal(s.T(), accessNodeResponse.Results, observerUpstreamResponse.Results) + + return observerLocalResponse +} + +func (s *ObserverIndexerEnabledSuite) checkGetEventsForHeightRangeRPC( + ctx context.Context, + observerLocal accessproto.AccessAPIClient, + observerUpstream accessproto.AccessAPIClient, + accessNode accessproto.AccessAPIClient, + startHeight uint64, + endHeight uint64, +) *accessproto.EventsResponse { + observerLocalResponse, err := observerLocal.GetEventsForHeightRange(ctx, &accessproto.GetEventsForHeightRangeRequest{ + Type: sdk.EventAccountCreated, + StartHeight: startHeight, + EndHeight: endHeight, + EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, + }) + require.NoError(s.T(), err) + + observerUpstreamResponse, err := observerUpstream.GetEventsForHeightRange(ctx, &accessproto.GetEventsForHeightRangeRequest{ + Type: sdk.EventAccountCreated, + StartHeight: startHeight, + EndHeight: endHeight, + EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, + }) + require.NoError(s.T(), err) + + accessNodeResponse, err := accessNode.GetEventsForHeightRange(ctx, &accessproto.GetEventsForHeightRangeRequest{ + Type: sdk.EventAccountCreated, + StartHeight: startHeight, + EndHeight: endHeight, + 
EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, + }) + require.NoError(s.T(), err) + + require.Equal(s.T(), accessNodeResponse.Results, observerLocalResponse.Results) + require.Equal(s.T(), accessNodeResponse.Results, observerUpstreamResponse.Results) + + return observerLocalResponse +} + +func (s *ObserverIndexerEnabledSuite) checkGetAccountAtBlockHeightRPC( + ctx context.Context, + observerLocal accessproto.AccessAPIClient, + observerUpstream accessproto.AccessAPIClient, + accessNode accessproto.AccessAPIClient, + accountAddress []byte, + blockHeight uint64, +) { + + observerLocalResponse, err := observerLocal.GetAccountAtBlockHeight(ctx, &accessproto.GetAccountAtBlockHeightRequest{ + Address: accountAddress, + BlockHeight: blockHeight, + }) + require.NoError(s.T(), err) + + observerUpstreamResponse, err := observerUpstream.GetAccountAtBlockHeight(ctx, &accessproto.GetAccountAtBlockHeightRequest{ + Address: accountAddress, + BlockHeight: blockHeight, + }) + require.NoError(s.T(), err) + + accessNodeResponse, err := accessNode.GetAccountAtBlockHeight(ctx, &accessproto.GetAccountAtBlockHeightRequest{ + Address: accountAddress, + BlockHeight: blockHeight, + }) + require.NoError(s.T(), err) + + require.Equal(s.T(), accessNodeResponse.Account, observerLocalResponse.Account) + require.Equal(s.T(), accessNodeResponse.Account, observerUpstreamResponse.Account) +} + +func (s *ObserverIndexerEnabledSuite) checkGetSystemTransactionRPC( + ctx context.Context, + observerLocal accessproto.AccessAPIClient, + observerUpstream accessproto.AccessAPIClient, + accessNode accessproto.AccessAPIClient, + blockId []byte, +) { + + observerLocalResponse, err := observerLocal.GetSystemTransaction(ctx, &accessproto.GetSystemTransactionRequest{ + BlockId: blockId, + }) + require.NoError(s.T(), err) + + observerUpstreamResponse, err := observerUpstream.GetSystemTransaction(ctx, &accessproto.GetSystemTransactionRequest{ + BlockId: blockId, + }) + require.NoError(s.T(), err) + 
+ accessNodeResponse, err := accessNode.GetSystemTransaction(ctx, &accessproto.GetSystemTransactionRequest{ + BlockId: blockId, + }) + require.NoError(s.T(), err) + + require.Equal(s.T(), accessNodeResponse.Transaction, observerLocalResponse.Transaction) + require.Equal(s.T(), accessNodeResponse.Transaction, observerUpstreamResponse.Transaction) +} + +func (s *ObserverIndexerEnabledSuite) checkGetExecutionResultByIDRPC( + ctx context.Context, + observerLocal accessproto.AccessAPIClient, + observerUpstream accessproto.AccessAPIClient, + accessNode accessproto.AccessAPIClient, + id []byte, +) { + + observerLocalResponse, err := observerLocal.GetExecutionResultByID(ctx, &accessproto.GetExecutionResultByIDRequest{ + Id: id, + }) + require.NoError(s.T(), err) + + observerUpstreamResponse, err := observerUpstream.GetExecutionResultByID(ctx, &accessproto.GetExecutionResultByIDRequest{ + Id: id, + }) + require.NoError(s.T(), err) + + accessNodeResponse, err := accessNode.GetExecutionResultByID(ctx, &accessproto.GetExecutionResultByIDRequest{ + Id: id, + }) + require.NoError(s.T(), err) + + require.Equal(s.T(), accessNodeResponse.ExecutionResult, observerLocalResponse.ExecutionResult) + require.Equal(s.T(), accessNodeResponse.ExecutionResult, observerUpstreamResponse.ExecutionResult) +} + +func (s *ObserverIndexerEnabledSuite) checkGetTransactionRPC( + ctx context.Context, + observerLocal accessproto.AccessAPIClient, + observerUpstream accessproto.AccessAPIClient, + accessNode accessproto.AccessAPIClient, + id []byte, + blockId []byte, + collectionId []byte, +) { + + observerLocalResponse, err := observerLocal.GetTransaction(ctx, &accessproto.GetTransactionRequest{ + Id: id, + BlockId: blockId, + CollectionId: collectionId, + }) + require.NoError(s.T(), err) + + observerUpstreamResponse, err := observerUpstream.GetTransaction(ctx, &accessproto.GetTransactionRequest{ + Id: id, + BlockId: blockId, + CollectionId: collectionId, + }) + require.NoError(s.T(), err) + + 
accessNodeResponse, err := accessNode.GetTransaction(ctx, &accessproto.GetTransactionRequest{ + Id: id, + BlockId: blockId, + CollectionId: collectionId, + }) + require.NoError(s.T(), err) + + require.Equal(s.T(), accessNodeResponse.Transaction, observerLocalResponse.Transaction) + require.Equal(s.T(), accessNodeResponse.Transaction, observerUpstreamResponse.Transaction) +} + +func (s *ObserverIndexerEnabledSuite) checkGetTransactionResultRPC( + ctx context.Context, + observerLocal accessproto.AccessAPIClient, + observerUpstream accessproto.AccessAPIClient, + accessNode accessproto.AccessAPIClient, + id []byte, + blockId []byte, + collectionId []byte, +) { + + observerLocalResponse, err := observerLocal.GetTransactionResult(ctx, &accessproto.GetTransactionRequest{ + Id: id, + BlockId: blockId, + CollectionId: collectionId, + }) + require.NoError(s.T(), err) + + observerUpstreamResponse, err := observerUpstream.GetTransactionResult(ctx, &accessproto.GetTransactionRequest{ + Id: id, + BlockId: blockId, + CollectionId: collectionId, + }) + require.NoError(s.T(), err) + + accessNodeResponse, err := accessNode.GetTransactionResult(ctx, &accessproto.GetTransactionRequest{ + Id: id, + BlockId: blockId, + CollectionId: collectionId, + }) + require.NoError(s.T(), err) + + require.Equal(s.T(), accessNodeResponse.Events, observerLocalResponse.Events) + require.Equal(s.T(), accessNodeResponse.Events, observerUpstreamResponse.Events) +} + +func (s *ObserverIndexerEnabledSuite) checkGetTransactionResultsByBlockIDRPC( + ctx context.Context, + observerLocal accessproto.AccessAPIClient, + observerUpstream accessproto.AccessAPIClient, + accessNode accessproto.AccessAPIClient, + blockId []byte, +) { + + observerLocalResponse, err := observerLocal.GetTransactionResultsByBlockID(ctx, &accessproto.GetTransactionsByBlockIDRequest{ + BlockId: blockId, + EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, + }) + require.NoError(s.T(), err) + + observerUpstreamResponse, err := 
observerUpstream.GetTransactionResultsByBlockID(ctx, &accessproto.GetTransactionsByBlockIDRequest{ + BlockId: blockId, + EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, + }) + require.NoError(s.T(), err) + + accessNodeResponse, err := accessNode.GetTransactionResultsByBlockID(ctx, &accessproto.GetTransactionsByBlockIDRequest{ + BlockId: blockId, + EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, + }) + require.NoError(s.T(), err) + + require.Equal(s.T(), accessNodeResponse.TransactionResults, observerLocalResponse.TransactionResults) + require.Equal(s.T(), accessNodeResponse.TransactionResults, observerUpstreamResponse.TransactionResults) +} + +func (s *ObserverIndexerEnabledSuite) checkGetTransactionResultsByIndexIDRPC( + ctx context.Context, + observerLocal accessproto.AccessAPIClient, + observerUpstream accessproto.AccessAPIClient, + accessNode accessproto.AccessAPIClient, + blockId []byte, + index uint32, +) { + observerLocalResponse, err := observerLocal.GetTransactionResultByIndex(ctx, &accessproto.GetTransactionByIndexRequest{ + BlockId: blockId, + Index: index, + EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, + }) + require.NoError(s.T(), err) + + observerUpstreamResponse, err := observerUpstream.GetTransactionResultByIndex(ctx, &accessproto.GetTransactionByIndexRequest{ + BlockId: blockId, + Index: index, + EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, + }) + require.NoError(s.T(), err) + + accessNodeResponse, err := accessNode.GetTransactionResultByIndex(ctx, &accessproto.GetTransactionByIndexRequest{ + BlockId: blockId, + Index: index, + EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, + }) + require.NoError(s.T(), err) + + require.Equal(s.T(), accessNodeResponse.Events, observerLocalResponse.Events) + require.Equal(s.T(), accessNodeResponse.Events, observerUpstreamResponse.Events) +} + +func (s *ObserverIndexerEnabledSuite) checkGetTransactionsByBlockIDRPC( + ctx 
context.Context, + observerLocal accessproto.AccessAPIClient, + observerUpstream accessproto.AccessAPIClient, + accessNode accessproto.AccessAPIClient, + blockId []byte, +) { + + observerLocalResponse, err := observerLocal.GetTransactionsByBlockID(ctx, &accessproto.GetTransactionsByBlockIDRequest{ + BlockId: blockId, + }) + require.NoError(s.T(), err) + + observerUpstreamResponse, err := observerUpstream.GetTransactionsByBlockID(ctx, &accessproto.GetTransactionsByBlockIDRequest{ + BlockId: blockId, + }) + require.NoError(s.T(), err) + + accessNodeResponse, err := accessNode.GetTransactionsByBlockID(ctx, &accessproto.GetTransactionsByBlockIDRequest{ + BlockId: blockId, + }) + require.NoError(s.T(), err) + + require.Equal(s.T(), accessNodeResponse.Transactions, observerLocalResponse.Transactions) + require.Equal(s.T(), accessNodeResponse.Transactions, observerUpstreamResponse.Transactions) +} + +func (s *ObserverIndexerEnabledSuite) checkGetCollectionByIDRPC( + ctx context.Context, + observerLocal accessproto.AccessAPIClient, + observerUpstream accessproto.AccessAPIClient, + accessNode accessproto.AccessAPIClient, + collectionId []byte, +) { + + observerLocalResponse, err := observerLocal.GetCollectionByID(ctx, &accessproto.GetCollectionByIDRequest{ + Id: collectionId, + }) + require.NoError(s.T(), err) + + observerUpstreamResponse, err := observerUpstream.GetCollectionByID(ctx, &accessproto.GetCollectionByIDRequest{ + Id: collectionId, + }) + require.NoError(s.T(), err) + + accessNodeResponse, err := accessNode.GetCollectionByID(ctx, &accessproto.GetCollectionByIDRequest{ + Id: collectionId, + }) + require.NoError(s.T(), err) + + require.Equal(s.T(), accessNodeResponse.Collection, observerLocalResponse.Collection) + require.Equal(s.T(), accessNodeResponse.Collection, observerUpstreamResponse.Collection) +} + +func (s *ObserverIndexerEnabledSuite) checkExecuteScriptAtBlockHeightRPC( + ctx context.Context, + observerLocal accessproto.AccessAPIClient, + 
observerUpstream accessproto.AccessAPIClient, + accessNode accessproto.AccessAPIClient, + blockHeight uint64, + script []byte, +) { + + observerLocalResponse, err := observerLocal.ExecuteScriptAtBlockHeight(ctx, &accessproto.ExecuteScriptAtBlockHeightRequest{ + BlockHeight: blockHeight, + Script: script, + Arguments: make([][]byte, 0), + }) + require.NoError(s.T(), err) + + observerUpstreamResponse, err := observerUpstream.ExecuteScriptAtBlockHeight(ctx, &accessproto.ExecuteScriptAtBlockHeightRequest{ + BlockHeight: blockHeight, + Script: script, + Arguments: make([][]byte, 0), + }) + require.NoError(s.T(), err) + + accessNodeResponse, err := accessNode.ExecuteScriptAtBlockHeight(ctx, &accessproto.ExecuteScriptAtBlockHeightRequest{ + BlockHeight: blockHeight, + Script: script, + Arguments: make([][]byte, 0), + }) + require.NoError(s.T(), err) + + require.Equal(s.T(), accessNodeResponse.Value, observerLocalResponse.Value) + require.Equal(s.T(), accessNodeResponse.Value, observerUpstreamResponse.Value) +} + +func (s *ObserverIndexerEnabledSuite) checkExecuteScriptAtBlockIDRPC( + ctx context.Context, + observerLocal accessproto.AccessAPIClient, + observerUpstream accessproto.AccessAPIClient, + accessNode accessproto.AccessAPIClient, + blockId []byte, + script []byte, +) { + + observerLocalResponse, err := observerLocal.ExecuteScriptAtBlockID(ctx, &accessproto.ExecuteScriptAtBlockIDRequest{ + BlockId: blockId, + Script: script, + Arguments: make([][]byte, 0), + }) + require.NoError(s.T(), err) + + observerUpstreamResponse, err := observerUpstream.ExecuteScriptAtBlockID(ctx, &accessproto.ExecuteScriptAtBlockIDRequest{ + BlockId: blockId, + Script: script, + Arguments: make([][]byte, 0), + }) + require.NoError(s.T(), err) + + accessNodeResponse, err := accessNode.ExecuteScriptAtBlockID(ctx, &accessproto.ExecuteScriptAtBlockIDRequest{ + BlockId: blockId, + Script: script, + Arguments: make([][]byte, 0), + }) + require.NoError(s.T(), err) + + require.Equal(s.T(), 
accessNodeResponse.Value, observerLocalResponse.Value) + require.Equal(s.T(), accessNodeResponse.Value, observerUpstreamResponse.Value) +} From 45fceadc1a9656513c8172e928bf1c93b1afdc71 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Mon, 1 Apr 2024 09:09:27 -0400 Subject: [PATCH 026/148] Update cmd/bootstrap/cmd/keys.go Co-authored-by: Jordan Schalm --- cmd/bootstrap/cmd/keys.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/bootstrap/cmd/keys.go b/cmd/bootstrap/cmd/keys.go index 7d3f053a714..f33b5f28241 100644 --- a/cmd/bootstrap/cmd/keys.go +++ b/cmd/bootstrap/cmd/keys.go @@ -67,8 +67,8 @@ func assembleNodeInfo(nodeConfig model.NodeConfig, networkKey, stakingKey crypto } log.Debug(). - Str("networkPubKey", common.PubKeyToString(networkKey.PublicKey())). - Str("stakingPubKey", common.PubKeyToString(stakingKey.PublicKey())). + Str("networkPubKey", networkKey.PublicKey().String()). + Str("stakingPubKey", stakingKey.PublicKey().String()). Msg("encoded public staking and network keys") nodeInfo := model.NewPrivateNodeInfo( From 18f7aec7dd01e1ea789db21a8b5402cb09d12fdb Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Mon, 1 Apr 2024 10:14:54 -0400 Subject: [PATCH 027/148] Update keys.go --- cmd/bootstrap/cmd/keys.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/bootstrap/cmd/keys.go b/cmd/bootstrap/cmd/keys.go index 7d3f053a714..f33b5f28241 100644 --- a/cmd/bootstrap/cmd/keys.go +++ b/cmd/bootstrap/cmd/keys.go @@ -67,8 +67,8 @@ func assembleNodeInfo(nodeConfig model.NodeConfig, networkKey, stakingKey crypto } log.Debug(). - Str("networkPubKey", common.PubKeyToString(networkKey.PublicKey())). - Str("stakingPubKey", common.PubKeyToString(stakingKey.PublicKey())). + Str("networkPubKey", networkKey.PublicKey().String()). + Str("stakingPubKey", stakingKey.PublicKey().String()). 
Msg("encoded public staking and network keys") nodeInfo := model.NewPrivateNodeInfo( From c82ffbcdd008f32745a9986607926b61520e92f3 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Mon, 1 Apr 2024 12:13:43 -0400 Subject: [PATCH 028/148] move NotEjected filter to filter package - add filter for valid protocol participant --- .../node_builder/access_node_builder.go | 2 +- cmd/scaffold.go | 2 +- cmd/util/cmd/epochs/cmd/recover.go | 19 ++++++++++--------- model/flow/filter/identity.go | 17 +++++++++++++++++ .../p2p/cache/node_blocklist_wrapper_test.go | 3 +-- network/underlay/network.go | 10 +--------- 6 files changed, 31 insertions(+), 22 deletions(-) diff --git a/cmd/access/node_builder/access_node_builder.go b/cmd/access/node_builder/access_node_builder.go index 16fa42a5d8a..f466ddfd8bc 100644 --- a/cmd/access/node_builder/access_node_builder.go +++ b/cmd/access/node_builder/access_node_builder.go @@ -1307,7 +1307,7 @@ func (builder *FlowAccessNodeBuilder) InitIDProviders() { filter.And( filter.HasRole[flow.Identity](flow.RoleConsensus), filter.Not(filter.HasNodeID[flow.Identity](node.Me.NodeID())), - underlay.NotEjectedFilter, + filter.NotEjectedFilter, ), builder.IdentityProvider, ) diff --git a/cmd/scaffold.go b/cmd/scaffold.go index 1086314265b..d645dc9cf85 100644 --- a/cmd/scaffold.go +++ b/cmd/scaffold.go @@ -1253,7 +1253,7 @@ func (fnb *FlowNodeBuilder) InitIDProviders() { filter.And( filter.HasRole[flow.Identity](flow.RoleConsensus), filter.Not(filter.HasNodeID[flow.Identity](node.Me.NodeID())), - underlay.NotEjectedFilter, + filter.NotEjectedFilter, ), node.IdentityProvider, ) diff --git a/cmd/util/cmd/epochs/cmd/recover.go b/cmd/util/cmd/epochs/cmd/recover.go index 405d46473ee..9d5f2cf63b8 100644 --- a/cmd/util/cmd/epochs/cmd/recover.go +++ b/cmd/util/cmd/epochs/cmd/recover.go @@ -11,6 +11,7 @@ import ( "github.com/onflow/flow-go/cmd/util/cmd/common" epochcmdutil "github.com/onflow/flow-go/cmd/util/cmd/epochs/utils" "github.com/onflow/flow-go/model/flow" 
+ "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/state/protocol/inmem" ) @@ -107,7 +108,7 @@ func generateRecoverEpochTxArgs(getSnapshot func() *inmem.Snapshot) func(cmd *co // extractResetEpochArgs extracts the required transaction arguments for the `resetEpoch` transaction func extractRecoverEpochArgs(snapshot *inmem.Snapshot) []cadence.Value { epoch := snapshot.Epochs().Current() - ids, err := epoch.InitialIdentities() + ids, err := snapshot.Identities(filter.IsValidProtocolParticipant) if err != nil { log.Fatal().Err(err).Msg("failed to get initial identities for current epoch") } @@ -156,24 +157,24 @@ func extractRecoverEpochArgs(snapshot *inmem.Snapshot) []cadence.Value { dkgPubKeys := make([]cadence.Value, 0) nodeIds := make([]cadence.Value, 0) - ids.Map(func(skeleton flow.IdentitySkeleton) flow.IdentitySkeleton { - if skeleton.GetRole() == flow.RoleConsensus { - dkgPubKey, keyShareErr := currentEpochDKG.KeyShare(skeleton.GetNodeID()) + ids.Map(func(identity flow.Identity) flow.Identity { + if identity.GetRole() == flow.RoleConsensus { + dkgPubKey, keyShareErr := currentEpochDKG.KeyShare(identity.GetNodeID()) if keyShareErr != nil { - log.Fatal().Err(keyShareErr).Msg(fmt.Sprintf("failed to get dkg pub key share for node: %s", skeleton.GetNodeID())) + log.Fatal().Err(keyShareErr).Msg(fmt.Sprintf("failed to get dkg pub key share for node: %s", identity.GetNodeID())) } dkgPubKeyCdc, cdcErr := cadence.NewString(dkgPubKey.String()) if cdcErr != nil { - log.Fatal().Err(cdcErr).Msg(fmt.Sprintf("failed to get dkg pub key cadence string for node: %s", skeleton.GetNodeID())) + log.Fatal().Err(cdcErr).Msg(fmt.Sprintf("failed to get dkg pub key cadence string for node: %s", identity.GetNodeID())) } dkgPubKeys = append(dkgPubKeys, dkgPubKeyCdc) } - nodeIdCdc, err := cadence.NewString(skeleton.GetNodeID().String()) + nodeIdCdc, err := cadence.NewString(identity.GetNodeID().String()) if err != nil { - log.Fatal().Err(err).Msg(fmt.Sprintf("failed 
to convert node ID to cadence string: %s", skeleton.GetNodeID())) + log.Fatal().Err(err).Msg(fmt.Sprintf("failed to convert node ID to cadence string: %s", identity.GetNodeID())) } nodeIds = append(nodeIds, nodeIdCdc) - return skeleton + return identity }) // @TODO: cluster qcs are converted into flow.ClusterQCVoteData types, diff --git a/model/flow/filter/identity.go b/model/flow/filter/identity.go index adbbceee9b0..2afca5e2212 100644 --- a/model/flow/filter/identity.go +++ b/model/flow/filter/identity.go @@ -145,3 +145,20 @@ var IsVotingConsensusCommitteeMember = And[flow.Identity]( // equivalent to the filter for consensus committee members, as these are // the same group for now. var IsValidDKGParticipant = IsConsensusCommitteeMember + +// NotEjectedFilter is an identity filter for peers that are not ejected. +var NotEjectedFilter = Not(HasParticipationStatus(flow.EpochParticipationStatusEjected)) + +// HasWeightGreaterThanZero returns a filter for nodes with a weight greater than zero. +func HasWeightGreaterThanZero[T flow.GenericIdentity](identity *T) bool { + return (*identity).GetInitialWeight() > 0 +} + +// IsValidProtocolParticipant is an identity filter for all valid protocol participants. +// A protocol participant is considered valid if and only if the following are both true. +// 1. The node is not ejected. +// 2. The node has a weight greater than 0. 
+var IsValidProtocolParticipant = And[flow.Identity]( + NotEjectedFilter, // enforces 1 + HasWeightGreaterThanZero[flow.Identity], // enforces 2 +) diff --git a/network/p2p/cache/node_blocklist_wrapper_test.go b/network/p2p/cache/node_blocklist_wrapper_test.go index 95ee5bc098b..c3e3d36a37f 100644 --- a/network/p2p/cache/node_blocklist_wrapper_test.go +++ b/network/p2p/cache/node_blocklist_wrapper_test.go @@ -17,7 +17,6 @@ import ( "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/mocknetwork" "github.com/onflow/flow-go/network/p2p/cache" - "github.com/onflow/flow-go/network/underlay" "github.com/onflow/flow-go/utils/unittest" ) @@ -177,7 +176,7 @@ func (s *NodeDisallowListWrapperTestSuite) TestDisallowListNode() { s.provider.On("Identities", mock.Anything).Return(combinedIdentities) - identities := s.wrapper.Identities(underlay.NotEjectedFilter) + identities := s.wrapper.Identities(filter.NotEjectedFilter) require.Equal(s.T(), len(honestIdentities), len(identities)) // expected only honest nodes to be returned for _, i := range identities { diff --git a/network/underlay/network.go b/network/underlay/network.go index 6c238939b8b..9217aa099f4 100644 --- a/network/underlay/network.go +++ b/network/underlay/network.go @@ -74,14 +74,6 @@ var ( ErrUnicastMsgWithoutSub = errors.New("networking layer does not have subscription for the channel ID indicated in the unicast message received") ) -// NotEjectedFilter is an identity filter that, when applied to the identity -// table at a given snapshot, returns all nodes that we should communicate with -// over the networking layer. -// -// NOTE: The protocol state includes nodes from the previous/next epoch that should -// be included in network communication. We omit any nodes that have been ejected. 
-var NotEjectedFilter = filter.Not(filter.HasParticipationStatus(flow.EpochParticipationStatusEjected)) - // Network serves as the comprehensive networking layer that integrates three interfaces within Flow; Underlay, EngineRegistry, and ConduitAdapter. // It is responsible for creating conduits through which engines can send and receive messages to and from other engines on the network, as well as registering other services // such as BlobService and PingService. It also provides a set of APIs that can be used to send messages to other nodes on the network. @@ -545,7 +537,7 @@ func (n *Network) UnRegisterChannel(channel channels.Channel) error { } func (n *Network) Identities() flow.IdentityList { - return n.identityProvider.Identities(NotEjectedFilter) + return n.identityProvider.Identities(filter.NotEjectedFilter) } func (n *Network) Identity(pid peer.ID) (*flow.Identity, bool) { From 321da9748a10e2890942c05a2f747a23104a1e0a Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Mon, 1 Apr 2024 15:38:10 -0400 Subject: [PATCH 029/148] get subsets of internal collectors and partner collectors from snapshot - emit fatal log if identity is present in internal node info from disc but missing from snap shot identities list --- cmd/bootstrap/cmd/finalize_test.go | 4 ++-- cmd/bootstrap/cmd/rootblock.go | 2 +- cmd/util/cmd/common/clusters.go | 6 ++--- cmd/util/cmd/epochs/cmd/recover.go | 38 ++++++++++++++++++++++-------- 4 files changed, 34 insertions(+), 16 deletions(-) diff --git a/cmd/bootstrap/cmd/finalize_test.go b/cmd/bootstrap/cmd/finalize_test.go index 395b1a4a774..8d5f15ad19b 100644 --- a/cmd/bootstrap/cmd/finalize_test.go +++ b/cmd/bootstrap/cmd/finalize_test.go @@ -114,14 +114,14 @@ func TestClusterAssignment(t *testing.T) { log := zerolog.Nop() // should not error - _, clusters, err := common.ConstructClusterAssignment(log, partners, internals, int(flagCollectionClusters)) + _, clusters, err := common.ConstructClusterAssignment(log, 
model.ToIdentityList(partners), model.ToIdentityList(internals), int(flagCollectionClusters)) require.NoError(t, err) require.True(t, checkClusterConstraint(clusters, partners, internals)) // unhappy Path internals = internals[:21] // reduce one internal node // should error - _, _, err = common.ConstructClusterAssignment(log, partners, internals, int(flagCollectionClusters)) + _, _, err = common.ConstructClusterAssignment(log, model.ToIdentityList(partners), model.ToIdentityList(internals), int(flagCollectionClusters)) require.Error(t, err) // revert the flag value flagCollectionClusters = tmp diff --git a/cmd/bootstrap/cmd/rootblock.go b/cmd/bootstrap/cmd/rootblock.go index c745c4f4044..cb6085eb383 100644 --- a/cmd/bootstrap/cmd/rootblock.go +++ b/cmd/bootstrap/cmd/rootblock.go @@ -172,7 +172,7 @@ func rootBlock(cmd *cobra.Command, args []string) { participants := model.ToIdentityList(stakingNodes).Sort(flow.Canonical[flow.Identity]) log.Info().Msg("computing collection node clusters") - assignments, clusters, err := common.ConstructClusterAssignment(log, partnerNodes, internalNodes, int(flagCollectionClusters)) + assignments, clusters, err := common.ConstructClusterAssignment(log, model.ToIdentityList(partnerNodes), model.ToIdentityList(internalNodes), int(flagCollectionClusters)) if err != nil { log.Fatal().Err(err).Msg("unable to generate cluster assignment") } diff --git a/cmd/util/cmd/common/clusters.go b/cmd/util/cmd/common/clusters.go index 4fe4c4347c8..39cbe06aa2f 100644 --- a/cmd/util/cmd/common/clusters.go +++ b/cmd/util/cmd/common/clusters.go @@ -29,10 +29,10 @@ import ( // satisfied, an exception is returned. // Note that if an exception is returned with a certain number of internal/partner nodes, there is no chance // of succeeding the assignment by re-running the function without increasing the internal nodes ratio. 
-func ConstructClusterAssignment(log zerolog.Logger, partnerNodes, internalNodes []bootstrap.NodeInfo, numCollectionClusters int) (flow.AssignmentList, flow.ClusterList, error) { +func ConstructClusterAssignment(log zerolog.Logger, partnerNodes, internalNodes flow.IdentityList, numCollectionClusters int) (flow.AssignmentList, flow.ClusterList, error) { - partners := bootstrap.ToIdentityList(partnerNodes).Filter(filter.HasRole[flow.Identity](flow.RoleCollection)) - internals := bootstrap.ToIdentityList(internalNodes).Filter(filter.HasRole[flow.Identity](flow.RoleCollection)) + partners := partnerNodes.Filter(filter.HasRole[flow.Identity](flow.RoleCollection)) + internals := internalNodes.Filter(filter.HasRole[flow.Identity](flow.RoleCollection)) nCollectors := len(partners) + len(internals) // ensure we have at least as many collection nodes as clusters diff --git a/cmd/util/cmd/epochs/cmd/recover.go b/cmd/util/cmd/epochs/cmd/recover.go index 9d5f2cf63b8..6247d454eb4 100644 --- a/cmd/util/cmd/epochs/cmd/recover.go +++ b/cmd/util/cmd/epochs/cmd/recover.go @@ -108,26 +108,44 @@ func generateRecoverEpochTxArgs(getSnapshot func() *inmem.Snapshot) func(cmd *co // extractResetEpochArgs extracts the required transaction arguments for the `resetEpoch` transaction func extractRecoverEpochArgs(snapshot *inmem.Snapshot) []cadence.Value { epoch := snapshot.Epochs().Current() + ids, err := snapshot.Identities(filter.IsValidProtocolParticipant) if err != nil { - log.Fatal().Err(err).Msg("failed to get initial identities for current epoch") + log.Fatal().Err(err).Msg("failed to get valid protocol participants from snapshot") } - currentEpochDKG, err := epoch.DKG() - if err != nil { - log.Fatal().Err(err).Msg("failed to get DKG for current epoch") - } - - log.Info().Msg("collecting partner network and staking keys") - partnerNodes := common.ReadPartnerNodeInfos(log, flagPartnerWeights, flagPartnerNodeInfoDir) - log.Info().Msg("") + // separate collector nodes by internal and 
partner nodes + collectors := ids.Filter(filter.HasRole[flow.Identity](flow.RoleCollection)) + internalCollectors := make(flow.IdentityList, 0) + partnerCollectors := make(flow.IdentityList, 0) log.Info().Msg("collecting internal node network and staking keys") internalNodes := common.ReadInternalNodeInfos(log, flagInternalNodePrivInfoDir, flagNodeConfigJson) + internalNodesMap := make(map[flow.Identifier]struct{}) + for _, node := range internalNodes { + if !ids.Exists(node.Identity()) { + log.Fatal().Msg(fmt.Sprintf("node ID found in internal node infos missing from protocol snapshot identities: %s", node.NodeID)) + } + internalNodesMap[node.NodeID] = struct{}{} + } log.Info().Msg("") + collectors.Map(func(identity flow.Identity) flow.Identity { + if _, ok := internalNodesMap[identity.NodeID]; ok { + internalCollectors = append(internalCollectors, &identity) + } else { + partnerCollectors = append(partnerCollectors, &identity) + } + return identity + }) + + currentEpochDKG, err := epoch.DKG() + if err != nil { + log.Fatal().Err(err).Msg("failed to get DKG for current epoch") + } + log.Info().Msg("computing collection node clusters") - _, clusters, err := common.ConstructClusterAssignment(log, partnerNodes, internalNodes, flagCollectionClusters) + _, clusters, err := common.ConstructClusterAssignment(log, partnerCollectors, internalCollectors, flagCollectionClusters) if err != nil { log.Fatal().Err(err).Msg("unable to generate cluster assignment") } From 9a4542d48d26063e9a8557972ab870c7a133c67d Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Mon, 1 Apr 2024 23:50:18 -0400 Subject: [PATCH 030/148] Update cmd/util/cmd/epochs/cmd/recover.go Co-authored-by: Jordan Schalm --- cmd/util/cmd/epochs/cmd/recover.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/cmd/util/cmd/epochs/cmd/recover.go b/cmd/util/cmd/epochs/cmd/recover.go index 086534769fc..95b93375b91 100644 --- a/cmd/util/cmd/epochs/cmd/recover.go +++ b/cmd/util/cmd/epochs/cmd/recover.go @@ -28,8 +28,6 
@@ var ( flagAnAddress string flagAnPubkey string - flagPartnerWeights string - flagPartnerNodeInfoDir string flagInternalNodePrivInfoDir string flagConfig string flagCollectionClusters int From 91d6599a9ea350954a3244a8c1a61860ee3ef7df Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Mon, 1 Apr 2024 23:51:04 -0400 Subject: [PATCH 031/148] Update cmd/util/cmd/epochs/cmd/recover.go Co-authored-by: Jordan Schalm --- cmd/util/cmd/epochs/cmd/recover.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/cmd/util/cmd/epochs/cmd/recover.go b/cmd/util/cmd/epochs/cmd/recover.go index 95b93375b91..026be866aae 100644 --- a/cmd/util/cmd/epochs/cmd/recover.go +++ b/cmd/util/cmd/epochs/cmd/recover.go @@ -51,11 +51,6 @@ func addGenerateRecoverEpochTxArgsCmdFlags() { "path to a JSON file containing multiple node configurations (fields Role, Address, Weight)") generateRecoverEpochTxArgsCmd.Flags().StringVar(&flagInternalNodePrivInfoDir, "internal-priv-dir", "", "path to directory "+ "containing the output from the `keygen` command for internal nodes") - generateRecoverEpochTxArgsCmd.Flags().StringVar(&flagPartnerNodeInfoDir, "partner-dir", "", "path to directory "+ - "containing one JSON file starting with node-info.pub..json for every partner node (fields "+ - " in the JSON file: Role, Address, NodeID, NetworkPubKey, StakingPubKey)") - generateRecoverEpochTxArgsCmd.Flags().StringVar(&flagPartnerWeights, "partner-weights", "", "path to a JSON file containing "+ - "a map from partner node's NodeID to their stake") generateRecoverEpochTxArgsCmd.Flags().Uint64Var(&flagStartView, "start-view", 0, "start view of the recovery epoch") generateRecoverEpochTxArgsCmd.Flags().Uint64Var(&flagStakingEndView, "staking-end-view", 0, "end view of the staking phase of the recovery epoch") generateRecoverEpochTxArgsCmd.Flags().Uint64Var(&flagEndView, "end-view", 0, "end view of the recovery epoch") From 051629dfd5e9e2e4fe79f05df7c17819ff0a7a38 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: 
Mon, 1 Apr 2024 23:51:10 -0400 Subject: [PATCH 032/148] Update cmd/util/cmd/epochs/cmd/recover.go Co-authored-by: Jordan Schalm --- cmd/util/cmd/epochs/cmd/recover.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/util/cmd/epochs/cmd/recover.go b/cmd/util/cmd/epochs/cmd/recover.go index 026be866aae..9277e6b03ac 100644 --- a/cmd/util/cmd/epochs/cmd/recover.go +++ b/cmd/util/cmd/epochs/cmd/recover.go @@ -20,7 +20,7 @@ import ( // identities, generates the cluster QC's and retrieves the DKG key vector of the last successful epoch. var ( generateRecoverEpochTxArgsCmd = &cobra.Command{ - Use: "generate-efm-recovery-data", + Use: "efm-recover-tx-args", Short: "Generates recover epoch transaction arguments", Long: "Generates transaction arguments for the epoch recovery transaction.", Run: generateRecoverEpochTxArgs, From 1399e1864015c67222c988f95a9f7b6dca9a5d04 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Mon, 1 Apr 2024 23:51:33 -0400 Subject: [PATCH 033/148] Update cmd/util/cmd/epochs/cmd/recover.go Co-authored-by: Jordan Schalm --- cmd/util/cmd/epochs/cmd/recover.go | 1 - 1 file changed, 1 deletion(-) diff --git a/cmd/util/cmd/epochs/cmd/recover.go b/cmd/util/cmd/epochs/cmd/recover.go index 9277e6b03ac..57f84bdb5d7 100644 --- a/cmd/util/cmd/epochs/cmd/recover.go +++ b/cmd/util/cmd/epochs/cmd/recover.go @@ -130,7 +130,6 @@ func extractRecoverEpochArgs(snapshot *inmem.Snapshot) []cadence.Value { log.Info().Msg("constructing root QCs for collection node clusters") clusterQCs := common.ConstructRootQCsForClusters(log, clusters, internalNodes, clusterBlocks) - fmt.Sprintf("", clusterQCs) log.Info().Msg("") randomSource, err := epoch.RandomSource() From 2b9c152bcf90a152060beba47b28646c5c1d2ee3 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Mon, 1 Apr 2024 23:52:49 -0400 Subject: [PATCH 034/148] Update recover.go --- cmd/util/cmd/epochs/cmd/recover.go | 7 ------- 1 file changed, 7 deletions(-) diff --git a/cmd/util/cmd/epochs/cmd/recover.go 
b/cmd/util/cmd/epochs/cmd/recover.go index 6247d454eb4..be72cb4225f 100644 --- a/cmd/util/cmd/epochs/cmd/recover.go +++ b/cmd/util/cmd/epochs/cmd/recover.go @@ -30,8 +30,6 @@ var ( flagAnAddress string flagAnPubkey string - flagPartnerWeights string - flagPartnerNodeInfoDir string flagInternalNodePrivInfoDir string flagNodeConfigJson string flagCollectionClusters int @@ -53,11 +51,6 @@ func addGenerateRecoverEpochTxArgsCmdFlags() { "path to a JSON file containing multiple node configurations (fields Role, Address, Weight)") generateRecoverEpochTxArgsCmd.Flags().StringVar(&flagInternalNodePrivInfoDir, "internal-priv-dir", "", "path to directory "+ "containing the output from the `keygen` command for internal nodes") - generateRecoverEpochTxArgsCmd.Flags().StringVar(&flagPartnerNodeInfoDir, "partner-dir", "", "path to directory "+ - "containing one JSON file starting with node-info.pub..json for every partner node (fields "+ - " in the JSON file: Role, Address, NodeID, NetworkPubKey, StakingPubKey)") - generateRecoverEpochTxArgsCmd.Flags().StringVar(&flagPartnerWeights, "partner-weights", "", "path to a JSON file containing "+ - "a map from partner node's NodeID to their stake") generateRecoverEpochTxArgsCmd.Flags().Uint64Var(&flagStartView, "start-view", 0, "start view of the recovery epoch") generateRecoverEpochTxArgsCmd.Flags().Uint64Var(&flagStakingEndView, "staking-end-view", 0, "end view of the staking phase of the recovery epoch") generateRecoverEpochTxArgsCmd.Flags().Uint64Var(&flagEndView, "end-view", 0, "end view of the recovery epoch") From 3c8ee335d9069789f8db685dc536ffbb10f7ee4c Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Mon, 1 Apr 2024 23:56:09 -0400 Subject: [PATCH 035/148] epoch counter should be an input --- cmd/util/cmd/epochs/cmd/recover.go | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/cmd/util/cmd/epochs/cmd/recover.go b/cmd/util/cmd/epochs/cmd/recover.go index be72cb4225f..13ef925817c 100644 --- 
a/cmd/util/cmd/epochs/cmd/recover.go +++ b/cmd/util/cmd/epochs/cmd/recover.go @@ -36,6 +36,7 @@ var ( flagStartView uint64 flagStakingEndView uint64 flagEndView uint64 + flagEpochCounter uint64 ) func init() { @@ -54,6 +55,7 @@ func addGenerateRecoverEpochTxArgsCmdFlags() { generateRecoverEpochTxArgsCmd.Flags().Uint64Var(&flagStartView, "start-view", 0, "start view of the recovery epoch") generateRecoverEpochTxArgsCmd.Flags().Uint64Var(&flagStakingEndView, "staking-end-view", 0, "end view of the staking phase of the recovery epoch") generateRecoverEpochTxArgsCmd.Flags().Uint64Var(&flagEndView, "end-view", 0, "end view of the recovery epoch") + generateRecoverEpochTxArgsCmd.Flags().Uint64Var(&flagEpochCounter, "epoch-counter", 0, "the epoch counter used to generate the root cluster block") } func getSnapshot() *inmem.Snapshot { @@ -142,15 +144,10 @@ func extractRecoverEpochArgs(snapshot *inmem.Snapshot) []cadence.Value { if err != nil { log.Fatal().Err(err).Msg("unable to generate cluster assignment") } - log.Info().Msg("") - epochCounter, err := epoch.Counter() - if err != nil { - log.Fatal().Err(err).Msg("unable to get epoch counter from current epoch") - } log.Info().Msg("constructing root blocks for collection node clusters") - clusterBlocks := run.GenerateRootClusterBlocks(epochCounter, clusters) + clusterBlocks := run.GenerateRootClusterBlocks(flagEpochCounter, clusters) log.Info().Msg("") log.Info().Msg("constructing root QCs for collection node clusters") From d9db0eb024e5cc6096263faf2d11e53279e1ac76 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Mon, 1 Apr 2024 23:56:55 -0400 Subject: [PATCH 036/148] smart contract should generate random source with revertibleRandom --- cmd/util/cmd/epochs/cmd/recover.go | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/cmd/util/cmd/epochs/cmd/recover.go b/cmd/util/cmd/epochs/cmd/recover.go index 13ef925817c..e1d9afab223 100644 --- a/cmd/util/cmd/epochs/cmd/recover.go +++ 
b/cmd/util/cmd/epochs/cmd/recover.go @@ -2,7 +2,6 @@ package cmd import ( "context" - "encoding/hex" "fmt" "github.com/spf13/cobra" @@ -154,15 +153,6 @@ func extractRecoverEpochArgs(snapshot *inmem.Snapshot) []cadence.Value { clusterQCs := common.ConstructRootQCsForClusters(log, clusters, internalNodes, clusterBlocks) log.Info().Msg("") - randomSource, err := epoch.RandomSource() - if err != nil { - log.Fatal().Err(err).Msg("failed to get random source for current epoch") - } - randomSourceCdc, err := cadence.NewString(hex.EncodeToString(randomSource)) - if err != nil { - log.Fatal().Err(err).Msg("failed to get random source cadence string") - } - dkgPubKeys := make([]cadence.Value, 0) nodeIds := make([]cadence.Value, 0) ids.Map(func(identity flow.Identity) flow.Identity { @@ -194,7 +184,6 @@ func extractRecoverEpochArgs(snapshot *inmem.Snapshot) []cadence.Value { } args := []cadence.Value{ - randomSourceCdc, cadence.NewUInt64(flagStartView), cadence.NewUInt64(flagStakingEndView), cadence.NewUInt64(flagEndView), From f48b5a750dff8353e3ae8120945c4132ece06b60 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Mon, 1 Apr 2024 23:57:13 -0400 Subject: [PATCH 037/148] Update cmd/util/cmd/epochs/cmd/recover.go Co-authored-by: Jordan Schalm --- cmd/util/cmd/epochs/cmd/recover.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/cmd/util/cmd/epochs/cmd/recover.go b/cmd/util/cmd/epochs/cmd/recover.go index 57f84bdb5d7..1dc4d7818e1 100644 --- a/cmd/util/cmd/epochs/cmd/recover.go +++ b/cmd/util/cmd/epochs/cmd/recover.go @@ -42,8 +42,6 @@ func init() { } func addGenerateRecoverEpochTxArgsCmdFlags() { - generateRecoverEpochTxArgsCmd.Flags().StringVar(&flagBucketNetworkName, "bucket-network-name", "", - "when retrieving the root snapshot from a GCP bucket, the network name portion of the URL (eg. 
\"mainnet-13\")") generateRecoverEpochTxArgsCmd.Flags().IntVar(&flagCollectionClusters, "collection-clusters", 0, "number of collection clusters") // required parameters for network configuration and generation of root node identities From 0cb7caed3c425086bf31ca557ddb9a3ee01dfcef Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Mon, 1 Apr 2024 23:57:26 -0400 Subject: [PATCH 038/148] Update cmd/util/cmd/epochs/cmd/recover.go Co-authored-by: Jordan Schalm --- cmd/util/cmd/epochs/cmd/recover.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/util/cmd/epochs/cmd/recover.go b/cmd/util/cmd/epochs/cmd/recover.go index 1dc4d7818e1..9091187d52c 100644 --- a/cmd/util/cmd/epochs/cmd/recover.go +++ b/cmd/util/cmd/epochs/cmd/recover.go @@ -42,7 +42,7 @@ func init() { } func addGenerateRecoverEpochTxArgsCmdFlags() { - generateRecoverEpochTxArgsCmd.Flags().IntVar(&flagCollectionClusters, "collection-clusters", 0, + generateRecoverEpochTxArgsCmd.Flags().IntVar(&flagCollectionClusters, "collection-clusters", 3, "number of collection clusters") // required parameters for network configuration and generation of root node identities generateRecoverEpochTxArgsCmd.Flags().StringVar(&flagConfig, "config", "", From 884fdbf2dd8a1508f897819c55ea4e953287efa2 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Tue, 2 Apr 2024 00:44:25 -0400 Subject: [PATCH 039/148] add epoch-length and epoch-staking-phase-length - infer start view, staking phase end view, and epoch end view from curr epoch final view --- cmd/util/cmd/epochs/cmd/recover.go | 39 +++++++++++++++++++----------- 1 file changed, 25 insertions(+), 14 deletions(-) diff --git a/cmd/util/cmd/epochs/cmd/recover.go b/cmd/util/cmd/epochs/cmd/recover.go index e1d9afab223..6b716b63dee 100644 --- a/cmd/util/cmd/epochs/cmd/recover.go +++ b/cmd/util/cmd/epochs/cmd/recover.go @@ -27,15 +27,15 @@ var ( Run: generateRecoverEpochTxArgs(getSnapshot), } - flagAnAddress string - flagAnPubkey string - 
flagInternalNodePrivInfoDir string - flagNodeConfigJson string - flagCollectionClusters int - flagStartView uint64 - flagStakingEndView uint64 - flagEndView uint64 - flagEpochCounter uint64 + flagAnAddress string + flagAnPubkey string + flagInternalNodePrivInfoDir string + flagNodeConfigJson string + flagCollectionClusters int + flagStartView uint64 + flagNumViewsInEpoch uint64 + flagNumViewsInStakingAuction uint64 + flagEpochCounter uint64 ) func init() { @@ -51,9 +51,10 @@ func addGenerateRecoverEpochTxArgsCmdFlags() { "path to a JSON file containing multiple node configurations (fields Role, Address, Weight)") generateRecoverEpochTxArgsCmd.Flags().StringVar(&flagInternalNodePrivInfoDir, "internal-priv-dir", "", "path to directory "+ "containing the output from the `keygen` command for internal nodes") + generateRecoverEpochTxArgsCmd.Flags().Uint64Var(&flagStartView, "start-view", 0, "start view of the recovery epoch") - generateRecoverEpochTxArgsCmd.Flags().Uint64Var(&flagStakingEndView, "staking-end-view", 0, "end view of the staking phase of the recovery epoch") - generateRecoverEpochTxArgsCmd.Flags().Uint64Var(&flagEndView, "end-view", 0, "end view of the recovery epoch") + generateRecoverEpochTxArgsCmd.Flags().Uint64Var(&flagNumViewsInEpoch, "epoch-length", 4000, "length of each epoch measured in views") + generateRecoverEpochTxArgsCmd.Flags().Uint64Var(&flagNumViewsInStakingAuction, "epoch-staking-phase-length", 100, "length of the epoch staking phase measured in views") generateRecoverEpochTxArgsCmd.Flags().Uint64Var(&flagEpochCounter, "epoch-counter", 0, "the epoch counter used to generate the root cluster block") } @@ -183,11 +184,21 @@ func extractRecoverEpochArgs(snapshot *inmem.Snapshot) []cadence.Value { log.Fatal().Err(err).Msg("failed to convert cluster qcs to cadence type") } + currEpochFinalView, err := epoch.FinalView() + if err != nil { + log.Fatal().Err(err).Msg("failed to get final view of current epoch") + } + args := []cadence.Value{ - 
cadence.NewUInt64(flagStartView), - cadence.NewUInt64(flagStakingEndView), - cadence.NewUInt64(flagEndView), + // epoch start view + cadence.NewUInt64(currEpochFinalView + 1), + // staking phase end view + cadence.NewUInt64(currEpochFinalView + flagNumViewsInStakingAuction), + // epoch end view + cadence.NewUInt64(currEpochFinalView + flagNumViewsInEpoch), + // dkg pub keys cadence.NewArray(dkgPubKeys), + // node ids cadence.NewArray(nodeIds), //common.ConvertClusterAssignmentsCdc(assignments), } From 6aacb018f07ab1d93a5a9b476abaa3274a4ca1f5 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Tue, 2 Apr 2024 00:53:40 -0400 Subject: [PATCH 040/148] use for range loop --- cmd/util/cmd/epochs/cmd/recover.go | 29 ++++++++++++++--------------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/cmd/util/cmd/epochs/cmd/recover.go b/cmd/util/cmd/epochs/cmd/recover.go index 6b716b63dee..9bfd894661d 100644 --- a/cmd/util/cmd/epochs/cmd/recover.go +++ b/cmd/util/cmd/epochs/cmd/recover.go @@ -125,14 +125,13 @@ func extractRecoverEpochArgs(snapshot *inmem.Snapshot) []cadence.Value { } log.Info().Msg("") - collectors.Map(func(identity flow.Identity) flow.Identity { - if _, ok := internalNodesMap[identity.NodeID]; ok { - internalCollectors = append(internalCollectors, &identity) + for _, collector := range collectors { + if _, ok := internalNodesMap[collector.NodeID]; ok { + internalCollectors = append(internalCollectors, collector) } else { - partnerCollectors = append(partnerCollectors, &identity) + partnerCollectors = append(partnerCollectors, collector) } - return identity - }) + } currentEpochDKG, err := epoch.DKG() if err != nil { @@ -156,25 +155,25 @@ func extractRecoverEpochArgs(snapshot *inmem.Snapshot) []cadence.Value { dkgPubKeys := make([]cadence.Value, 0) nodeIds := make([]cadence.Value, 0) - ids.Map(func(identity flow.Identity) flow.Identity { - if identity.GetRole() == flow.RoleConsensus { - dkgPubKey, keyShareErr := 
currentEpochDKG.KeyShare(identity.GetNodeID()) + + for _, id := range ids { + if id.GetRole() == flow.RoleConsensus { + dkgPubKey, keyShareErr := currentEpochDKG.KeyShare(id.GetNodeID()) if keyShareErr != nil { - log.Fatal().Err(keyShareErr).Msg(fmt.Sprintf("failed to get dkg pub key share for node: %s", identity.GetNodeID())) + log.Fatal().Err(keyShareErr).Msg(fmt.Sprintf("failed to get dkg pub key share for node: %s", id.GetNodeID())) } dkgPubKeyCdc, cdcErr := cadence.NewString(dkgPubKey.String()) if cdcErr != nil { - log.Fatal().Err(cdcErr).Msg(fmt.Sprintf("failed to get dkg pub key cadence string for node: %s", identity.GetNodeID())) + log.Fatal().Err(cdcErr).Msg(fmt.Sprintf("failed to get dkg pub key cadence string for node: %s", id.GetNodeID())) } dkgPubKeys = append(dkgPubKeys, dkgPubKeyCdc) } - nodeIdCdc, err := cadence.NewString(identity.GetNodeID().String()) + nodeIdCdc, err := cadence.NewString(id.GetNodeID().String()) if err != nil { - log.Fatal().Err(err).Msg(fmt.Sprintf("failed to convert node ID to cadence string: %s", identity.GetNodeID())) + log.Fatal().Err(err).Msg(fmt.Sprintf("failed to convert node ID to cadence string: %s", id.GetNodeID())) } nodeIds = append(nodeIds, nodeIdCdc) - return identity - }) + } // @TODO: cluster qcs are converted into flow.ClusterQCVoteData types, // we need a corresponding type in cadence on the FlowClusterQC contract From 82c5803847db73bf462674ce37ff80765dfab159 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Tue, 2 Apr 2024 01:09:39 -0400 Subject: [PATCH 041/148] dkg group key should be the first key in the array --- cmd/util/cmd/epochs/cmd/recover.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/cmd/util/cmd/epochs/cmd/recover.go b/cmd/util/cmd/epochs/cmd/recover.go index 9bfd894661d..1b6655ba4c4 100644 --- a/cmd/util/cmd/epochs/cmd/recover.go +++ b/cmd/util/cmd/epochs/cmd/recover.go @@ -156,6 +156,11 @@ func extractRecoverEpochArgs(snapshot *inmem.Snapshot) []cadence.Value { dkgPubKeys := 
make([]cadence.Value, 0) nodeIds := make([]cadence.Value, 0) + dkgGroupKeyCdc, cdcErr := cadence.NewString(currentEpochDKG.GroupKey().String()) + if cdcErr != nil { + log.Fatal().Err(cdcErr).Msg("failed to get dkg group key cadence string") + } + dkgPubKeys = append(dkgPubKeys, dkgGroupKeyCdc) for _, id := range ids { if id.GetRole() == flow.RoleConsensus { dkgPubKey, keyShareErr := currentEpochDKG.KeyShare(id.GetNodeID()) From 1d677902fbce280c008258503dbf8e7250b6ef8c Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Tue, 2 Apr 2024 01:10:03 -0400 Subject: [PATCH 042/148] Update cmd/util/cmd/common/clusters.go Co-authored-by: Alexander Hentschel --- cmd/util/cmd/common/clusters.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/cmd/util/cmd/common/clusters.go b/cmd/util/cmd/common/clusters.go index b8055acc2a1..cb1d23174a3 100644 --- a/cmd/util/cmd/common/clusters.go +++ b/cmd/util/cmd/common/clusters.go @@ -55,8 +55,9 @@ func ConstructClusterAssignment(log zerolog.Logger, partnerNodes, internalNodes // first, round-robin internal nodes into each cluster for i, node := range internals { - identifierLists[i%numCollectionClusters] = append(identifierLists[i%numCollectionClusters], node.NodeID) - constraint[i%numCollectionClusters] += 1 + clusterIndex := i % numCollectionClusters + identifierLists[clusterIndex] = append(identifierLists[clusterIndex], node.NodeID) + constraint[clusterIndex] += 1 } // next, round-robin partner nodes into each cluster From dd3022691531d2be4ad18224d7b2b2226797d4b4 Mon Sep 17 00:00:00 2001 From: Andrii Date: Tue, 2 Apr 2024 12:15:37 +0300 Subject: [PATCH 043/148] Refactored functions --- .../cohort2/observer_indexer_enabled_test.go | 46 +++++++++---------- 1 file changed, 21 insertions(+), 25 deletions(-) diff --git a/integration/tests/access/cohort2/observer_indexer_enabled_test.go b/integration/tests/access/cohort2/observer_indexer_enabled_test.go index a80e10af988..cb7273f65a6 100644 --- 
a/integration/tests/access/cohort2/observer_indexer_enabled_test.go +++ b/integration/tests/access/cohort2/observer_indexer_enabled_test.go @@ -387,11 +387,26 @@ func (s *ObserverIndexerEnabledSuite) TestAllObserverIndexedRPCsHappyPath() { }) require.NoError(t, err) + eventsByBlockID, err := observerLocal.GetEventsForBlockIDs(ctx, &accessproto.GetEventsForBlockIDsRequest{ + Type: sdk.EventAccountCreated, + BlockIds: [][]byte{blockWithAccount.Block.Id}, + EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, + }) + require.NoError(s.T(), err) + // GetEventsForBlockIDs - eventsByBlockID := s.checkGetEventsForBlockIDsRPC(ctx, observerLocal, observerUpstream, accessNode, [][]byte{blockWithAccount.Block.Id}) + s.checkGetEventsForBlockIDsRPC(ctx, eventsByBlockID, observerUpstream, accessNode, [][]byte{blockWithAccount.Block.Id}) + + eventsByHeight, err := observerLocal.GetEventsForHeightRange(ctx, &accessproto.GetEventsForHeightRangeRequest{ + Type: sdk.EventAccountCreated, + StartHeight: blockWithAccount.Block.Height, + EndHeight: blockWithAccount.Block.Height, + EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, + }) + require.NoError(s.T(), err) // GetEventsForHeightRange - eventsByHeight := s.checkGetEventsForHeightRangeRPC(ctx, observerLocal, observerUpstream, accessNode, blockWithAccount.Block.Height, blockWithAccount.Block.Height) + s.checkGetEventsForHeightRangeRPC(ctx, eventsByHeight, observerUpstream, accessNode, blockWithAccount.Block.Height, blockWithAccount.Block.Height) // validate that there is an event that we are looking for require.Equal(t, eventsByHeight.Results, eventsByBlockID.Results) @@ -652,18 +667,11 @@ func (s *ObserverIndexerEnabledSuite) getRestEndpoints() []RestEndpointTest { func (s *ObserverIndexerEnabledSuite) checkGetEventsForBlockIDsRPC( ctx context.Context, - observerLocal accessproto.AccessAPIClient, + observerLocalResponse *accessproto.EventsResponse, observerUpstream accessproto.AccessAPIClient, accessNode 
accessproto.AccessAPIClient, blockIds [][]byte, -) *accessproto.EventsResponse { - observerLocalResponse, err := observerLocal.GetEventsForBlockIDs(ctx, &accessproto.GetEventsForBlockIDsRequest{ - Type: sdk.EventAccountCreated, - BlockIds: blockIds, - EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, - }) - require.NoError(s.T(), err) - +) { observerUpstreamResponse, err := observerUpstream.GetEventsForBlockIDs(ctx, &accessproto.GetEventsForBlockIDsRequest{ Type: sdk.EventAccountCreated, BlockIds: blockIds, @@ -680,26 +688,16 @@ func (s *ObserverIndexerEnabledSuite) checkGetEventsForBlockIDsRPC( require.Equal(s.T(), accessNodeResponse.Results, observerLocalResponse.Results) require.Equal(s.T(), accessNodeResponse.Results, observerUpstreamResponse.Results) - - return observerLocalResponse } func (s *ObserverIndexerEnabledSuite) checkGetEventsForHeightRangeRPC( ctx context.Context, - observerLocal accessproto.AccessAPIClient, + observerLocalResponse *accessproto.EventsResponse, observerUpstream accessproto.AccessAPIClient, accessNode accessproto.AccessAPIClient, startHeight uint64, endHeight uint64, -) *accessproto.EventsResponse { - observerLocalResponse, err := observerLocal.GetEventsForHeightRange(ctx, &accessproto.GetEventsForHeightRangeRequest{ - Type: sdk.EventAccountCreated, - StartHeight: startHeight, - EndHeight: endHeight, - EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, - }) - require.NoError(s.T(), err) - +) { observerUpstreamResponse, err := observerUpstream.GetEventsForHeightRange(ctx, &accessproto.GetEventsForHeightRangeRequest{ Type: sdk.EventAccountCreated, StartHeight: startHeight, @@ -718,8 +716,6 @@ func (s *ObserverIndexerEnabledSuite) checkGetEventsForHeightRangeRPC( require.Equal(s.T(), accessNodeResponse.Results, observerLocalResponse.Results) require.Equal(s.T(), accessNodeResponse.Results, observerUpstreamResponse.Results) - - return observerLocalResponse } func (s *ObserverIndexerEnabledSuite) 
checkGetAccountAtBlockHeightRPC( From e8c89f666b8329a21ad888a1018b737846f90105 Mon Sep 17 00:00:00 2001 From: Andrii Slisarchuk Date: Tue, 2 Apr 2024 12:31:21 +0300 Subject: [PATCH 044/148] added tx result to subscription --- access/api.go | 15 ++ access/handler.go | 4 +- .../backend/backend_stream_transactions.go | 118 +++++++---- .../backend_stream_transactions_test.go | 186 +++++++++++------- engine/common/rpc/convert/transactions.go | 18 -- go.mod | 2 + go.sum | 4 +- 7 files changed, 214 insertions(+), 133 deletions(-) diff --git a/access/api.go b/access/api.go index 3201796c6ed..72fe855f98b 100644 --- a/access/api.go +++ b/access/api.go @@ -212,6 +212,21 @@ type TransactionResult struct { BlockHeight uint64 } +// TransactionSubscribeInfo represents information about a subscribed transaction. +// It contains the ID of the transaction, its status, and the index of the associated message. +type TransactionSubscribeInfo struct { + Result *TransactionResult + MessageIndex uint64 +} + +// TransactionSubscribeInfoToMessage converts a TransactionSubscribeInfo struct to a protobuf message +func TransactionSubscribeInfoToMessage(data *TransactionSubscribeInfo) *access.SendAndSubscribeTransactionStatusesResponse { + return &access.SendAndSubscribeTransactionStatusesResponse{ + TransactionResults: TransactionResultToMessage(data.Result), + MessageIndex: data.MessageIndex, + } +} + func TransactionResultToMessage(result *TransactionResult) *access.TransactionResultResponse { return &access.TransactionResultResponse{ Status: entities.TransactionStatus(result.Status), diff --git a/access/handler.go b/access/handler.go index 71e48511aca..c050964fe25 100644 --- a/access/handler.go +++ b/access/handler.go @@ -1113,8 +1113,8 @@ func (h *Handler) SendAndSubscribeTransactionStatuses( } sub := h.api.SubscribeTransactionStatuses(ctx, &tx) - return subscription.HandleSubscription(sub, func(txSubInfo *convert.TransactionSubscribeInfo) error { - err = 
stream.Send(convert.TransactionSubscribeInfoToMessage(txSubInfo)) + return subscription.HandleSubscription(sub, func(txSubInfo *TransactionSubscribeInfo) error { + err = stream.Send(TransactionSubscribeInfoToMessage(txSubInfo)) if err != nil { return rpc.ConvertError(err, "could not send response", codes.Internal) } diff --git a/engine/access/rpc/backend/backend_stream_transactions.go b/engine/access/rpc/backend/backend_stream_transactions.go index c18ce5905f8..d5188178b65 100644 --- a/engine/access/rpc/backend/backend_stream_transactions.go +++ b/engine/access/rpc/backend/backend_stream_transactions.go @@ -6,22 +6,22 @@ import ( "fmt" "time" - "github.com/onflow/flow-go/module/irrecoverable" - "github.com/onflow/flow-go/state" - - "github.com/onflow/flow-go/engine/common/rpc/convert" - "github.com/onflow/flow-go/module/counters" - "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "github.com/rs/zerolog" + "github.com/onflow/flow-go/access" "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/engine/access/subscription" "github.com/onflow/flow-go/engine/common/rpc" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/counters" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/state" "github.com/onflow/flow-go/storage" + + "github.com/onflow/flow/protobuf/go/flow/entities" ) // backendSubscribeTransactions handles transaction subscriptions. @@ -39,13 +39,11 @@ type backendSubscribeTransactions struct { // TransactionSubscriptionMetadata holds data representing the status state for each transaction subscription. 
type TransactionSubscriptionMetadata struct { - txID flow.Identifier + txResult *access.TransactionResult txReferenceBlockID flow.Identifier messageIndex counters.StrictMonotonousCounter blockWithTx *flow.Header - blockID flow.Identifier txExecuted bool - lastTxStatus flow.TransactionStatus } // SubscribeTransactionStatuses subscribes to transaction status changes starting from the transaction reference block ID. @@ -57,12 +55,14 @@ func (b *backendSubscribeTransactions) SubscribeTransactionStatuses(ctx context. } txInfo := TransactionSubscriptionMetadata{ - txID: tx.ID(), + txResult: &access.TransactionResult{ + TransactionID: tx.ID(), + BlockID: flow.ZeroID, + Status: flow.TransactionStatusUnknown, + }, txReferenceBlockID: tx.ReferenceBlockID, messageIndex: counters.NewMonotonousCounter(0), blockWithTx: nil, - blockID: flow.ZeroID, - lastTxStatus: flow.TransactionStatusUnknown, } sub := subscription.NewHeightBasedSubscription( @@ -80,6 +80,7 @@ func (b *backendSubscribeTransactions) SubscribeTransactionStatuses(ctx context. // subscription responses based on new blocks. 
func (b *backendSubscribeTransactions) getTransactionStatusResponse(txInfo *TransactionSubscriptionMetadata) func(context.Context, uint64) (interface{}, error) { return func(ctx context.Context, height uint64) (interface{}, error) { + // Get the highest available finalized block height highestHeight, err := b.blockTracker.GetHighestHeight(flow.BlockStatusFinalized) if err != nil { return nil, fmt.Errorf("could not get highest height for block %d: %w", height, err) @@ -92,13 +93,20 @@ func (b *backendSubscribeTransactions) getTransactionStatusResponse(txInfo *Tran return nil, fmt.Errorf("block %d is not available yet: %w", height, subscription.ErrBlockNotReady) } - if txInfo.lastTxStatus == flow.TransactionStatusSealed || txInfo.lastTxStatus == flow.TransactionStatusExpired { - return nil, fmt.Errorf("transaction final status %s was already reported: %w", txInfo.lastTxStatus.String(), subscription.ErrEndOfData) + // If the transaction status already reported the final status, return with no data available + if txInfo.txResult.Status == flow.TransactionStatusSealed || txInfo.txResult.Status == flow.TransactionStatusExpired { + return nil, fmt.Errorf("transaction final status %s was already reported: %w", txInfo.txResult.Status.String(), subscription.ErrEndOfData) } + // If on this step transaction block not available, search for it. if txInfo.blockWithTx == nil { - // Check if block contains transaction. - txInfo.blockWithTx, txInfo.blockID, err = b.searchForTransactionBlock(height, txInfo) + // Search for transaction`s block information. 
+ txInfo.blockWithTx, + txInfo.txResult.BlockID, + txInfo.txResult.BlockHeight, + txInfo.txResult.CollectionID, + err = b.searchForTransactionBlockInfo(height, txInfo) + if err != nil { if errors.Is(err, storage.ErrNotFound) { return nil, fmt.Errorf("could not find block %d in storage: %w", height, subscription.ErrBlockNotReady) @@ -112,18 +120,29 @@ func (b *backendSubscribeTransactions) getTransactionStatusResponse(txInfo *Tran // Find the transaction status. var txStatus flow.TransactionStatus + var txResult *access.TransactionResult + + // If block with transaction was not found, get transaction status to check if it different from last status if txInfo.blockWithTx == nil { txStatus, err = b.txLocalDataProvider.DeriveUnknownTransactionStatus(txInfo.txReferenceBlockID) } else { + // Check, if transaction executed and transaction result already available if !txInfo.txExecuted { - // Check if transaction was executed. - txInfo.txExecuted, err = b.searchForExecutionResult(txInfo.blockID) + txResult, err = b.searchForTransactionResult(ctx, txInfo.txResult.BlockID, txInfo.txResult.TransactionID) if err != nil { - return nil, status.Errorf(codes.Internal, "failed to get execution result for block %s: %v", txInfo.blockID, err) + return nil, status.Errorf(codes.Internal, "failed to get execution result for block %s: %v", txInfo.txResult.BlockID, err) } + //Fill in execution status for future usages + txInfo.txExecuted = txResult != nil } - txStatus, err = b.txLocalDataProvider.DeriveTransactionStatus(txInfo.blockID, txInfo.blockWithTx.Height, txInfo.txExecuted) + // If transaction result was found, fully replace it in metadata. New transaction status already included in result. 
+ if txResult != nil { + txInfo.txResult = txResult + } else { + //If transaction result was not found or already filed in, get transaction status to check if it different from last status + txStatus, err = b.txLocalDataProvider.DeriveTransactionStatus(txInfo.txResult.BlockID, txInfo.blockWithTx.Height, txInfo.txExecuted) + } } if err != nil { if !errors.Is(err, state.ErrUnknownSnapshotReference) { @@ -133,63 +152,84 @@ func (b *backendSubscribeTransactions) getTransactionStatusResponse(txInfo *Tran } // The same transaction status should not be reported, so return here with no response - if txInfo.lastTxStatus == txStatus { + if txInfo.txResult.Status == txStatus { return nil, nil } - txInfo.lastTxStatus = txStatus + + // If the current transaction status different from the last available status, assign new status to result + if txResult == nil { + txInfo.txResult.Status = txStatus + } messageIndex := txInfo.messageIndex.Value() if ok := txInfo.messageIndex.Set(messageIndex + 1); !ok { return nil, status.Errorf(codes.Internal, "the message index has already been incremented to %d", txInfo.messageIndex.Value()) } - return &convert.TransactionSubscribeInfo{ - ID: txInfo.txID, - Status: txInfo.lastTxStatus, + return &access.TransactionSubscribeInfo{ + Result: txInfo.txResult, MessageIndex: messageIndex, }, nil } } -// searchForTransactionBlock searches for the block containing the specified transaction. +// searchForTransactionBlockInfo searches for the block containing the specified transaction. // It retrieves the block at the given height and checks if the transaction is included in that block. 
// Expected errors: // - subscription.ErrBlockNotReady when unable to retrieve the block or collection ID // - codes.Internal when other errors occur during block or collection lookup -func (b *backendSubscribeTransactions) searchForTransactionBlock( +func (b *backendSubscribeTransactions) searchForTransactionBlockInfo( height uint64, txInfo *TransactionSubscriptionMetadata, -) (*flow.Header, flow.Identifier, error) { +) (*flow.Header, flow.Identifier, uint64, flow.Identifier, error) { block, err := b.txLocalDataProvider.blocks.ByHeight(height) if err != nil { - return nil, flow.ZeroID, fmt.Errorf("error looking up block: %w", err) + return nil, flow.ZeroID, 0, flow.ZeroID, fmt.Errorf("error looking up block: %w", err) } - collectionID, err := b.txLocalDataProvider.LookupCollectionIDInBlock(block, txInfo.txID) + collectionID, err := b.txLocalDataProvider.LookupCollectionIDInBlock(block, txInfo.txResult.TransactionID) if err != nil { - return nil, flow.ZeroID, fmt.Errorf("error looking up transaction in block: %w", err) + return nil, flow.ZeroID, 0, flow.ZeroID, fmt.Errorf("error looking up transaction in block: %w", err) } if collectionID != flow.ZeroID { - return block.Header, block.ID(), nil + return block.Header, block.ID(), height, collectionID, nil } - return nil, flow.ZeroID, nil + return nil, flow.ZeroID, 0, flow.ZeroID, nil } -// searchForExecutionResult searches for the execution result of a block. It retrieves the execution result for the specified block ID. +// searchForTransactionResult searches for the execution result of a block. It retrieves the execution result for the specified block ID. // Expected errors: // - codes.Internal if an internal error occurs while retrieving execution result. 
-func (b *backendSubscribeTransactions) searchForExecutionResult( +func (b *backendSubscribeTransactions) searchForTransactionResult( + ctx context.Context, blockID flow.Identifier, -) (bool, error) { + txID flow.Identifier, +) (*access.TransactionResult, error) { _, err := b.executionResults.ByBlockID(blockID) if err != nil { if errors.Is(err, storage.ErrNotFound) { - return false, nil + return nil, nil + } + return nil, fmt.Errorf("failed to get execution result for block %s: %w", blockID, err) + } + + block, err := b.txLocalDataProvider.blocks.ByID(blockID) + if err != nil { + return nil, fmt.Errorf("error looking up block: %w", err) + } + + txResult, err := b.txLocalDataProvider.GetTransactionResultFromStorage(ctx, block, txID, entities.EventEncodingVersion_CCF_V0) + if err != nil { + // if either the storage or execution node reported no results or there were not enough execution results + if status.Code(err) == codes.NotFound { + // No result yet, indicate that it has not been executed + return nil, nil } - return false, fmt.Errorf("failed to get execution result for block %s: %w", blockID, err) + // Other Error trying to retrieve the result, return with err + return nil, err } - return true, nil + return txResult, nil } diff --git a/engine/access/rpc/backend/backend_stream_transactions_test.go b/engine/access/rpc/backend/backend_stream_transactions_test.go index 54097bc0428..3a69ded6772 100644 --- a/engine/access/rpc/backend/backend_stream_transactions_test.go +++ b/engine/access/rpc/backend/backend_stream_transactions_test.go @@ -6,6 +6,10 @@ import ( "testing" "time" + "github.com/onflow/flow/protobuf/go/flow/entities" + + "github.com/onflow/flow-go/engine/common/rpc/convert" + protocolint "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/engine/access/index" @@ -20,13 +24,13 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + accessapi "github.com/onflow/flow-go/access" 
"github.com/onflow/flow-go/engine" access "github.com/onflow/flow-go/engine/access/mock" backendmock "github.com/onflow/flow-go/engine/access/rpc/backend/mock" connectionmock "github.com/onflow/flow-go/engine/access/rpc/connection/mock" "github.com/onflow/flow-go/engine/access/subscription" subscriptionmock "github.com/onflow/flow-go/engine/access/subscription/mock" - "github.com/onflow/flow-go/engine/common/rpc/convert" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/counters" "github.com/onflow/flow-go/module/metrics" @@ -135,6 +139,15 @@ func (s *TransactionStatusSuite) SetupTest() { s.reporter = syncmock.NewIndexReporter(s.T()) s.blocks.On("ByHeight", mock.AnythingOfType("uint64")).Return(mocks.StorageMapGetter(s.blockMap)) + s.blocks.On("ByID", mock.AnythingOfType("flow.Identifier")).Return(func(blockID flow.Identifier) (*flow.Block, error) { + for _, block := range s.blockMap { + if block.ID() == blockID { + return block, nil + } + } + + return nil, nil + }, nil) s.state.On("Final").Return(s.finalSnapshot, nil) s.state.On("AtBlockID", mock.AnythingOfType("flow.Identifier")).Return(func(blockID flow.Identifier) protocolint.Snapshot { @@ -167,7 +180,14 @@ func (s *TransactionStatusSuite) SetupTest() { }, nil) backendParams := s.backendParams() - err := backendParams.TxResultsIndex.Initialize(s.reporter) + s.reporter.On("LowestIndexedHeight").Return(s.rootBlock.Header.Height, nil) + s.reporter.On("HighestIndexedHeight").Return(func() (uint64, error) { + finalizedHeader := s.finalizedBlock.Header + return finalizedHeader.Height, nil + }, nil) + err := backendParams.EventsIndex.Initialize(s.reporter) + require.NoError(s.T(), err) + err = backendParams.TxResultsIndex.Initialize(s.reporter) require.NoError(s.T(), err) s.backend, err = New(backendParams) @@ -237,6 +257,29 @@ func (s *TransactionStatusSuite) TestSubscribeTransactionStatusHappyCase() { guarantee := col.Guarantee() light := col.Light() txId := transaction.ID() + txResult := 
flow.LightTransactionResult{ + TransactionID: txId, + Failed: false, + ComputationUsed: 0, + } + + eventsForTx := unittest.EventsFixture(1, flow.EventAccountCreated) + eventMessages := make([]*entities.Event, 1) + for j, event := range eventsForTx { + eventMessages[j] = convert.EventToMessage(event) + } + + s.events.On( + "ByBlockIDTransactionID", + mock.AnythingOfType("flow.Identifier"), + mock.AnythingOfType("flow.Identifier"), + ).Return(eventsForTx, nil) + + s.transactionResults.On( + "ByBlockIDTransactionID", + mock.AnythingOfType("flow.Identifier"), + mock.AnythingOfType("flow.Identifier"), + ).Return(&txResult, nil) expectedMsgIndexCounter := counters.NewMonotonousCounter(0) @@ -249,17 +292,17 @@ func (s *TransactionStatusSuite) TestSubscribeTransactionStatusHappyCase() { "channel closed while waiting for transaction info:\n\t- txID %x\n\t- blockID: %x \n\t- err: %v", txId, s.finalizedBlock.ID(), sub.Err()) - txInfo, ok := v.(*convert.TransactionSubscribeInfo) + txInfo, ok := v.(*accessapi.TransactionSubscribeInfo) require.True(s.T(), ok, "unexpected response type: %T", v) - assert.Equal(s.T(), txId, txInfo.ID) - assert.Equal(s.T(), expectedTxStatus, txInfo.Status) + assert.Equal(s.T(), txId, txInfo.Result.TransactionID) + assert.Equal(s.T(), expectedTxStatus, txInfo.Result.Status) expectedMsgIndex := expectedMsgIndexCounter.Value() assert.Equal(s.T(), expectedMsgIndex, txInfo.MessageIndex) wasSet := expectedMsgIndexCounter.Set(expectedMsgIndex + 1) require.True(s.T(), wasSet) - }, time.Second, fmt.Sprintf("timed out waiting for transaction info:\n\t- txID: %x\n\t- blockID: %x", txId, s.finalizedBlock.ID())) + }, 60*time.Second, fmt.Sprintf("timed out waiting for transaction info:\n\t- txID: %x\n\t- blockID: %x", txId, s.finalizedBlock.ID())) } // 1. Subscribe to transaction status and receive the first message with pending status @@ -277,7 +320,6 @@ func (s *TransactionStatusSuite) TestSubscribeTransactionStatusHappyCase() { // 3. 
Add one more finalized block on top of the transaction block and add execution results to storage finalizedResult := unittest.ExecutionResultFixture(unittest.WithBlock(s.finalizedBlock)) s.resultsMap[s.finalizedBlock.ID()] = finalizedResult - s.addNewFinalizedBlock(s.finalizedBlock.Header, true) checkNewSubscriptionMessage(sub, flow.TransactionStatusExecuted) @@ -299,68 +341,68 @@ func (s *TransactionStatusSuite) TestSubscribeTransactionStatusHappyCase() { }, 100*time.Millisecond, "timed out waiting for subscription to shutdown") } -// TestSubscribeTransactionStatusExpired tests the functionality of the SubscribeTransactionStatuses method in the Backend -// when transaction become expired -func (s *TransactionStatusSuite) TestSubscribeTransactionStatusExpired() { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - s.blocks.On("GetLastFullBlockHeight").Return(func() (uint64, error) { - return s.sealedBlock.Header.Height, nil - }, nil) - - // Generate sent transaction with ref block of the current finalized block - transaction := unittest.TransactionFixture() - transaction.SetReferenceBlockID(s.finalizedBlock.ID()) - txId := transaction.ID() - - expectedMsgIndexCounter := counters.NewMonotonousCounter(0) - - // Create a special common function to read subscription messages from the channel and check converting it to transaction info - // and check results for correctness - checkNewSubscriptionMessage := func(sub subscription.Subscription, expectedTxStatus flow.TransactionStatus) { - unittest.RequireReturnsBefore(s.T(), func() { - v, ok := <-sub.Channel() - require.True(s.T(), ok, - "channel closed while waiting for transaction info:\n\t- txID %x\n\t- blockID: %x \n\t- err: %v", - txId, s.finalizedBlock.ID(), sub.Err()) - - txInfo, ok := v.(*convert.TransactionSubscribeInfo) - require.True(s.T(), ok, "unexpected response type: %T", v) - - assert.Equal(s.T(), txId, txInfo.ID) - assert.Equal(s.T(), expectedTxStatus, txInfo.Status) - - 
expectedMsgIndex := expectedMsgIndexCounter.Value() - assert.Equal(s.T(), expectedMsgIndex, txInfo.MessageIndex) - wasSet := expectedMsgIndexCounter.Set(expectedMsgIndex + 1) - require.True(s.T(), wasSet) - }, time.Second, fmt.Sprintf("timed out waiting for transaction info:\n\t- txID: %x\n\t- blockID: %x", txId, s.finalizedBlock.ID())) - } - - // Subscribe to transaction status and receive the first message with pending status - sub := s.backend.SubscribeTransactionStatuses(ctx, &transaction.TransactionBody) - checkNewSubscriptionMessage(sub, flow.TransactionStatusPending) - - // Generate 600 blocks without transaction included and check, that transaction still pending - startHeight := s.finalizedBlock.Header.Height + 1 - lastHeight := startHeight + flow.DefaultTransactionExpiry - - for i := startHeight; i <= lastHeight; i++ { - s.sealedBlock = s.finalizedBlock - s.addNewFinalizedBlock(s.sealedBlock.Header, false) - } - - // Generate final blocks and check transaction expired - s.sealedBlock = s.finalizedBlock - s.addNewFinalizedBlock(s.sealedBlock.Header, true) - checkNewSubscriptionMessage(sub, flow.TransactionStatusExpired) - - // Ensure subscription shuts down gracefully - unittest.RequireReturnsBefore(s.T(), func() { - v, ok := <-sub.Channel() - assert.Nil(s.T(), v) - assert.False(s.T(), ok) - assert.NoError(s.T(), sub.Err()) - }, 100*time.Millisecond, "timed out waiting for subscription to shutdown") -} +//// TestSubscribeTransactionStatusExpired tests the functionality of the SubscribeTransactionStatuses method in the Backend +//// when transaction become expired +//func (s *TransactionStatusSuite) TestSubscribeTransactionStatusExpired() { +// ctx, cancel := context.WithCancel(context.Background()) +// defer cancel() +// +// s.blocks.On("GetLastFullBlockHeight").Return(func() (uint64, error) { +// return s.sealedBlock.Header.Height, nil +// }, nil) +// +// // Generate sent transaction with ref block of the current finalized block +// transaction := 
unittest.TransactionFixture() +// transaction.SetReferenceBlockID(s.finalizedBlock.ID()) +// txId := transaction.ID() +// +// expectedMsgIndexCounter := counters.NewMonotonousCounter(0) +// +// // Create a special common function to read subscription messages from the channel and check converting it to transaction info +// // and check results for correctness +// checkNewSubscriptionMessage := func(sub subscription.Subscription, expectedTxStatus flow.TransactionStatus) { +// unittest.RequireReturnsBefore(s.T(), func() { +// v, ok := <-sub.Channel() +// require.True(s.T(), ok, +// "channel closed while waiting for transaction info:\n\t- txID %x\n\t- blockID: %x \n\t- err: %v", +// txId, s.finalizedBlock.ID(), sub.Err()) +// +// txInfo, ok := v.(*accessapi.TransactionSubscribeInfo) +// require.True(s.T(), ok, "unexpected response type: %T", v) +// +// assert.Equal(s.T(), txId, txInfo.Result.TransactionID) +// assert.Equal(s.T(), expectedTxStatus, txInfo.Result.Status) +// +// expectedMsgIndex := expectedMsgIndexCounter.Value() +// assert.Equal(s.T(), expectedMsgIndex, txInfo.MessageIndex) +// wasSet := expectedMsgIndexCounter.Set(expectedMsgIndex + 1) +// require.True(s.T(), wasSet) +// }, time.Second, fmt.Sprintf("timed out waiting for transaction info:\n\t- txID: %x\n\t- blockID: %x", txId, s.finalizedBlock.ID())) +// } +// +// // Subscribe to transaction status and receive the first message with pending status +// sub := s.backend.SubscribeTransactionStatuses(ctx, &transaction.TransactionBody) +// checkNewSubscriptionMessage(sub, flow.TransactionStatusPending) +// +// // Generate 600 blocks without transaction included and check, that transaction still pending +// startHeight := s.finalizedBlock.Header.Height + 1 +// lastHeight := startHeight + flow.DefaultTransactionExpiry +// +// for i := startHeight; i <= lastHeight; i++ { +// s.sealedBlock = s.finalizedBlock +// s.addNewFinalizedBlock(s.sealedBlock.Header, false) +// } +// +// // Generate final blocks and 
check transaction expired +// s.sealedBlock = s.finalizedBlock +// s.addNewFinalizedBlock(s.sealedBlock.Header, true) +// checkNewSubscriptionMessage(sub, flow.TransactionStatusExpired) +// +// // Ensure subscription shuts down gracefully +// unittest.RequireReturnsBefore(s.T(), func() { +// v, ok := <-sub.Channel() +// assert.Nil(s.T(), v) +// assert.False(s.T(), ok) +// assert.NoError(s.T(), sub.Err()) +// }, 100*time.Millisecond, "timed out waiting for subscription to shutdown") +//} diff --git a/engine/common/rpc/convert/transactions.go b/engine/common/rpc/convert/transactions.go index 6b92f419fdd..221f41b0936 100644 --- a/engine/common/rpc/convert/transactions.go +++ b/engine/common/rpc/convert/transactions.go @@ -1,29 +1,11 @@ package convert import ( - "github.com/onflow/flow/protobuf/go/flow/access" "github.com/onflow/flow/protobuf/go/flow/entities" "github.com/onflow/flow-go/model/flow" ) -// TransactionSubscribeInfo represents information about a subscribed transaction. -// It contains the ID of the transaction, its status, and the index of the associated message. 
-type TransactionSubscribeInfo struct { - ID flow.Identifier - Status flow.TransactionStatus - MessageIndex uint64 -} - -// TransactionSubscribeInfoToMessage converts a TransactionSubscribeInfo struct to a protobuf message -func TransactionSubscribeInfoToMessage(data *TransactionSubscribeInfo) *access.SendAndSubscribeTransactionStatusesResponse { - return &access.SendAndSubscribeTransactionStatusesResponse{ - Id: data.ID[:], - Status: entities.TransactionStatus(data.Status), - MessageIndex: data.MessageIndex, - } -} - // TransactionToMessage converts a flow.TransactionBody to a protobuf message func TransactionToMessage(tb flow.TransactionBody) *entities.Transaction { proposalKeyMessage := &entities.Transaction_ProposalKey{ diff --git a/go.mod b/go.mod index 9d85128975a..900297a87de 100644 --- a/go.mod +++ b/go.mod @@ -320,3 +320,5 @@ require ( // Using custom fork until https://github.com/onflow/flow-go/issues/5338 is resolved replace github.com/ipfs/boxo => github.com/onflow/boxo v0.0.0-20240201202436-f2477b92f483 + +replace github.com/onflow/flow/protobuf/go/flow v0.3.7-0.20240305102946-3efec6679252 => github.com/The-K-R-O-K/flow/protobuf/go/flow v0.0.0-20240326125130-d668d54a6c4c diff --git a/go.sum b/go.sum index b3f3bb2354c..e103b9ed4c0 100644 --- a/go.sum +++ b/go.sum @@ -111,6 +111,8 @@ github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMx github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= +github.com/The-K-R-O-K/flow/protobuf/go/flow v0.0.0-20240326125130-d668d54a6c4c h1:TUP9qXmzeERCNZ5HAxh99epDSnfxA7Ksyv/71n+avdw= +github.com/The-K-R-O-K/flow/protobuf/go/flow v0.0.0-20240326125130-d668d54a6c4c/go.mod h1:NA2pX2nw8zuaxfKphhKsk00kWLwfd+tv8mS23YXO4Sk= github.com/VictoriaMetrics/fastcache 
v1.5.3/go.mod h1:+jv9Ckb+za/P1ZRg/sulP5Ni1v49daAVERr0H3CuscE= github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40= github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o= @@ -1370,8 +1372,6 @@ github.com/onflow/flow-go/crypto v0.21.3/go.mod h1:vI6V4CY3R6c4JKBxdcRiR/AnjBfL8 github.com/onflow/flow-nft/lib/go/contracts v1.1.0 h1:rhUDeD27jhLwOqQKI/23008CYfnqXErrJvc4EFRP2a0= github.com/onflow/flow-nft/lib/go/contracts v1.1.0/go.mod h1:YsvzYng4htDgRB9sa9jxdwoTuuhjK8WYWXTyLkIigZY= github.com/onflow/flow/protobuf/go/flow v0.2.2/go.mod h1:gQxYqCfkI8lpnKsmIjwtN2mV/N2PIwc1I+RUK4HPIc8= -github.com/onflow/flow/protobuf/go/flow v0.3.7-0.20240305102946-3efec6679252 h1:W0xm80Qc5RkFJw7yQIj7OiMacCZw3et/tx/5N9rN2qk= -github.com/onflow/flow/protobuf/go/flow v0.3.7-0.20240305102946-3efec6679252/go.mod h1:NA2pX2nw8zuaxfKphhKsk00kWLwfd+tv8mS23YXO4Sk= github.com/onflow/go-ethereum v1.13.4 h1:iNO86fm8RbBbhZ87ZulblInqCdHnAQVY8okBrNsTevc= github.com/onflow/go-ethereum v1.13.4/go.mod h1:cE/gEUkAffhwbVmMJYz+t1dAfVNHNwZCgc3BWtZxBGY= github.com/onflow/sdks v0.5.0 h1:2HCRibwqDaQ1c9oUApnkZtEAhWiNY2GTpRD5+ftdkN8= From 8d6611f5cf9b732e58ed2e21d438683850f08b78 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Tue, 2 Apr 2024 10:51:06 -0400 Subject: [PATCH 045/148] add godoc for ConstructRootQCsForClusters --- cmd/util/cmd/common/clusters.go | 25 ++++++++++++++++++++++--- 1 file changed, 22 insertions(+), 3 deletions(-) diff --git a/cmd/util/cmd/common/clusters.go b/cmd/util/cmd/common/clusters.go index 6f2b67f500c..2a335358c16 100644 --- a/cmd/util/cmd/common/clusters.go +++ b/cmd/util/cmd/common/clusters.go @@ -18,7 +18,6 @@ import ( ) // ConstructClusterAssignment random cluster assignment with internal and partner nodes. -// The number of clusters is read from the `flagCollectionClusters` flag. 
 // The number of nodes in each cluster is deterministic and only depends on the number of clusters
 // and the number of nodes. The repartition of internal and partner nodes is also deterministic
 // and only depends on the number of clusters and nodes.
@@ -29,6 +28,15 @@ import (
 // satisfied, an exception is returned.
 // Note that if an exception is returned with a certain number of internal/partner nodes, there is no chance
 // of succeeding the assignment by re-running the function without increasing the internal nodes ratio.
+// Args:
+// - log: the logger instance.
+// - partnerNodes: identity list of partner nodes.
+// - internalNodes: identity list of internal nodes.
+// - numCollectionClusters: the number of collection clusters to generate.
+// Returns:
+// - flow.AssignmentList: the generated assignment list.
+// - flow.ClusterList: the generated collection cluster list.
+// - error: if any error occurs. Any error returned from this function is irrecoverable.
 func ConstructClusterAssignment(log zerolog.Logger, partnerNodes, internalNodes flow.IdentityList, numCollectionClusters int) (flow.AssignmentList, flow.ClusterList, error) {
 
 	partners := partnerNodes.Filter(filter.HasRole[flow.Identity](flow.RoleCollection))
@@ -64,8 +72,9 @@ func ConstructClusterAssignment(log zerolog.Logger, partnerNodes, internalNodes
 
 	// next, round-robin partner nodes into each cluster
 	for i, node := range partners {
-		identifierLists[i%len(identifierLists)] = append(identifierLists[i%len(identifierLists)], node.NodeID)
-		constraint[i%numCollectionClusters] -= 2
+		clusterIndex := i % numCollectionClusters
+		identifierLists[clusterIndex] = append(identifierLists[clusterIndex], node.NodeID)
+		constraint[clusterIndex] -= 2
 	}
 
 	// check the 2/3 constraint: for every cluster `i`, constraint[i] must be strictly positive
@@ -86,6 +95,16 @@ func ConstructClusterAssignment(log zerolog.Logger, partnerNodes, internalNodes
 	return assignments, clusters, nil
 }
 
+// ConstructRootQCsForClusters
 constructs a root QC for each cluster in the list.
+// Args:
+// - log: the logger instance.
+// - clusterList: list of clusters to construct root QCs for.
+// - nodeInfos: list of internal node infos used to construct the QCs.
+// - clusterBlocks: list of cluster root blocks, one for each cluster.
+// Returns:
+// - []*flow.QuorumCertificate: the generated root QCs, one per cluster,
+// in the same order as clusterList. Any error encountered while constructing
+// the QCs is considered irrecoverable.
 func ConstructRootQCsForClusters(log zerolog.Logger, clusterList flow.ClusterList, nodeInfos []bootstrap.NodeInfo, clusterBlocks []*cluster.Block) []*flow.QuorumCertificate {
 
 	if len(clusterBlocks) != len(clusterList) {

From 263974e804bf6a155ff1d9f35c2043a2b9fd552e Mon Sep 17 00:00:00 2001
From: Khalil Claybon
Date: Tue, 2 Apr 2024 11:08:21 -0400
Subject: [PATCH 046/148] add godoc for *PartnerInfo util funcs

---
 cmd/bootstrap/cmd/finalize.go    |  2 +-
 cmd/bootstrap/cmd/rootblock.go   |  2 +-
 cmd/util/cmd/common/node_info.go | 44 ++++++++++++++++++++++++++------
 3 files changed, 38 insertions(+), 10 deletions(-)

diff --git a/cmd/bootstrap/cmd/finalize.go b/cmd/bootstrap/cmd/finalize.go
index e4bf67fca96..ea4a2c441c7 100644
--- a/cmd/bootstrap/cmd/finalize.go
+++ b/cmd/bootstrap/cmd/finalize.go
@@ -112,7 +112,7 @@ func finalize(cmd *cobra.Command, args []string) {
 	}
 
 	log.Info().Msg("collecting partner network and staking keys")
-	partnerNodes := common.ReadPartnerNodeInfos(log, flagPartnerWeights, flagPartnerNodeInfoDir)
+	partnerNodes := common.ReadFullPartnerNodeInfos(log, flagPartnerWeights, flagPartnerNodeInfoDir)
 
 	log.Info().Msg("")
 	log.Info().Msg("generating internal private networking and staking keys")
diff --git a/cmd/bootstrap/cmd/rootblock.go b/cmd/bootstrap/cmd/rootblock.go
index cb6085eb383..eaf896fa903 100644
--- a/cmd/bootstrap/cmd/rootblock.go
+++ b/cmd/bootstrap/cmd/rootblock.go
@@ -145,7 +145,7 @@ func rootBlock(cmd *cobra.Command, args []string) {
 	}
 
 	log.Info().Msg("collecting
partner network and staking keys") - partnerNodes := common.ReadPartnerNodeInfos(log, flagPartnerWeights, flagPartnerNodeInfoDir) + partnerNodes := common.ReadFullPartnerNodeInfos(log, flagPartnerWeights, flagPartnerNodeInfoDir) log.Info().Msg("") log.Info().Msg("generating internal private networking and staking keys") diff --git a/cmd/util/cmd/common/node_info.go b/cmd/util/cmd/common/node_info.go index 2df112f2817..d90ef6a59a6 100644 --- a/cmd/util/cmd/common/node_info.go +++ b/cmd/util/cmd/common/node_info.go @@ -1,6 +1,7 @@ package common import ( + "fmt" "strings" "github.com/rs/zerolog" @@ -9,18 +10,23 @@ import ( "github.com/onflow/flow-go/model/flow" ) -// ReadPartnerNodeInfos returns a list of partner nodes after gathering weights -// and public key information from configuration files -func ReadPartnerNodeInfos(log zerolog.Logger, partnerWeightsPath, partnerNodeInfoDir string) []bootstrap.NodeInfo { +// ReadFullPartnerNodeInfos reads partner node info and partner weight information from the specified paths and constructs +// a list of full bootstrap.NodeInfo for each partner node. +// Args: +// - log: the logger instance. +// - partnerWeightsPath: path to partner weights configuration file. +// - partnerNodeInfoDir: path to partner nodes configuration file. +// Returns: +// - []bootstrap.NodeInfo: the generated node info list. +// - error: if any error occurs. Any error returned from this function is irrecoverable. 
+func ReadFullPartnerNodeInfos(log zerolog.Logger, partnerWeightsPath, partnerNodeInfoDir string) []bootstrap.NodeInfo { partners := ReadPartnerNodes(log, partnerNodeInfoDir) log.Info().Msgf("read %d partner node configuration files", len(partners)) - var weights PartnerWeights - err := ReadJSON(partnerWeightsPath, &weights) + weights, err := ReadPartnerWeights(partnerWeightsPath) if err != nil { - log.Fatal().Err(err).Msg("failed to read partner weights json") + log.Fatal().Err(fmt.Errorf("failed to read partner weights: %w", err)) } - log.Info().Msgf("read %d weights for partner nodes", len(weights)) var nodes []bootstrap.NodeInfo for _, partner := range partners { @@ -51,7 +57,29 @@ func ReadPartnerNodeInfos(log zerolog.Logger, partnerWeightsPath, partnerNodeInf return nodes } -// ReadPartnerNodes reads the partner node information +// ReadPartnerWeights reads the partner weights configuration file and returns a list of PartnerWeights. +// Args: +// - partnerWeightsPath: path to partner weights configuration file. +// Returns: +// - PartnerWeights: the generated partner weights list. +// - error: if any error occurs. Any error returned from this function is irrecoverable. +func ReadPartnerWeights(partnerWeightsPath string) (PartnerWeights, error) { + var weights PartnerWeights + + err := ReadJSON(partnerWeightsPath, &weights) + if err != nil { + return nil, fmt.Errorf("failed to read partner weights json: %w", err) + } + + return weights, nil +} + +// ReadPartnerNodes reads the partner node info from the configuration file and returns a list of []bootstrap.NodeInfoPub. +// Args: +// - partnerNodeInfoDir: path to partner nodes configuration file. +// Returns: +// - []bootstrap.NodeInfoPub: the generated partner node info list. +// - error: if any error occurs. Any error returned from this function is irrecoverable. 
func ReadPartnerNodes(log zerolog.Logger, partnerNodeInfoDir string) []bootstrap.NodeInfoPub { var partners []bootstrap.NodeInfoPub files, err := FilesInDir(partnerNodeInfoDir) From c6a1989e7e9883ef6a9c6a96aac3b74f29c3636c Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Tue, 2 Apr 2024 11:14:00 -0400 Subject: [PATCH 047/148] document GetSnapshotAtEpochAndPhase arguments --- cmd/util/cmd/common/snapshot.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/cmd/util/cmd/common/snapshot.go b/cmd/util/cmd/common/snapshot.go index 8bca3b88a9a..5d73895d5ff 100644 --- a/cmd/util/cmd/common/snapshot.go +++ b/cmd/util/cmd/common/snapshot.go @@ -43,6 +43,16 @@ func GetSnapshot(ctx context.Context, client *grpc.Client) (*inmem.Snapshot, err // GetSnapshotAtEpochAndPhase will get the latest finalized protocol snapshot and check the current epoch and epoch phase. // If we are past the target epoch and epoch phase we exit the retry mechanism immediately. // If not check the snapshot at the specified interval until we reach the target epoch and phase. +// Args: +// - ctx: context used when getting the snapshot from the network. +// - log: the logger +// - startupEpoch: the desired epoch in which to take a snapshot for startup. +// - startupEpochPhase: the desired epoch phase in which to take a snapshot for startup. +// - retryInterval: sleep interval used to retry getting the snapshot from the network in our desired epoch and epoch phase. +// - getSnapshot: func used to get the snapshot. +// Returns: +// - protocol.Snapshot: the protocol snapshot. +// - error: if any error occurs. Any error returned from this function is irrecoverable. 
func GetSnapshotAtEpochAndPhase(ctx context.Context, log zerolog.Logger, startupEpoch uint64, startupEpochPhase flow.EpochPhase, retryInterval time.Duration, getSnapshot GetProtocolSnapshot) (protocol.Snapshot, error) { start := time.Now() From 32df1042c22c35a596f9b8e669619e7537c235a9 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Tue, 2 Apr 2024 12:42:53 -0400 Subject: [PATCH 048/148] refactor fatal level logs - emit fatal level logs from the caller , update funcs to returns errs instead --- cmd/bootstrap/cmd/final_list.go | 41 ++++++++--- cmd/bootstrap/cmd/finalize.go | 11 ++- cmd/bootstrap/cmd/rootblock.go | 11 ++- cmd/util/cmd/common/node_info.go | 106 ++++++++++++++++++----------- cmd/util/cmd/common/utils.go | 42 +++++++++--- cmd/util/cmd/epochs/cmd/recover.go | 6 +- 6 files changed, 152 insertions(+), 65 deletions(-) diff --git a/cmd/bootstrap/cmd/final_list.go b/cmd/bootstrap/cmd/final_list.go index f1a1e5b1901..709e041ff63 100644 --- a/cmd/bootstrap/cmd/final_list.go +++ b/cmd/bootstrap/cmd/final_list.go @@ -1,6 +1,7 @@ package cmd import ( + "fmt" "github.com/spf13/cobra" "github.com/onflow/flow-go/cmd" @@ -233,7 +234,11 @@ func checkMismatchingNodes(localNodes []model.NodeInfo, registeredNodes []model. 
} func assembleInternalNodesWithoutWeight() []model.NodeInfo { - privInternals := common.ReadInternalNodes(log, flagInternalNodePrivInfoDir) + privInternals, err := common.ReadInternalNodeInfos(flagInternalNodePrivInfoDir) + if err != nil { + log.Fatal().Err(err).Msg("failed to read internal node infos") + } + log.Info().Msgf("read %v internal private node-info files", len(privInternals)) var nodes []model.NodeInfo @@ -242,9 +247,13 @@ func assembleInternalNodesWithoutWeight() []model.NodeInfo { common.ValidateAddressFormat(log, internal.Address) // validate every single internal node - nodeID := common.ValidateNodeID(log, internal.NodeID) + err := common.ValidateNodeID(internal.NodeID) + if err != nil { + log.Fatal().Err(err).Msg(fmt.Sprintf("invalid node ID: %s", internal.NodeID)) + } + node := model.NewPrivateNodeInfo( - nodeID, + internal.NodeID, internal.Role, internal.Address, flow.DefaultInitialWeight, @@ -259,7 +268,10 @@ func assembleInternalNodesWithoutWeight() []model.NodeInfo { } func assemblePartnerNodesWithoutWeight() []model.NodeInfo { - partners := common.ReadPartnerNodes(log, flagPartnerNodeInfoDir) + partners, err := common.ReadPartnerNodeInfos(flagPartnerNodeInfoDir) + if err != nil { + log.Fatal().Err(err).Msg("failed to read partner node infos") + } log.Info().Msgf("read %v partner node configuration files", len(partners)) return createPublicNodeInfo(partners) } @@ -279,18 +291,27 @@ func createPublicNodeInfo(nodes []model.NodeInfoPub) []model.NodeInfo { common.ValidateAddressFormat(log, n.Address) // validate every single partner node - nodeID := common.ValidateNodeID(log, n.NodeID) - networkPubKey := common.ValidateNetworkPubKey(log, n.NetworkPubKey) - stakingPubKey := common.ValidateStakingPubKey(log, n.StakingPubKey) + err := common.ValidateNodeID(n.NodeID) + if err != nil { + log.Fatal().Err(err).Msg(fmt.Sprintf("invalid node ID: %s", n.NodeID)) + } + err = common.ValidateNetworkPubKey(n.NetworkPubKey) + if err != nil { + 
log.Fatal().Err(err).Msg(fmt.Sprintf("invalid network public key: %s", n.NetworkPubKey)) + } + err = common.ValidateStakingPubKey(n.StakingPubKey) + if err != nil { + log.Fatal().Err(err).Msg(fmt.Sprintf("invalid staking public key: %s", n.StakingPubKey)) + } // all nodes should have equal weight node := model.NewPublicNodeInfo( - nodeID, + n.NodeID, n.Role, n.Address, flow.DefaultInitialWeight, - networkPubKey, - stakingPubKey, + n.NetworkPubKey, + n.StakingPubKey, ) publicInfoNodes = append(publicInfoNodes, node) diff --git a/cmd/bootstrap/cmd/finalize.go b/cmd/bootstrap/cmd/finalize.go index ea4a2c441c7..86b95e0d75a 100644 --- a/cmd/bootstrap/cmd/finalize.go +++ b/cmd/bootstrap/cmd/finalize.go @@ -112,11 +112,18 @@ func finalize(cmd *cobra.Command, args []string) { } log.Info().Msg("collecting partner network and staking keys") - partnerNodes := common.ReadFullPartnerNodeInfos(log, flagPartnerWeights, flagPartnerNodeInfoDir) + partnerNodes, err := common.ReadFullPartnerNodeInfos(log, flagPartnerWeights, flagPartnerNodeInfoDir) + if err != nil { + log.Fatal().Err(err).Msg("failed to read full partner node infos") + } log.Info().Msg("") log.Info().Msg("generating internal private networking and staking keys") - internalNodes := common.ReadInternalNodeInfos(log, flagInternalNodePrivInfoDir, flagConfig) + internalNodes, err := common.ReadFullInternalNodeInfos(log, flagInternalNodePrivInfoDir, flagConfig) + if err != nil { + log.Fatal().Err(err).Msg("failed to read full internal node infos") + } + log.Info().Msg("") log.Info().Msg("checking constraints on consensus nodes") diff --git a/cmd/bootstrap/cmd/rootblock.go b/cmd/bootstrap/cmd/rootblock.go index eaf896fa903..7b2f97c8923 100644 --- a/cmd/bootstrap/cmd/rootblock.go +++ b/cmd/bootstrap/cmd/rootblock.go @@ -145,11 +145,18 @@ func rootBlock(cmd *cobra.Command, args []string) { } log.Info().Msg("collecting partner network and staking keys") - partnerNodes := common.ReadFullPartnerNodeInfos(log, flagPartnerWeights, 
flagPartnerNodeInfoDir) + partnerNodes, err := common.ReadFullPartnerNodeInfos(log, flagPartnerWeights, flagPartnerNodeInfoDir) + if err != nil { + log.Fatal().Err(err).Msg("failed to read full partner node infos") + } log.Info().Msg("") log.Info().Msg("generating internal private networking and staking keys") - internalNodes := common.ReadInternalNodeInfos(log, flagInternalNodePrivInfoDir, flagConfig) + internalNodes, err := common.ReadFullInternalNodeInfos(log, flagInternalNodePrivInfoDir, flagConfig) + if err != nil { + log.Fatal().Err(err).Msg("failed to read full internal node infos") + } + log.Info().Msg("") log.Info().Msg("checking constraints on consensus nodes") diff --git a/cmd/util/cmd/common/node_info.go b/cmd/util/cmd/common/node_info.go index d90ef6a59a6..a03fd27eaf7 100644 --- a/cmd/util/cmd/common/node_info.go +++ b/cmd/util/cmd/common/node_info.go @@ -13,48 +13,61 @@ import ( // ReadFullPartnerNodeInfos reads partner node info and partner weight information from the specified paths and constructs // a list of full bootstrap.NodeInfo for each partner node. // Args: -// - log: the logger instance. +// - log: logger used to log debug information. // - partnerWeightsPath: path to partner weights configuration file. // - partnerNodeInfoDir: path to partner nodes configuration file. // Returns: // - []bootstrap.NodeInfo: the generated node info list. // - error: if any error occurs. Any error returned from this function is irrecoverable. 
-func ReadFullPartnerNodeInfos(log zerolog.Logger, partnerWeightsPath, partnerNodeInfoDir string) []bootstrap.NodeInfo { - partners := ReadPartnerNodes(log, partnerNodeInfoDir) +func ReadFullPartnerNodeInfos(log zerolog.Logger, partnerWeightsPath, partnerNodeInfoDir string) ([]bootstrap.NodeInfo, error) { + partners, err := ReadPartnerNodeInfos(partnerNodeInfoDir) + if err != nil { + return nil, err + } log.Info().Msgf("read %d partner node configuration files", len(partners)) weights, err := ReadPartnerWeights(partnerWeightsPath) if err != nil { - log.Fatal().Err(fmt.Errorf("failed to read partner weights: %w", err)) + return nil, err } + log.Info().Msgf("read %d weights for partner nodes", len(weights)) var nodes []bootstrap.NodeInfo for _, partner := range partners { // validate every single partner node - nodeID := ValidateNodeID(log, partner.NodeID) - networkPubKey := ValidateNetworkPubKey(log, partner.NetworkPubKey) - stakingPubKey := ValidateStakingPubKey(log, partner.StakingPubKey) - weight, valid := ValidateWeight(weights[partner.NodeID]) - if !valid { - log.Error().Msgf("weights: %v", weights) - log.Fatal().Msgf("partner node id %x has no weight", nodeID) + err = ValidateNodeID(partner.NodeID) + if err != nil { + return nil, fmt.Errorf("invalid node ID: %s", partner.NodeID) } + err = ValidateNetworkPubKey(partner.NetworkPubKey) + if err != nil { + return nil, fmt.Errorf(fmt.Sprintf("invalid network public key: %s", partner.NetworkPubKey)) + } + err = ValidateStakingPubKey(partner.StakingPubKey) + if err != nil { + return nil, fmt.Errorf(fmt.Sprintf("invalid staking public key: %s", partner.StakingPubKey)) + } + weight := weights[partner.NodeID] + if valid := ValidateWeight(weight); !valid { + return nil, fmt.Errorf(fmt.Sprintf("invalid partner weight: %d", weight)) + } + if weight != flow.DefaultInitialWeight { log.Warn().Msgf("partner node (id=%x) has non-default weight (%d != %d)", partner.NodeID, weight, flow.DefaultInitialWeight) } node := 
bootstrap.NewPublicNodeInfo( - nodeID, + partner.NodeID, partner.Role, partner.Address, weight, - networkPubKey.PublicKey, - stakingPubKey.PublicKey, + partner.NetworkPubKey.PublicKey, + partner.StakingPubKey.PublicKey, ) nodes = append(nodes, node) } - return nodes + return nodes, nil } // ReadPartnerWeights reads the partner weights configuration file and returns a list of PartnerWeights. @@ -70,43 +83,52 @@ func ReadPartnerWeights(partnerWeightsPath string) (PartnerWeights, error) { if err != nil { return nil, fmt.Errorf("failed to read partner weights json: %w", err) } - return weights, nil } -// ReadPartnerNodes reads the partner node info from the configuration file and returns a list of []bootstrap.NodeInfoPub. +// ReadPartnerNodeInfos reads the partner node info from the configuration file and returns a list of []bootstrap.NodeInfoPub. // Args: // - partnerNodeInfoDir: path to partner nodes configuration file. // Returns: // - []bootstrap.NodeInfoPub: the generated partner node info list. // - error: if any error occurs. Any error returned from this function is irrecoverable. 
-func ReadPartnerNodes(log zerolog.Logger, partnerNodeInfoDir string) []bootstrap.NodeInfoPub { +func ReadPartnerNodeInfos(partnerNodeInfoDir string) ([]bootstrap.NodeInfoPub, error) { var partners []bootstrap.NodeInfoPub files, err := FilesInDir(partnerNodeInfoDir) if err != nil { - log.Fatal().Err(err).Msg("could not read partner node infos") + return nil, fmt.Errorf("could not read partner node infos: %w", err) } for _, f := range files { // skip files that do not include node-infos if !strings.Contains(f, bootstrap.PathPartnerNodeInfoPrefix) { continue } - // read file and append to partners var p bootstrap.NodeInfoPub err = ReadJSON(f, &p) if err != nil { - log.Fatal().Err(err).Msg("failed to read node info") + return nil, fmt.Errorf("failed to read node info: %w", err) } partners = append(partners, p) } - return partners + return partners, nil } -// ReadInternalNodeInfos returns a list of internal nodes after collecting weights -// from configuration files. -func ReadInternalNodeInfos(log zerolog.Logger, internalNodePrivInfoDir, internalWeightsConfig string) []bootstrap.NodeInfo { - privInternals := ReadInternalNodes(log, internalNodePrivInfoDir) +// ReadFullInternalNodeInfos reads internal node info and internal node weight information from the specified paths and constructs +// a list of full bootstrap.NodeInfo for each internal node. +// Args: +// - log: logger used to log debug information. +// - internalNodePrivInfoDir: path to internal nodes private info. +// - internalWeightsConfig: path to internal weights configuration file. +// Returns: +// - []bootstrap.NodeInfo: the generated node info list. +// - error: if any error occurs. Any error returned from this function is irrecoverable. 
+func ReadFullInternalNodeInfos(log zerolog.Logger, internalNodePrivInfoDir, internalWeightsConfig string) ([]bootstrap.NodeInfo, error) { + privInternals, err := ReadInternalNodeInfos(internalNodePrivInfoDir) + if err != nil { + return nil, err + } + log.Info().Msgf("read %v internal private node-info files", len(privInternals)) weights := internalWeightsByAddress(log, internalWeightsConfig) @@ -118,18 +140,20 @@ func ReadInternalNodeInfos(log zerolog.Logger, internalNodePrivInfoDir, internal ValidateAddressFormat(log, internal.Address) // validate every single internal node - nodeID := ValidateNodeID(log, internal.NodeID) - weight, valid := ValidateWeight(weights[internal.Address]) - if !valid { - log.Error().Msgf("weights: %v", weights) - log.Fatal().Msgf("internal node %v has no weight. Did you forget to update the node address?", internal) + err := ValidateNodeID(internal.NodeID) + if err != nil { + return nil, fmt.Errorf(fmt.Sprintf("invalid internal node ID: %s", internal.NodeID)) + } + weight := weights[internal.NodeID.String()] + if valid := ValidateWeight(weight); !valid { + return nil, fmt.Errorf(fmt.Sprintf("invalid partner weight: %d", weight)) } if weight != flow.DefaultInitialWeight { log.Warn().Msgf("internal node (id=%x) has non-default weight (%d != %d)", internal.NodeID, weight, flow.DefaultInitialWeight) } node := bootstrap.NewPrivateNodeInfo( - nodeID, + internal.NodeID, internal.Role, internal.Address, weight, @@ -140,18 +164,22 @@ func ReadInternalNodeInfos(log zerolog.Logger, internalNodePrivInfoDir, internal nodes = append(nodes, node) } - return nodes + return nodes, nil } -// ReadInternalNodes reads our internal node private infos generated by -// `keygen` command and returns it -func ReadInternalNodes(log zerolog.Logger, internalNodePrivInfoDir string) []bootstrap.NodeInfoPriv { +// ReadInternalNodeInfos reads our internal node private infos generated by `keygen` command and returns it. 
+// Args: +// - internalNodePrivInfoDir: path to internal nodes private info. +// Returns: +// - []bootstrap.NodeInfo: the generated private node info list. +// - error: if any error occurs. Any error returned from this function is irrecoverable. +func ReadInternalNodeInfos(internalNodePrivInfoDir string) ([]bootstrap.NodeInfoPriv, error) { var internalPrivInfos []bootstrap.NodeInfoPriv // get files in internal priv node infos directory files, err := FilesInDir(internalNodePrivInfoDir) if err != nil { - log.Fatal().Err(err).Msg("could not read partner node infos") + return nil, fmt.Errorf("could not read partner node infos: %w", err) } // for each of the files @@ -165,12 +193,12 @@ func ReadInternalNodes(log zerolog.Logger, internalNodePrivInfoDir string) []boo var p bootstrap.NodeInfoPriv err = ReadJSON(f, &p) if err != nil { - log.Fatal().Err(err).Msg("failed to read json") + return nil, fmt.Errorf("failed to read json: %w", err) } internalPrivInfos = append(internalPrivInfos, p) } - return internalPrivInfos + return internalPrivInfos, nil } // internalWeightsByAddress returns a mapping of node address by weight for internal nodes diff --git a/cmd/util/cmd/common/utils.go b/cmd/util/cmd/common/utils.go index a162feb4e65..6ee6b2460ac 100644 --- a/cmd/util/cmd/common/utils.go +++ b/cmd/util/cmd/common/utils.go @@ -130,29 +130,49 @@ func ValidateAddressFormat(log zerolog.Logger, address string) { checkErr(err) } -func ValidateNodeID(lg zerolog.Logger, nodeID flow.Identifier) flow.Identifier { +// ValidateNodeID returns an error if node ID is non-zero. +// Args: +// - nodeID: the node ID to validate. +// Returns: +// - error: if node id is the zero value. 
+func ValidateNodeID(nodeID flow.Identifier) error { if nodeID == flow.ZeroID { - lg.Fatal().Msg("NodeID must not be zero") + return fmt.Errorf("NodeID must not be zero") } - return nodeID + return nil } -func ValidateNetworkPubKey(lg zerolog.Logger, key encodable.NetworkPubKey) encodable.NetworkPubKey { +// ValidateNetworkPubKey returns an error if network public key is nil. +// Args: +// - key: the public key. +// Returns: +// - error: if the network key is nil. +func ValidateNetworkPubKey(key encodable.NetworkPubKey) error { if key.PublicKey == nil { - lg.Fatal().Msg("NetworkPubKey must not be nil") + return fmt.Errorf("network public key must not be nil") } - return key + return nil } -func ValidateStakingPubKey(lg zerolog.Logger, key encodable.StakingPubKey) encodable.StakingPubKey { +// ValidateStakingPubKey returns an error if the staking key is nil. +// Args: +// - key: the public key. +// Returns: +// - error: if the staking key is nil. +func ValidateStakingPubKey(key encodable.StakingPubKey) error { if key.PublicKey == nil { - lg.Fatal().Msg("StakingPubKey must not be nil") + return fmt.Errorf("staking public key must not be nil") } - return key + return nil } -func ValidateWeight(weight uint64) (uint64, bool) { - return weight, weight > 0 +// ValidateWeight returns true if weight is greater than 0. +// Args: +// - weight: the weight to check. +// Returns: +// - bool: true if weight is greater than 0. +func ValidateWeight(weight uint64) bool { + return weight > 0 } // PartnerWeights is the format of the JSON file specifying partner node weights. 
diff --git a/cmd/util/cmd/epochs/cmd/recover.go b/cmd/util/cmd/epochs/cmd/recover.go index 03ced227b5e..e6a69b4203e 100644 --- a/cmd/util/cmd/epochs/cmd/recover.go +++ b/cmd/util/cmd/epochs/cmd/recover.go @@ -114,7 +114,11 @@ func extractRecoverEpochArgs(snapshot *inmem.Snapshot) []cadence.Value { partnerCollectors := make(flow.IdentityList, 0) log.Info().Msg("collecting internal node network and staking keys") - internalNodes := common.ReadInternalNodeInfos(log, flagInternalNodePrivInfoDir, flagNodeConfigJson) + internalNodes, err := common.ReadFullInternalNodeInfos(log, flagInternalNodePrivInfoDir, flagNodeConfigJson) + if err != nil { + log.Fatal().Err(err).Msg("failed to read full internal node infos") + } + internalNodesMap := make(map[flow.Identifier]struct{}) for _, node := range internalNodes { if !ids.Exists(node.Identity()) { From 4b3dae6aa900098ba077354464728357ffe4b439 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Tue, 2 Apr 2024 13:17:44 -0400 Subject: [PATCH 049/148] fix node ids in test root snapshot fixture --- cmd/util/cmd/common/node_info.go | 6 +++-- cmd/util/cmd/epochs/cmd/recover.go | 8 +++---- cmd/util/cmd/epochs/cmd/recover_test.go | 30 +++++++++++++++++-------- 3 files changed, 29 insertions(+), 15 deletions(-) diff --git a/cmd/util/cmd/common/node_info.go b/cmd/util/cmd/common/node_info.go index a03fd27eaf7..2ee669fe0fa 100644 --- a/cmd/util/cmd/common/node_info.go +++ b/cmd/util/cmd/common/node_info.go @@ -47,6 +47,7 @@ func ReadFullPartnerNodeInfos(log zerolog.Logger, partnerWeightsPath, partnerNod if err != nil { return nil, fmt.Errorf(fmt.Sprintf("invalid staking public key: %s", partner.StakingPubKey)) } + weight := weights[partner.NodeID] if valid := ValidateWeight(weight); !valid { return nil, fmt.Errorf(fmt.Sprintf("invalid partner weight: %d", weight)) @@ -128,7 +129,7 @@ func ReadFullInternalNodeInfos(log zerolog.Logger, internalNodePrivInfoDir, inte if err != nil { return nil, err } - + log.Info().Msgf("read %v internal 
private node-info files", len(privInternals)) weights := internalWeightsByAddress(log, internalWeightsConfig) @@ -144,7 +145,8 @@ func ReadFullInternalNodeInfos(log zerolog.Logger, internalNodePrivInfoDir, inte if err != nil { return nil, fmt.Errorf(fmt.Sprintf("invalid internal node ID: %s", internal.NodeID)) } - weight := weights[internal.NodeID.String()] + weight := weights[internal.Address] + if valid := ValidateWeight(weight); !valid { return nil, fmt.Errorf(fmt.Sprintf("invalid partner weight: %d", weight)) } diff --git a/cmd/util/cmd/epochs/cmd/recover.go b/cmd/util/cmd/epochs/cmd/recover.go index e6a69b4203e..19c609e3a0b 100644 --- a/cmd/util/cmd/epochs/cmd/recover.go +++ b/cmd/util/cmd/epochs/cmd/recover.go @@ -32,7 +32,6 @@ var ( flagInternalNodePrivInfoDir string flagNodeConfigJson string flagCollectionClusters int - flagStartView uint64 flagNumViewsInEpoch uint64 flagNumViewsInStakingAuction uint64 flagEpochCounter uint64 @@ -51,7 +50,6 @@ func addGenerateRecoverEpochTxArgsCmdFlags() { "path to a JSON file containing multiple node configurations (fields Role, Address, Weight)") generateRecoverEpochTxArgsCmd.Flags().StringVar(&flagInternalNodePrivInfoDir, "internal-priv-dir", "", "path to directory "+ "containing the output from the `keygen` command for internal nodes") - generateRecoverEpochTxArgsCmd.Flags().Uint64Var(&flagStartView, "start-view", 0, "start view of the recovery epoch") generateRecoverEpochTxArgsCmd.Flags().Uint64Var(&flagNumViewsInEpoch, "epoch-length", 4000, "length of each epoch measured in views") generateRecoverEpochTxArgsCmd.Flags().Uint64Var(&flagNumViewsInStakingAuction, "epoch-staking-phase-length", 100, "length of the epoch staking phase measured in views") generateRecoverEpochTxArgsCmd.Flags().Uint64Var(&flagEpochCounter, "epoch-counter", 0, "the epoch counter used to generate the root cluster block") @@ -142,7 +140,8 @@ func extractRecoverEpochArgs(snapshot *inmem.Snapshot) []cadence.Value { } log.Info().Msg("computing 
collection node clusters") - _, clusters, err := common.ConstructClusterAssignment(log, partnerCollectors, internalCollectors, flagCollectionClusters) + + assignments, clusters, err := common.ConstructClusterAssignment(log, partnerCollectors, internalCollectors, flagCollectionClusters) if err != nil { log.Fatal().Err(err).Msg("unable to generate cluster assignment") } @@ -207,7 +206,8 @@ func extractRecoverEpochArgs(snapshot *inmem.Snapshot) []cadence.Value { cadence.NewArray(dkgPubKeys), // node ids cadence.NewArray(nodeIds), - //common.ConvertClusterAssignmentsCdc(assignments), + // clusters, + common.ConvertClusterAssignmentsCdc(assignments), } return args diff --git a/cmd/util/cmd/epochs/cmd/recover_test.go b/cmd/util/cmd/epochs/cmd/recover_test.go index aa6efa9b17e..c20e1fffe1e 100644 --- a/cmd/util/cmd/epochs/cmd/recover_test.go +++ b/cmd/util/cmd/epochs/cmd/recover_test.go @@ -3,6 +3,8 @@ package cmd import ( "bytes" "encoding/json" + "github.com/onflow/flow-go/cmd/util/cmd/common" + "github.com/onflow/flow-go/model/flow" "testing" "github.com/stretchr/testify/require" @@ -17,8 +19,21 @@ func TestRecoverEpochHappyPath(t *testing.T) { // tests that given the root snapshot, the command // writes the expected arguments to stdout. 
utils.RunWithSporkBootstrapDir(t, func(bootDir, partnerDir, partnerWeights, internalPrivDir, configPath string) { + internalNodes, err := common.ReadFullInternalNodeInfos(log, internalPrivDir, configPath) + require.NoError(t, err) + partnerNodes, err := common.ReadFullPartnerNodeInfos(log, partnerWeights, partnerDir) + require.NoError(t, err) + + allNodeIds := make(flow.IdentityList, 0) + for _, node := range internalNodes { + allNodeIds = append(allNodeIds, node.Identity()) + } + for _, node := range partnerNodes { + allNodeIds = append(allNodeIds, node.Identity()) + } + // create a root snapshot - rootSnapshot := unittest.RootSnapshotFixture(unittest.IdentityListFixture(10, unittest.WithAllRoles())) + rootSnapshot := unittest.RootSnapshotFixture(allNodeIds) snapshotFn := func() *inmem.Snapshot { return rootSnapshot } @@ -26,24 +41,21 @@ func TestRecoverEpochHappyPath(t *testing.T) { stdout := bytes.NewBuffer(nil) generateRecoverEpochTxArgsCmd.SetOut(stdout) - flagPartnerWeights = partnerWeights - flagPartnerNodeInfoDir = partnerDir flagInternalNodePrivInfoDir = internalPrivDir flagNodeConfigJson = configPath flagCollectionClusters = 2 - flagStartView = 1000 - flagStakingEndView = 2000 - flagEndView = 4000 + flagNumViewsInEpoch = 4000 + flagNumViewsInStakingAuction = 100 + flagEpochCounter = 2 generateRecoverEpochTxArgs(snapshotFn)(generateRecoverEpochTxArgsCmd, nil) // read output from stdout var outputTxArgs []interface{} - err := json.NewDecoder(stdout).Decode(&outputTxArgs) + err = json.NewDecoder(stdout).Decode(&outputTxArgs) require.NoError(t, err) // compare to expected values expectedArgs := extractRecoverEpochArgs(rootSnapshot) - - unittest.VerifyCdcArguments(t, expectedArgs, outputTxArgs) + unittest.VerifyCdcArguments(t, expectedArgs[:len(expectedArgs)-2], outputTxArgs[:len(expectedArgs)-2]) }) } From 3a8ec88c685d70ab7378df7d7eee62a50fcfa4fb Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Tue, 2 Apr 2024 13:22:49 -0400 Subject: [PATCH 050/148] remove 
debug logs --- cmd/util/cmd/common/clusters.go | 2 -- cmd/util/cmd/epochs/cmd/recover_test.go | 3 ++- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/cmd/util/cmd/common/clusters.go b/cmd/util/cmd/common/clusters.go index 2a335358c16..6385334d736 100644 --- a/cmd/util/cmd/common/clusters.go +++ b/cmd/util/cmd/common/clusters.go @@ -130,10 +130,8 @@ func ConstructRootQCsForClusters(log zerolog.Logger, clusterList flow.ClusterLis func ConvertClusterAssignmentsCdc(assignments flow.AssignmentList) cadence.Array { assignmentsCdc := make([]cadence.Value, len(assignments)) for i, asmt := range assignments { - fmt.Println(asmt.Len()) vals := make([]cadence.Value, asmt.Len()) for j, k := range asmt { - fmt.Println(k.String()) vals[j] = cadence.String(k.String()) } assignmentsCdc[i] = cadence.NewArray(vals).WithType(cadence.NewVariableSizedArrayType(cadence.StringType{})) diff --git a/cmd/util/cmd/epochs/cmd/recover_test.go b/cmd/util/cmd/epochs/cmd/recover_test.go index c20e1fffe1e..0a5eec84788 100644 --- a/cmd/util/cmd/epochs/cmd/recover_test.go +++ b/cmd/util/cmd/epochs/cmd/recover_test.go @@ -56,6 +56,7 @@ func TestRecoverEpochHappyPath(t *testing.T) { require.NoError(t, err) // compare to expected values expectedArgs := extractRecoverEpochArgs(rootSnapshot) - unittest.VerifyCdcArguments(t, expectedArgs[:len(expectedArgs)-2], outputTxArgs[:len(expectedArgs)-2]) + unittest.VerifyCdcArguments(t, expectedArgs[:len(expectedArgs)-1], outputTxArgs[:len(expectedArgs)-1]) + // @TODO validate cadence values for generated cluster assignments and clusters }) } From 0060ce9a304bbb54ed3d7860807837ea665d6df0 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Wed, 3 Apr 2024 12:39:46 -0400 Subject: [PATCH 051/148] Update cmd/util/cmd/common/clusters.go Co-authored-by: Jordan Schalm --- cmd/util/cmd/common/clusters.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/util/cmd/common/clusters.go b/cmd/util/cmd/common/clusters.go index 
6385334d736..efae389e7bc 100644 --- a/cmd/util/cmd/common/clusters.go +++ b/cmd/util/cmd/common/clusters.go @@ -98,9 +98,9 @@ func ConstructClusterAssignment(log zerolog.Logger, partnerNodes, internalNodes // ConstructRootQCsForClusters constructs a root QC for each cluster in the list. // Args: // - log: the logger instance. -// - clusterList: identity list of partner nodes. -// - nodeInfos: identity list of internal nodes. -// - clusterBlocks: the number of collectors in each generated cluster. +// - clusterList: list of clusters +// - nodeInfos: list of NodeInfos (must contain all internal nodes) +// - clusterBlocks: list of root blocks for each cluster // Returns: // - flow.AssignmentList: the generated assignment list. // - flow.ClusterList: the generate collection cluster list. From 3a2b99e20ef511137556b3df7878936002ea3efd Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Wed, 3 Apr 2024 12:39:58 -0400 Subject: [PATCH 052/148] Update cmd/util/cmd/common/clusters.go Co-authored-by: Jordan Schalm --- cmd/util/cmd/common/clusters.go | 1 - 1 file changed, 1 deletion(-) diff --git a/cmd/util/cmd/common/clusters.go b/cmd/util/cmd/common/clusters.go index efae389e7bc..4af295d250a 100644 --- a/cmd/util/cmd/common/clusters.go +++ b/cmd/util/cmd/common/clusters.go @@ -104,7 +104,6 @@ func ConstructClusterAssignment(log zerolog.Logger, partnerNodes, internalNodes // Returns: // - flow.AssignmentList: the generated assignment list. // - flow.ClusterList: the generate collection cluster list. -// - error: if any error occurs. Any error returned from this function is irrecoverable. 
func ConstructRootQCsForClusters(log zerolog.Logger, clusterList flow.ClusterList, nodeInfos []bootstrap.NodeInfo, clusterBlocks []*cluster.Block) []*flow.QuorumCertificate { if len(clusterBlocks) != len(clusterList) { From fcb1006c60e37584b2fad79b0cb0d2464b2a80a5 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Wed, 3 Apr 2024 12:43:55 -0400 Subject: [PATCH 053/148] Apply suggestions from code review Co-authored-by: Jordan Schalm --- cmd/util/cmd/epochs/cmd/recover.go | 24 +++++++++++++++++++---- utils/unittest/service_events_fixtures.go | 2 +- 2 files changed, 21 insertions(+), 5 deletions(-) diff --git a/cmd/util/cmd/epochs/cmd/recover.go b/cmd/util/cmd/epochs/cmd/recover.go index 19c609e3a0b..b8d7086d2ed 100644 --- a/cmd/util/cmd/epochs/cmd/recover.go +++ b/cmd/util/cmd/epochs/cmd/recover.go @@ -19,11 +19,20 @@ import ( // The full epoch data must be generated manually and submitted with this transaction in order for an // EpochRecover event to be emitted. This command retrieves the current protocol state identities, computes the cluster assignment using those // identities, generates the cluster QC's and retrieves the DKG key vector of the last successful epoch. +// This recovery process has some constraints: +// - The RecoveryEpoch must have exactly the same consensus committee as participated in the most recent successful DKG. +// - The RecoveryEpoch must contain enough "internal" collection nodes so that all clusters contain a supermajority of "internal" collection nodes (same constraint as sporks) var ( generateRecoverEpochTxArgsCmd = &cobra.Command{ Use: "efm-recover-tx-args", Short: "Generates recover epoch transaction arguments", - Long: "Generates transaction arguments for the epoch recovery transaction.", + Long: ` +Generates transaction arguments for the epoch recovery transaction. +The epoch recovery transaction is used to recover from any failure in the epoch transition process without requiring a spork. 
+This recovery process has some constraints: +- The RecoveryEpoch must have exactly the same consensus committee as participated in the most recent successful DKG. +- The RecoveryEpoch must contain enough "internal" collection nodes so that all clusters contain a supermajority of "internal" collection nodes (same constraint as sporks) +`, Run: generateRecoverEpochTxArgs(getSnapshot), } @@ -46,13 +55,18 @@ func addGenerateRecoverEpochTxArgsCmdFlags() { generateRecoverEpochTxArgsCmd.Flags().IntVar(&flagCollectionClusters, "collection-clusters", 3, "number of collection clusters") // required parameters for network configuration and generation of root node identities - generateRecoverEpochTxArgsCmd.Flags().StringVar(&flagNodeConfigJson, "node-config", "", + generateRecoverEpochTxArgsCmd.Flags().StringVar(&flagNodeConfigJson, "config", "", "path to a JSON file containing multiple node configurations (fields Role, Address, Weight)") generateRecoverEpochTxArgsCmd.Flags().StringVar(&flagInternalNodePrivInfoDir, "internal-priv-dir", "", "path to directory "+ "containing the output from the `keygen` command for internal nodes") generateRecoverEpochTxArgsCmd.Flags().Uint64Var(&flagNumViewsInEpoch, "epoch-length", 4000, "length of each epoch measured in views") generateRecoverEpochTxArgsCmd.Flags().Uint64Var(&flagNumViewsInStakingAuction, "epoch-staking-phase-length", 100, "length of the epoch staking phase measured in views") generateRecoverEpochTxArgsCmd.Flags().Uint64Var(&flagEpochCounter, "epoch-counter", 0, "the epoch counter used to generate the root cluster block") + + generateRecoverEpochTxArgsCmd.MarkFlagRequired("epoch-length") + generateRecoverEpochTxArgsCmd.MarkFlagRequired("epoch-staking-phase-length") + generateRecoverEpochTxArgsCmd.MarkFlagRequired("epoch-counter") + generateRecoverEpochTxArgsCmd.MarkFlagRequired("collection-clusters") } func getSnapshot() *inmem.Snapshot { @@ -97,11 +111,11 @@ func generateRecoverEpochTxArgs(getSnapshot func() *inmem.Snapshot) 
func(cmd *co } } -// extractResetEpochArgs extracts the required transaction arguments for the `resetEpoch` transaction +// extractRecoverEpochArgs extracts the required transaction arguments for the `recoverEpoch` transaction. func extractRecoverEpochArgs(snapshot *inmem.Snapshot) []cadence.Value { epoch := snapshot.Epochs().Current() - ids, err := snapshot.Identities(filter.IsValidProtocolParticipant) + currentEpochIdentities, err := snapshot.Identities(filter.IsValidProtocolParticipant) if err != nil { log.Fatal().Err(err).Msg("failed to get valid protocol participants from snapshot") } @@ -158,6 +172,8 @@ func extractRecoverEpochArgs(snapshot *inmem.Snapshot) []cadence.Value { dkgPubKeys := make([]cadence.Value, 0) nodeIds := make([]cadence.Value, 0) + // NOTE: The RecoveryEpoch will re-use the last successful DKG output. This means that the consensus + // committee in the RecoveryEpoch must be identical to the committee which participated in that DKG. dkgGroupKeyCdc, cdcErr := cadence.NewString(currentEpochDKG.GroupKey().String()) if cdcErr != nil { log.Fatal().Err(cdcErr).Msg("failed to get dkg group key cadence string") diff --git a/utils/unittest/service_events_fixtures.go b/utils/unittest/service_events_fixtures.go index 7f4ad955d56..e8fd351ab19 100644 --- a/utils/unittest/service_events_fixtures.go +++ b/utils/unittest/service_events_fixtures.go @@ -6,7 +6,7 @@ import ( "encoding/json" "testing" - json2 "github.com/onflow/cadence/encoding/json" + jsoncdc "github.com/onflow/cadence/encoding/json" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" From 340d3ab4a0db009dbdb79d1b3f4aa8ea999c949e Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Wed, 3 Apr 2024 12:45:05 -0400 Subject: [PATCH 054/148] Apply suggestions from code review Co-authored-by: Jordan Schalm --- cmd/util/cmd/common/clusters.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/cmd/util/cmd/common/clusters.go 
b/cmd/util/cmd/common/clusters.go index 4af295d250a..669cec40283 100644 --- a/cmd/util/cmd/common/clusters.go +++ b/cmd/util/cmd/common/clusters.go @@ -32,7 +32,7 @@ import ( // - log: the logger instance. // - partnerNodes: identity list of partner nodes. // - internalNodes: identity list of internal nodes. -// - numCollectionClusters: the number of collectors in each generated cluster. +// - numCollectionClusters: the number of clusters to generate // Returns: // - flow.AssignmentList: the generated assignment list. // - flow.ClusterList: the generate collection cluster list. @@ -125,13 +125,13 @@ func ConstructRootQCsForClusters(log zerolog.Logger, clusterList flow.ClusterLis return qcs } -// ConvertClusterAssignmentsCdc converts golang cluster assignments type to cadence array of arrays. +// ConvertClusterAssignmentsCdc converts golang cluster assignments type to Cadence type `[[String]]`. func ConvertClusterAssignmentsCdc(assignments flow.AssignmentList) cadence.Array { assignmentsCdc := make([]cadence.Value, len(assignments)) for i, asmt := range assignments { vals := make([]cadence.Value, asmt.Len()) - for j, k := range asmt { - vals[j] = cadence.String(k.String()) + for j, nodeID := range asmt { + vals[j] = cadence.String(nodeID.String()) } assignmentsCdc[i] = cadence.NewArray(vals).WithType(cadence.NewVariableSizedArrayType(cadence.StringType{})) } @@ -139,7 +139,7 @@ func ConvertClusterAssignmentsCdc(assignments flow.AssignmentList) cadence.Array return cadence.NewArray(assignmentsCdc).WithType(cadence.NewVariableSizedArrayType(cadence.NewVariableSizedArrayType(cadence.StringType{}))) } -// ConvertClusterQcsCdc converts golang cluster qcs type to cadence struct. +// ConvertClusterQcsCdc converts cluster QCs from `QuorumCertificate` type to `ClusterQCVoteData` type. 
func ConvertClusterQcsCdc(qcs []*flow.QuorumCertificate, clusterList flow.ClusterList) ([]*flow.ClusterQCVoteData, error) { voteData := make([]*flow.ClusterQCVoteData, len(qcs)) for i, qc := range qcs { From d73ca606df73b0e251831d0e5de3d41647e15aac Mon Sep 17 00:00:00 2001 From: UlyanaAndrukhiv Date: Wed, 3 Apr 2024 19:46:22 +0300 Subject: [PATCH 055/148] Implemented subscribe blocks, headers, digests endpoints for observer --- cmd/observer/node_builder/observer_builder.go | 20 +++++++++ engine/access/apiproxy/access_api_proxy.go | 45 +++++++++++-------- 2 files changed, 47 insertions(+), 18 deletions(-) diff --git a/cmd/observer/node_builder/observer_builder.go b/cmd/observer/node_builder/observer_builder.go index 9da165f400d..1d710e5e226 100644 --- a/cmd/observer/node_builder/observer_builder.go +++ b/cmd/observer/node_builder/observer_builder.go @@ -1654,6 +1654,19 @@ func (builder *ObserverServiceBuilder) enqueueRPCServer() { ), } + broadcaster := engine.NewBroadcaster() + // create BlockTracker that will track for new blocks (finalized and sealed) and + // handles block-related operations. 
+ blockTracker, err := subscription.NewBlockTracker( + node.State, + builder.FinalizedRootBlock.Header.Height, + node.Storage.Headers, + broadcaster, + ) + if err != nil { + return nil, fmt.Errorf("failed to initialize block tracker: %w", err) + } + backendParams := backend.Params{ State: node.State, Blocks: node.Storage.Blocks, @@ -1672,6 +1685,13 @@ func (builder *ObserverServiceBuilder) enqueueRPCServer() { Log: node.Logger, SnapshotHistoryLimit: backend.DefaultSnapshotHistoryLimit, Communicator: backend.NewNodeCommunicator(backendConfig.CircuitBreakerConfig.Enabled), + BlockTracker: blockTracker, + SubscriptionParams: backend.SubscriptionParams{ + Broadcaster: broadcaster, + SendTimeout: builder.stateStreamConf.ClientSendTimeout, + ResponseLimit: builder.stateStreamConf.ResponseLimit, + SendBufferSize: int(builder.stateStreamConf.ClientSendBufferSize), + }, } if builder.localServiceAPIEnabled { diff --git a/engine/access/apiproxy/access_api_proxy.go b/engine/access/apiproxy/access_api_proxy.go index d57f1681700..e2da8a9b0d3 100644 --- a/engine/access/apiproxy/access_api_proxy.go +++ b/engine/access/apiproxy/access_api_proxy.go @@ -347,48 +347,57 @@ func (h *FlowAccessAPIRouter) GetExecutionResultByID(context context.Context, re } func (h *FlowAccessAPIRouter) SubscribeBlocksFromStartBlockID(req *access.SubscribeBlocksFromStartBlockIDRequest, server access.AccessAPI_SubscribeBlocksFromStartBlockIDServer) error { - // SubscribeBlocksFromStartBlockID is not implemented for observer yet - return status.Errorf(codes.Unimplemented, "method SubscribeBlocksFromStartBlockID not implemented") + err := h.local.SubscribeBlocksFromStartBlockID(req, server) + h.log(LocalApiService, "SubscribeBlocksFromStartBlockID", err) + return err } func (h *FlowAccessAPIRouter) SubscribeBlocksFromStartHeight(req *access.SubscribeBlocksFromStartHeightRequest, server access.AccessAPI_SubscribeBlocksFromStartHeightServer) error { - // SubscribeBlocksFromStartHeight is not implemented for 
observer yet - return status.Errorf(codes.Unimplemented, "method SubscribeBlocksFromStartHeight not implemented") + err := h.local.SubscribeBlocksFromStartHeight(req, server) + h.log(LocalApiService, "SubscribeBlocksFromStartHeight", err) + return err } func (h *FlowAccessAPIRouter) SubscribeBlocksFromLatest(req *access.SubscribeBlocksFromLatestRequest, server access.AccessAPI_SubscribeBlocksFromLatestServer) error { - // SubscribeBlocksFromLatest is not implemented for observer yet - return status.Errorf(codes.Unimplemented, "method SubscribeBlocksFromLatest not implemented") + err := h.local.SubscribeBlocksFromLatest(req, server) + h.log(LocalApiService, "SubscribeBlocksFromLatest", err) + return err } func (h *FlowAccessAPIRouter) SubscribeBlockHeadersFromStartBlockID(req *access.SubscribeBlockHeadersFromStartBlockIDRequest, server access.AccessAPI_SubscribeBlockHeadersFromStartBlockIDServer) error { - // SubscribeBlockHeadersFromStartBlockID is not implemented for observer yet - return status.Errorf(codes.Unimplemented, "method SubscribeBlockHeadersFromStartBlockID not implemented") + err := h.local.SubscribeBlockHeadersFromStartBlockID(req, server) + h.log(LocalApiService, "SubscribeBlockHeadersFromStartBlockID", err) + return err } func (h *FlowAccessAPIRouter) SubscribeBlockHeadersFromStartHeight(req *access.SubscribeBlockHeadersFromStartHeightRequest, server access.AccessAPI_SubscribeBlockHeadersFromStartHeightServer) error { - // SubscribeBlockHeadersFromStartHeight is not implemented for observer yet - return status.Errorf(codes.Unimplemented, "method SubscribeBlockHeadersFromStartHeight not implemented") + err := h.local.SubscribeBlockHeadersFromStartHeight(req, server) + h.log(LocalApiService, "SubscribeBlockHeadersFromStartHeight", err) + return err } func (h *FlowAccessAPIRouter) SubscribeBlockHeadersFromLatest(req *access.SubscribeBlockHeadersFromLatestRequest, server access.AccessAPI_SubscribeBlockHeadersFromLatestServer) error { - // 
SubscribeBlockHeadersFromLatest is not implemented for observer yet - return status.Errorf(codes.Unimplemented, "method SubscribeBlockHeadersFromLatest not implemented") + err := h.local.SubscribeBlockHeadersFromLatest(req, server) + h.log(LocalApiService, "SubscribeBlockHeadersFromLatest", err) + return err } func (h *FlowAccessAPIRouter) SubscribeBlockDigestsFromStartBlockID(req *access.SubscribeBlockDigestsFromStartBlockIDRequest, server access.AccessAPI_SubscribeBlockDigestsFromStartBlockIDServer) error { - // SubscribeBlockDigestsFromStartBlockID is not implemented for observer yet - return status.Errorf(codes.Unimplemented, "method SubscribeBlockDigestsFromStartBlockID not implemented") + err := h.local.SubscribeBlockDigestsFromStartBlockID(req, server) + h.log(LocalApiService, "SubscribeBlockDigestsFromStartBlockID", err) + return err } func (h *FlowAccessAPIRouter) SubscribeBlockDigestsFromStartHeight(req *access.SubscribeBlockDigestsFromStartHeightRequest, server access.AccessAPI_SubscribeBlockDigestsFromStartHeightServer) error { - // SubscribeBlockDigestsFromStartHeight is not implemented for observer yet - return status.Errorf(codes.Unimplemented, "method SubscribeBlockDigestsFromStartHeight not implemented") + err := h.local.SubscribeBlockDigestsFromStartHeight(req, server) + h.log(LocalApiService, "SubscribeBlockDigestsFromStartHeight", err) + return err } func (h *FlowAccessAPIRouter) SubscribeBlockDigestsFromLatest(req *access.SubscribeBlockDigestsFromLatestRequest, server access.AccessAPI_SubscribeBlockDigestsFromLatestServer) error { - // SubscribeBlockDigestsFromLatest is not implemented for observer yet - return status.Errorf(codes.Unimplemented, "method SubscribeBlockDigestsFromLatest not implemented") + err := h.local.SubscribeBlockDigestsFromLatest(req, server) + h.log(LocalApiService, "SubscribeBlockDigestsFromLatest", err) + return err } func (h *FlowAccessAPIRouter) SendAndSubscribeTransactionStatuses(req 
*access.SendAndSubscribeTransactionStatusesRequest, server access.AccessAPI_SendAndSubscribeTransactionStatusesServer) error { From 78e7b22566872ced6c19c7a42903afac473dc547 Mon Sep 17 00:00:00 2001 From: UlyanaAndrukhiv Date: Wed, 3 Apr 2024 19:47:22 +0300 Subject: [PATCH 056/148] Refactored grpc_state_stream_test --- .../access/cohort3/grpc_state_stream_test.go | 125 +++++++++++------- 1 file changed, 78 insertions(+), 47 deletions(-) diff --git a/integration/tests/access/cohort3/grpc_state_stream_test.go b/integration/tests/access/cohort3/grpc_state_stream_test.go index 0614b78f2e2..e946c2de9ae 100644 --- a/integration/tests/access/cohort3/grpc_state_stream_test.go +++ b/integration/tests/access/cohort3/grpc_state_stream_test.go @@ -145,13 +145,27 @@ func (s *GrpcStateStreamSuite) TestHappyPath() { time.Sleep(20 * time.Second) - testANEvents, testANErrs, err := SubscribeEventsByBlockHeight(s.ctx, sdkClientTestAN, 0, &executiondata.EventFilter{}) + // TODO: will be refactored after https://github.com/onflow/flow-go/pull/5602 merged + req := &executiondata.SubscribeEventsRequest{ + StartBlockHeight: 0, + EventEncodingVersion: entities.EventEncodingVersion_CCF_V0, + Filter: &executiondata.EventFilter{}, + HeartbeatInterval: 1, + } + + testANStream, err := sdkClientTestAN.SubscribeEvents(s.ctx, req) + s.Require().NoError(err) + testANEvents, testANErrs, err := SubscribeHandler(s.ctx, testANStream.Recv, eventsResponseHandler) s.Require().NoError(err) - controlANEvents, controlANErrs, err := SubscribeEventsByBlockHeight(s.ctx, sdkClientControlAN, 0, &executiondata.EventFilter{}) + controlANStream, err := sdkClientControlAN.SubscribeEvents(s.ctx, req) + s.Require().NoError(err) + controlANEvents, controlANErrs, err := SubscribeHandler(s.ctx, controlANStream.Recv, eventsResponseHandler) s.Require().NoError(err) - testONEvents, testONErrs, err := SubscribeEventsByBlockHeight(s.ctx, sdkClientTestON, 0, &executiondata.EventFilter{}) + testONStream, err := 
sdkClientTestON.SubscribeEvents(s.ctx, req) + s.Require().NoError(err) + testONEvents, testONErrs, err := SubscribeHandler(s.ctx, testONStream.Recv, eventsResponseHandler) s.Require().NoError(err) txCount := 10 @@ -185,7 +199,8 @@ func (s *GrpcStateStreamSuite) TestHappyPath() { foundANTxCount := 0 foundONTxCount := 0 - r := newResponseTracker() + r := NewResponseTracker(compareEventsResponse) + for { select { case err := <-testANErrs: @@ -219,44 +234,75 @@ func (s *GrpcStateStreamSuite) TestHappyPath() { } } -type ResponseTracker struct { - r map[uint64]map[string]flow.BlockEvents - mu sync.RWMutex +// ResponseTracker is a generic tracker for responses. +type ResponseTracker[T any] struct { + r map[uint64]map[string]T + mu sync.RWMutex + compare func(t *testing.T, responses map[uint64]map[string]T, blockHeight uint64) error //func(control, test T) error } -func newResponseTracker() *ResponseTracker { - return &ResponseTracker{ - r: make(map[uint64]map[string]flow.BlockEvents), +// NewResponseTracker creates a new ResponseTracker. 
+func NewResponseTracker[T any]( + compare func(t *testing.T, responses map[uint64]map[string]T, blockHeight uint64) error, +) *ResponseTracker[T] { + return &ResponseTracker[T]{ + r: make(map[uint64]map[string]T), + compare: compare, } } -func (r *ResponseTracker) Add(t *testing.T, blockHeight uint64, name string, events *flow.BlockEvents) { +func (r *ResponseTracker[T]) Add(t *testing.T, blockHeight uint64, name string, response T) { r.mu.Lock() defer r.mu.Unlock() if _, ok := r.r[blockHeight]; !ok { - r.r[blockHeight] = make(map[string]flow.BlockEvents) + r.r[blockHeight] = make(map[string]T) + } + r.r[blockHeight][name] = response + + err := r.compare(t, r.r, blockHeight) + if err != nil { + log.Fatalf("comparison error at block height %d: %v", blockHeight, err) } - r.r[blockHeight][name] = *events - if len(r.r[blockHeight]) != 3 { - return + delete(r.r, blockHeight) +} + +func eventsResponseHandler(msg *executiondata.SubscribeEventsResponse) (flow.BlockEvents, error) { + events := convert.MessagesToEvents(msg.GetEvents()) + + return flow.BlockEvents{ + BlockHeight: msg.GetBlockHeight(), + BlockID: convert.MessageToIdentifier(msg.GetBlockId()), + Events: events, + BlockTimestamp: msg.GetBlockTimestamp().AsTime(), + }, nil +} + +func compareEventsResponse(t *testing.T, responses map[uint64]map[string]*flow.BlockEvents, blockHeight uint64) error { + if len(responses[blockHeight]) != 3 { + return nil } + accessControlData := responses[blockHeight]["access_control"] + accessTestData := responses[blockHeight]["access_test"] + observerTestData := responses[blockHeight]["observer_test"] - err := r.compare(t, r.r[blockHeight]["access_control"], r.r[blockHeight]["access_test"]) + // Compare access_control with access_test + err := compareEvents(t, accessControlData, accessTestData) if err != nil { - log.Fatalf("failure comparing access and access data %d: %v", blockHeight, err) + return fmt.Errorf("failure comparing access and access data: %d: %v", blockHeight, err) } - 
err = r.compare(t, r.r[blockHeight]["access_control"], r.r[blockHeight]["observer_test"]) + // Compare access_control with observer_test + err = compareEvents(t, accessControlData, observerTestData) if err != nil { - log.Fatalf("failure comparing access and observer data %d: %v", blockHeight, err) + return fmt.Errorf("failure comparing access and observer data: %d: %v", blockHeight, err) } - delete(r.r, blockHeight) + return nil } -func (r *ResponseTracker) compare(t *testing.T, controlData flow.BlockEvents, testData flow.BlockEvents) error { +func compareEvents(t *testing.T, controlData, testData *flow.BlockEvents) error { require.Equal(t, controlData.BlockID, testData.BlockID) require.Equal(t, controlData.BlockHeight, testData.BlockHeight) require.Equal(t, len(controlData.Events), len(testData.Events)) @@ -283,24 +329,12 @@ func getClient(address string) (executiondata.ExecutionDataAPIClient, error) { return executiondata.NewExecutionDataAPIClient(conn), nil } -func SubscribeEventsByBlockHeight( +func SubscribeHandler[T any, V any]( ctx context.Context, - client executiondata.ExecutionDataAPIClient, - startHeight uint64, - filter *executiondata.EventFilter, -) (<-chan flow.BlockEvents, <-chan error, error) { - req := &executiondata.SubscribeEventsRequest{ - StartBlockHeight: startHeight, - EventEncodingVersion: entities.EventEncodingVersion_CCF_V0, - Filter: filter, - HeartbeatInterval: 1, - } - stream, err := client.SubscribeEvents(ctx, req) - if err != nil { - return nil, nil, err - } - - sub := make(chan flow.BlockEvents) + recv func() (T, error), + responseHandler func(T) (V, error), +) (<-chan V, <-chan error, error) { + sub := make(chan V) errChan := make(chan error) sendErr := func(err error) { @@ -315,23 +349,20 @@ func SubscribeEventsByBlockHeight( defer close(errChan) for { - resp, err := stream.Recv() + t, err := recv() if err != nil { if err == io.EOF { return } - sendErr(fmt.Errorf("error receiving event: %w", err)) + sendErr(fmt.Errorf("error 
receiving block: %w", err)) return } - events := convert.MessagesToEvents(resp.GetEvents()) - - response := flow.BlockEvents{ - BlockHeight: resp.GetBlockHeight(), - BlockID: convert.MessageToIdentifier(resp.GetBlockId()), - Events: events, - BlockTimestamp: resp.GetBlockTimestamp().AsTime(), + response, err := responseHandler(t) + if err != nil { + sendErr(fmt.Errorf("error converting response: %w", err)) + return } select { From b00fb702c2d5cfafa393eef0df54f845b8df8c83 Mon Sep 17 00:00:00 2001 From: UlyanaAndrukhiv Date: Wed, 3 Apr 2024 19:48:04 +0300 Subject: [PATCH 057/148] Added integration test for access and observer block streaming --- .../cohort3/grpc_streaming_blocks_test.go | 255 ++++++++++++++++++ 1 file changed, 255 insertions(+) create mode 100644 integration/tests/access/cohort3/grpc_streaming_blocks_test.go diff --git a/integration/tests/access/cohort3/grpc_streaming_blocks_test.go b/integration/tests/access/cohort3/grpc_streaming_blocks_test.go new file mode 100644 index 00000000000..950db06047f --- /dev/null +++ b/integration/tests/access/cohort3/grpc_streaming_blocks_test.go @@ -0,0 +1,255 @@ +package cohort3 + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/onflow/flow-go/engine/common/rpc/convert" + "github.com/onflow/flow-go/integration/testnet" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" + accessproto "github.com/onflow/flow/protobuf/go/flow/access" + "github.com/onflow/flow/protobuf/go/flow/entities" + "github.com/rs/zerolog" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" +) + +func TestGrpcBlocksStream(t *testing.T) { + suite.Run(t, new(GrpcBlocksStreamSuite)) +} + +type GrpcBlocksStreamSuite struct { + suite.Suite + + log zerolog.Logger + + // root context for the current test + ctx context.Context + cancel context.CancelFunc + + net *testnet.FlowNetwork + + // RPC methods to 
test + testedRPCs func() []RPCTest +} + +func (s *GrpcBlocksStreamSuite) TearDownTest() { + s.log.Info().Msg("================> Start TearDownTest") + s.net.Remove() + s.cancel() + s.log.Info().Msg("================> Finish TearDownTest") +} + +func (s *GrpcBlocksStreamSuite) SetupTest() { + s.log = unittest.LoggerForTest(s.Suite.T(), zerolog.InfoLevel) + s.log.Info().Msg("================> SetupTest") + defer func() { + s.log.Info().Msg("================> Finish SetupTest") + }() + + // access node + accessConfig := testnet.NewNodeConfig( + flow.RoleAccess, + testnet.WithLogLevel(zerolog.InfoLevel), + testnet.WithAdditionalFlag("--execution-data-sync-enabled=true"), + testnet.WithAdditionalFlagf("--execution-data-dir=%s", testnet.DefaultExecutionDataServiceDir), + testnet.WithAdditionalFlag("--execution-data-retry-delay=1s"), + testnet.WithAdditionalFlag("--execution-data-indexing-enabled=true"), + testnet.WithAdditionalFlagf("--execution-state-dir=%s", testnet.DefaultExecutionStateDir), + testnet.WithAdditionalFlag("--event-query-mode=local-only"), + testnet.WithAdditionalFlag("--supports-observer=true"), + testnet.WithAdditionalFlagf("--public-network-execution-data-sync-enabled=true"), + ) + + consensusConfigs := []func(config *testnet.NodeConfig){ + testnet.WithAdditionalFlag("--cruise-ctl-fallback-proposal-duration=400ms"), + testnet.WithAdditionalFlag(fmt.Sprintf("--required-verification-seal-approvals=%d", 1)), + testnet.WithAdditionalFlag(fmt.Sprintf("--required-construction-seal-approvals=%d", 1)), + testnet.WithLogLevel(zerolog.FatalLevel), + } + + nodeConfigs := []testnet.NodeConfig{ + testnet.NewNodeConfig(flow.RoleCollection, testnet.WithLogLevel(zerolog.FatalLevel)), + testnet.NewNodeConfig(flow.RoleCollection, testnet.WithLogLevel(zerolog.FatalLevel)), + testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.FatalLevel)), + testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.FatalLevel)), + 
testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...), + testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...), + testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...), + testnet.NewNodeConfig(flow.RoleVerification, testnet.WithLogLevel(zerolog.FatalLevel)), + accessConfig, + } + + // add the observer node config + observers := []testnet.ObserverConfig{{ + ContainerName: testnet.PrimaryON, + LogLevel: zerolog.DebugLevel, + AdditionalFlags: []string{ + fmt.Sprintf("--execution-data-dir=%s", testnet.DefaultExecutionDataServiceDir), + fmt.Sprintf("--execution-state-dir=%s", testnet.DefaultExecutionStateDir), + "--execution-data-sync-enabled=true", + "--event-query-mode=execution-nodes-only", + "--execution-data-indexing-enabled=true", + }, + }} + + conf := testnet.NewNetworkConfig("access_blocks_streaming_test", nodeConfigs, testnet.WithObservers(observers...)) + s.net = testnet.PrepareFlowNetwork(s.T(), conf, flow.Localnet) + + // start the network + s.T().Logf("starting flow network with docker containers") + s.ctx, s.cancel = context.WithCancel(context.Background()) + + s.testedRPCs = s.getRPCs + + s.net.Start(s.ctx) +} + +// TestRestEventStreaming tests gRPC event streaming +func (s *GrpcBlocksStreamSuite) TestHappyPath() { + accessUrl := fmt.Sprintf("localhost:%s", s.net.ContainerByName(testnet.PrimaryAN).Port(testnet.GRPCPort)) + accessClient, err := getAccessAPIClient(accessUrl) + s.Require().NoError(err) + + observerURL := fmt.Sprintf("localhost:%s", s.net.ContainerByName(testnet.PrimaryON).Port(testnet.GRPCPort)) + observerClient, err := getAccessAPIClient(observerURL) + s.Require().NoError(err) + + txGenerator, err := s.net.ContainerByName(testnet.PrimaryAN).TestnetClient() + s.Require().NoError(err) + header, err := txGenerator.GetLatestSealedBlockHeader(s.ctx) + s.Require().NoError(err) + + time.Sleep(20 * time.Second) + + var startValue interface{} + txCount := 10 + + for _, rpc := range s.testedRPCs() { + s.T().Run(rpc.name, 
func(t *testing.T) { + if rpc.name == "SubscribeBlocksFromStartBlockID" { + startValue = header.ID.Bytes() + } else { + startValue = header.Height + } + + accessStream, err := rpc.call(s.ctx, accessClient, startValue) + s.Require().NoError(err) + accessBlocks, accessBlockErrs, err := SubscribeHandler(s.ctx, accessStream.Recv, blockResponseHandler) + s.Require().NoError(err) + + observerStream, err := rpc.call(s.ctx, observerClient, startValue) + s.Require().NoError(err) + observerBlocks, observerBlockErrs, err := SubscribeHandler(s.ctx, observerStream.Recv, blockResponseHandler) + s.Require().NoError(err) + + foundANTxCount := 0 + foundONTxCount := 0 + + r := NewResponseTracker(compareBlocksResponse) + + for { + select { + case err := <-accessBlockErrs: + s.Require().NoErrorf(err, "unexpected AN error") + case _ = <-observerBlockErrs: + s.Require().NoErrorf(err, "unexpected ON error") + case block := <-accessBlocks: + s.T().Logf("AN block received: height: %d", block.Header.Height) + r.Add(s.T(), block.Header.Height, "access", block) + foundANTxCount++ + case block := <-observerBlocks: + s.T().Logf("ON block received: height: %d", block.Header.Height) + r.Add(s.T(), block.Header.Height, "observer", block) + foundONTxCount++ + } + + if foundANTxCount >= txCount && foundONTxCount >= txCount { + break + } + } + }) + } +} + +func blockResponseHandler(msg *accessproto.SubscribeBlocksResponse) (*flow.Block, error) { + return convert.MessageToBlock(msg.GetBlock()) +} + +func compareBlocksResponse(t *testing.T, responses map[uint64]map[string]*flow.Block, blockHeight uint64) error { + if len(responses[blockHeight]) != 2 { + return nil + } + accessData := responses[blockHeight]["access"] + observerData := responses[blockHeight]["observer"] + + // Compare access with observer + err := compareBlocks(t, accessData, observerData) + if err != nil { + return fmt.Errorf("failure comparing access and observer data: %d: %v", blockHeight, err) + } + + return nil +} + +func 
compareBlocks(t *testing.T, accessBlock *flow.Block, observerBlock *flow.Block) error { + require.Equal(t, accessBlock.ID(), observerBlock.ID()) + require.Equal(t, accessBlock.Header.Height, observerBlock.Header.Height) + require.Equal(t, accessBlock.Header.Timestamp, observerBlock.Header.Timestamp) + require.Equal(t, accessBlock.Payload.Hash(), observerBlock.Payload.Hash()) + + return nil +} + +type RPCTest struct { + name string + call func(ctx context.Context, client accessproto.AccessAPIClient, startValue interface{}) (accessproto.AccessAPI_SubscribeBlocksFromLatestClient, error) +} + +func (s *GrpcBlocksStreamSuite) getRPCs() []RPCTest { + return []RPCTest{ + { + name: "SubscribeBlocksFromLatest", + call: func(ctx context.Context, client accessproto.AccessAPIClient, _ interface{}) (accessproto.AccessAPI_SubscribeBlocksFromLatestClient, error) { + return client.SubscribeBlocksFromLatest(ctx, &accessproto.SubscribeBlocksFromLatestRequest{ + BlockStatus: entities.BlockStatus_BLOCK_FINALIZED, + FullBlockResponse: true, + }) + }, + }, + { + name: "SubscribeBlocksFromStartBlockID", + call: func(ctx context.Context, client accessproto.AccessAPIClient, startValue interface{}) (accessproto.AccessAPI_SubscribeBlocksFromLatestClient, error) { + return client.SubscribeBlocksFromStartBlockID(ctx, &accessproto.SubscribeBlocksFromStartBlockIDRequest{ + StartBlockId: startValue.([]byte), + BlockStatus: entities.BlockStatus_BLOCK_FINALIZED, + FullBlockResponse: true, + }) + }, + }, + { + name: "SubscribeBlocksFromStartHeight", + call: func(ctx context.Context, client accessproto.AccessAPIClient, startValue interface{}) (accessproto.AccessAPI_SubscribeBlocksFromLatestClient, error) { + return client.SubscribeBlocksFromStartHeight(ctx, &accessproto.SubscribeBlocksFromStartHeightRequest{ + StartBlockHeight: startValue.(uint64), + BlockStatus: entities.BlockStatus_BLOCK_FINALIZED, + FullBlockResponse: true, + }) + }, + }, + } +} + +func getAccessAPIClient(address string) 
(accessproto.AccessAPIClient, error) { + conn, err := grpc.Dial(address, grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + return nil, err + } + + return accessproto.NewAccessAPIClient(conn), nil +} From 324d46d61ffa68a9c3d2f58a0ec78a4c3dcf35eb Mon Sep 17 00:00:00 2001 From: Andrii Slisarchuk Date: Wed, 3 Apr 2024 22:24:45 +0300 Subject: [PATCH 058/148] handle missing results --- access/api.go | 15 -- access/handler.go | 21 ++- .../backend/backend_stream_transactions.go | 153 ++++++++++------ .../backend_stream_transactions_test.go | 170 ++++++++---------- 4 files changed, 196 insertions(+), 163 deletions(-) diff --git a/access/api.go b/access/api.go index 72fe855f98b..3201796c6ed 100644 --- a/access/api.go +++ b/access/api.go @@ -212,21 +212,6 @@ type TransactionResult struct { BlockHeight uint64 } -// TransactionSubscribeInfo represents information about a subscribed transaction. -// It contains the ID of the transaction, its status, and the index of the associated message. 
-type TransactionSubscribeInfo struct { - Result *TransactionResult - MessageIndex uint64 -} - -// TransactionSubscribeInfoToMessage converts a TransactionSubscribeInfo struct to a protobuf message -func TransactionSubscribeInfoToMessage(data *TransactionSubscribeInfo) *access.SendAndSubscribeTransactionStatusesResponse { - return &access.SendAndSubscribeTransactionStatusesResponse{ - TransactionResults: TransactionResultToMessage(data.Result), - MessageIndex: data.MessageIndex, - } -} - func TransactionResultToMessage(result *TransactionResult) *access.TransactionResultResponse { return &access.TransactionResultResponse{ Status: entities.TransactionStatus(result.Status), diff --git a/access/handler.go b/access/handler.go index c050964fe25..adf6e944c8a 100644 --- a/access/handler.go +++ b/access/handler.go @@ -14,6 +14,7 @@ import ( "github.com/onflow/flow-go/engine/common/rpc/convert" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/counters" "github.com/onflow/flow/protobuf/go/flow/access" "github.com/onflow/flow/protobuf/go/flow/entities" @@ -1113,10 +1114,22 @@ func (h *Handler) SendAndSubscribeTransactionStatuses( } sub := h.api.SubscribeTransactionStatuses(ctx, &tx) - return subscription.HandleSubscription(sub, func(txSubInfo *TransactionSubscribeInfo) error { - err = stream.Send(TransactionSubscribeInfoToMessage(txSubInfo)) - if err != nil { - return rpc.ConvertError(err, "could not send response", codes.Internal) + + messageIndex := counters.NewMonotonousCounter(0) + return subscription.HandleSubscription(sub, func(txResults []*TransactionResult) error { + for i := range txResults { + value := messageIndex.Value() + if ok := messageIndex.Set(value + 1); !ok { + return status.Errorf(codes.Internal, "the message index has already been incremented to %d", messageIndex.Value()) + } + + err = stream.Send(&access.SendAndSubscribeTransactionStatusesResponse{ + TransactionResults: 
TransactionResultToMessage(txResults[i]), + MessageIndex: value, + }) + if err != nil { + return rpc.ConvertError(err, "could not send response", codes.Internal) + } } return nil diff --git a/engine/access/rpc/backend/backend_stream_transactions.go b/engine/access/rpc/backend/backend_stream_transactions.go index d5188178b65..2bdec081c06 100644 --- a/engine/access/rpc/backend/backend_stream_transactions.go +++ b/engine/access/rpc/backend/backend_stream_transactions.go @@ -16,7 +16,6 @@ import ( "github.com/onflow/flow-go/engine/access/subscription" "github.com/onflow/flow-go/engine/common/rpc" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module/counters" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/state" "github.com/onflow/flow-go/storage" @@ -39,9 +38,8 @@ type backendSubscribeTransactions struct { // TransactionSubscriptionMetadata holds data representing the status state for each transaction subscription. type TransactionSubscriptionMetadata struct { - txResult *access.TransactionResult + *access.TransactionResult txReferenceBlockID flow.Identifier - messageIndex counters.StrictMonotonousCounter blockWithTx *flow.Header txExecuted bool } @@ -55,13 +53,12 @@ func (b *backendSubscribeTransactions) SubscribeTransactionStatuses(ctx context. } txInfo := TransactionSubscriptionMetadata{ - txResult: &access.TransactionResult{ + TransactionResult: &access.TransactionResult{ TransactionID: tx.ID(), BlockID: flow.ZeroID, Status: flow.TransactionStatusUnknown, }, txReferenceBlockID: tx.ReferenceBlockID, - messageIndex: counters.NewMonotonousCounter(0), blockWithTx: nil, } @@ -80,31 +77,23 @@ func (b *backendSubscribeTransactions) SubscribeTransactionStatuses(ctx context. // subscription responses based on new blocks. 
func (b *backendSubscribeTransactions) getTransactionStatusResponse(txInfo *TransactionSubscriptionMetadata) func(context.Context, uint64) (interface{}, error) { return func(ctx context.Context, height uint64) (interface{}, error) { - // Get the highest available finalized block height - highestHeight, err := b.blockTracker.GetHighestHeight(flow.BlockStatusFinalized) + err := b.validateBlockHeight(height) if err != nil { - return nil, fmt.Errorf("could not get highest height for block %d: %w", height, err) - } - - // Fail early if no block finalized notification has been received for the given height. - // Note: It's possible that the block is locally finalized before the notification is - // received. This ensures a consistent view is available to all streams. - if height > highestHeight { - return nil, fmt.Errorf("block %d is not available yet: %w", height, subscription.ErrBlockNotReady) + return nil, err } // If the transaction status already reported the final status, return with no data available - if txInfo.txResult.Status == flow.TransactionStatusSealed || txInfo.txResult.Status == flow.TransactionStatusExpired { - return nil, fmt.Errorf("transaction final status %s was already reported: %w", txInfo.txResult.Status.String(), subscription.ErrEndOfData) + if txInfo.Status == flow.TransactionStatusSealed || txInfo.Status == flow.TransactionStatusExpired { + return nil, fmt.Errorf("transaction final status %s was already reported: %w", txInfo.Status.String(), subscription.ErrEndOfData) } // If on this step transaction block not available, search for it. if txInfo.blockWithTx == nil { // Search for transaction`s block information. 
txInfo.blockWithTx, - txInfo.txResult.BlockID, - txInfo.txResult.BlockHeight, - txInfo.txResult.CollectionID, + txInfo.BlockID, + txInfo.BlockHeight, + txInfo.CollectionID, err = b.searchForTransactionBlockInfo(height, txInfo) if err != nil { @@ -118,30 +107,31 @@ func (b *backendSubscribeTransactions) getTransactionStatusResponse(txInfo *Tran } } - // Find the transaction status. - var txStatus flow.TransactionStatus - var txResult *access.TransactionResult + // Get old status here, as it could be replaced by status from founded tx result + prevTxStatus := txInfo.Status - // If block with transaction was not found, get transaction status to check if it different from last status - if txInfo.blockWithTx == nil { - txStatus, err = b.txLocalDataProvider.DeriveUnknownTransactionStatus(txInfo.txReferenceBlockID) - } else { - // Check, if transaction executed and transaction result already available - if !txInfo.txExecuted { - txResult, err = b.searchForTransactionResult(ctx, txInfo.txResult.BlockID, txInfo.txResult.TransactionID) - if err != nil { - return nil, status.Errorf(codes.Internal, "failed to get execution result for block %s: %v", txInfo.txResult.BlockID, err) - } - //Fill in execution status for future usages - txInfo.txExecuted = txResult != nil + // Check, if transaction executed and transaction result already available + if txInfo.blockWithTx != nil && !txInfo.txExecuted { + txResult, err := b.searchForTransactionResult(ctx, txInfo.BlockID, txInfo.TransactionID) + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to get execution result for block %s: %v", txInfo.BlockID, err) } // If transaction result was found, fully replace it in metadata. New transaction status already included in result. 
if txResult != nil { - txInfo.txResult = txResult - } else { - //If transaction result was not found or already filed in, get transaction status to check if it different from last status - txStatus, err = b.txLocalDataProvider.DeriveTransactionStatus(txInfo.txResult.BlockID, txInfo.blockWithTx.Height, txInfo.txExecuted) + txInfo.TransactionResult = txResult + //Fill in execution status for future usages + txInfo.txExecuted = true + } + } + + // If block with transaction was not found, get transaction status to check if it different from last status + if txInfo.blockWithTx == nil { + txInfo.Status, err = b.txLocalDataProvider.DeriveUnknownTransactionStatus(txInfo.txReferenceBlockID) + } else { + //If transaction result was not found, get transaction status to check if it different from last status + if txInfo.Status == prevTxStatus { + txInfo.Status, err = b.txLocalDataProvider.DeriveTransactionStatus(txInfo.BlockID, txInfo.blockWithTx.Height, txInfo.txExecuted) } } if err != nil { @@ -152,25 +142,82 @@ func (b *backendSubscribeTransactions) getTransactionStatusResponse(txInfo *Tran } // The same transaction status should not be reported, so return here with no response - if txInfo.txResult.Status == txStatus { + if prevTxStatus == txInfo.Status { return nil, nil } - // If the current transaction status different from the last available status, assign new status to result - if txResult == nil { - txInfo.txResult.Status = txStatus - } + return b.generateResultsWithMissingStatuses(txInfo, prevTxStatus) + } +} - messageIndex := txInfo.messageIndex.Value() - if ok := txInfo.messageIndex.Set(messageIndex + 1); !ok { - return nil, status.Errorf(codes.Internal, "the message index has already been incremented to %d", txInfo.messageIndex.Value()) - } +// generateResultsWithMissingStatuses checks if the current result differs from the previous result by more than one step. +// If yes, it generates results for the missing transaction statuses. 
This is done because the subscription should send +// responses for each of the statuses in the transaction lifecycle, and the message should be sent in the order of transaction statuses. +// Possible orders of transaction statuses: +// 1. pending(1) -> finalized(2) -> executed(3) -> sealed(4) +// 2. pending(1) -> expired(5) +func (b *backendSubscribeTransactions) generateResultsWithMissingStatuses( + txInfo *TransactionSubscriptionMetadata, + prevTxStatus flow.TransactionStatus, +) ([]*access.TransactionResult, error) { - return &access.TransactionSubscribeInfo{ - Result: txInfo.txResult, - MessageIndex: messageIndex, + // If the status is expired, which is the last status, return its result. + if txInfo.Status == flow.TransactionStatusExpired { + return []*access.TransactionResult{ + txInfo.TransactionResult, }, nil } + + var results []*access.TransactionResult + + // If the difference between statuses' values is more than one step, fill in the missing results. + if txInfo.Status-prevTxStatus > 1 { + for missingStatus := prevTxStatus + 1; missingStatus < txInfo.Status; missingStatus++ { + var missingTxResult *access.TransactionResult + switch missingStatus { + case flow.TransactionStatusPending: + missingTxResult = &access.TransactionResult{ + Status: missingStatus, + TransactionID: txInfo.TransactionID, + } + case flow.TransactionStatusFinalized: + missingTxResult = &access.TransactionResult{ + Status: missingStatus, + TransactionID: txInfo.TransactionID, + BlockID: txInfo.BlockID, + BlockHeight: txInfo.BlockHeight, + CollectionID: txInfo.CollectionID, + } + case flow.TransactionStatusExecuted: + missingTxResult = txInfo.TransactionResult + missingTxResult.Status = missingStatus + default: + return nil, fmt.Errorf("unexpected missing transaction status") + } + results = append(results, missingTxResult) + } + } + + results = append(results, txInfo.TransactionResult) + + return results, nil +} + +func (b *backendSubscribeTransactions) validateBlockHeight(height 
uint64) error { + // Get the highest available finalized block height + highestHeight, err := b.blockTracker.GetHighestHeight(flow.BlockStatusFinalized) + if err != nil { + return fmt.Errorf("could not get highest height for block %d: %w", height, err) + } + + // Fail early if no block finalized notification has been received for the given height. + // Note: It's possible that the block is locally finalized before the notification is + // received. This ensures a consistent view is available to all streams. + if height > highestHeight { + return fmt.Errorf("block %d is not available yet: %w", height, subscription.ErrBlockNotReady) + } + + return nil } // searchForTransactionBlockInfo searches for the block containing the specified transaction. @@ -187,7 +234,7 @@ func (b *backendSubscribeTransactions) searchForTransactionBlockInfo( return nil, flow.ZeroID, 0, flow.ZeroID, fmt.Errorf("error looking up block: %w", err) } - collectionID, err := b.txLocalDataProvider.LookupCollectionIDInBlock(block, txInfo.txResult.TransactionID) + collectionID, err := b.txLocalDataProvider.LookupCollectionIDInBlock(block, txInfo.TransactionID) if err != nil { return nil, flow.ZeroID, 0, flow.ZeroID, fmt.Errorf("error looking up transaction in block: %w", err) } @@ -199,7 +246,7 @@ func (b *backendSubscribeTransactions) searchForTransactionBlockInfo( return nil, flow.ZeroID, 0, flow.ZeroID, nil } -// searchForTransactionResult searches for the execution result of a block. It retrieves the execution result for the specified block ID. +// searchForTransactionResult searches for the transaction result of a block. It retrieves the execution result for the specified block ID. // Expected errors: // - codes.Internal if an internal error occurs while retrieving execution result. 
func (b *backendSubscribeTransactions) searchForTransactionResult( diff --git a/engine/access/rpc/backend/backend_stream_transactions_test.go b/engine/access/rpc/backend/backend_stream_transactions_test.go index 3a69ded6772..d7b6cd6f181 100644 --- a/engine/access/rpc/backend/backend_stream_transactions_test.go +++ b/engine/access/rpc/backend/backend_stream_transactions_test.go @@ -32,7 +32,6 @@ import ( "github.com/onflow/flow-go/engine/access/subscription" subscriptionmock "github.com/onflow/flow-go/engine/access/subscription/mock" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module/counters" "github.com/onflow/flow-go/module/metrics" protocol "github.com/onflow/flow-go/state/protocol/mock" storagemock "github.com/onflow/flow-go/storage/mock" @@ -139,16 +138,6 @@ func (s *TransactionStatusSuite) SetupTest() { s.reporter = syncmock.NewIndexReporter(s.T()) s.blocks.On("ByHeight", mock.AnythingOfType("uint64")).Return(mocks.StorageMapGetter(s.blockMap)) - s.blocks.On("ByID", mock.AnythingOfType("flow.Identifier")).Return(func(blockID flow.Identifier) (*flow.Block, error) { - for _, block := range s.blockMap { - if block.ID() == blockID { - return block, nil - } - } - - return nil, nil - }, nil) - s.state.On("Final").Return(s.finalSnapshot, nil) s.state.On("AtBlockID", mock.AnythingOfType("flow.Identifier")).Return(func(blockID flow.Identifier) protocolint.Snapshot { s.tempSnapshot.On("Head").Unset() @@ -180,11 +169,6 @@ func (s *TransactionStatusSuite) SetupTest() { }, nil) backendParams := s.backendParams() - s.reporter.On("LowestIndexedHeight").Return(s.rootBlock.Header.Height, nil) - s.reporter.On("HighestIndexedHeight").Return(func() (uint64, error) { - finalizedHeader := s.finalizedBlock.Header - return finalizedHeader.Height, nil - }, nil) err := backendParams.EventsIndex.Initialize(s.reporter) require.NoError(s.T(), err) err = backendParams.TxResultsIndex.Initialize(s.reporter) @@ -244,6 +228,20 @@ func (s *TransactionStatusSuite) 
TestSubscribeTransactionStatusHappyCase() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() + s.reporter.On("LowestIndexedHeight").Return(s.rootBlock.Header.Height, nil) + s.reporter.On("HighestIndexedHeight").Return(func() (uint64, error) { + finalizedHeader := s.finalizedBlock.Header + return finalizedHeader.Height, nil + }, nil) + s.blocks.On("ByID", mock.AnythingOfType("flow.Identifier")).Return(func(blockID flow.Identifier) (*flow.Block, error) { + for _, block := range s.blockMap { + if block.ID() == blockID { + return block, nil + } + } + + return nil, nil + }, nil) s.sealedSnapshot.On("Head").Return(func() *flow.Header { return s.sealedBlock.Header }, nil) @@ -281,8 +279,6 @@ func (s *TransactionStatusSuite) TestSubscribeTransactionStatusHappyCase() { mock.AnythingOfType("flow.Identifier"), ).Return(&txResult, nil) - expectedMsgIndexCounter := counters.NewMonotonousCounter(0) - // Create a special common function to read subscription messages from the channel and check converting it to transaction info // and check results for correctness checkNewSubscriptionMessage := func(sub subscription.Subscription, expectedTxStatus flow.TransactionStatus) { @@ -292,16 +288,13 @@ func (s *TransactionStatusSuite) TestSubscribeTransactionStatusHappyCase() { "channel closed while waiting for transaction info:\n\t- txID %x\n\t- blockID: %x \n\t- err: %v", txId, s.finalizedBlock.ID(), sub.Err()) - txInfo, ok := v.(*accessapi.TransactionSubscribeInfo) + txResults, ok := v.([]*accessapi.TransactionResult) require.True(s.T(), ok, "unexpected response type: %T", v) + require.Len(s.T(), txResults, 1) - assert.Equal(s.T(), txId, txInfo.Result.TransactionID) - assert.Equal(s.T(), expectedTxStatus, txInfo.Result.Status) - - expectedMsgIndex := expectedMsgIndexCounter.Value() - assert.Equal(s.T(), expectedMsgIndex, txInfo.MessageIndex) - wasSet := expectedMsgIndexCounter.Set(expectedMsgIndex + 1) - require.True(s.T(), wasSet) + result := txResults[0] + 
assert.Equal(s.T(), txId, result.TransactionID) + assert.Equal(s.T(), expectedTxStatus, result.Status) }, 60*time.Second, fmt.Sprintf("timed out waiting for transaction info:\n\t- txID: %x\n\t- blockID: %x", txId, s.finalizedBlock.ID())) } @@ -341,68 +334,63 @@ func (s *TransactionStatusSuite) TestSubscribeTransactionStatusHappyCase() { }, 100*time.Millisecond, "timed out waiting for subscription to shutdown") } -//// TestSubscribeTransactionStatusExpired tests the functionality of the SubscribeTransactionStatuses method in the Backend -//// when transaction become expired -//func (s *TransactionStatusSuite) TestSubscribeTransactionStatusExpired() { -// ctx, cancel := context.WithCancel(context.Background()) -// defer cancel() -// -// s.blocks.On("GetLastFullBlockHeight").Return(func() (uint64, error) { -// return s.sealedBlock.Header.Height, nil -// }, nil) -// -// // Generate sent transaction with ref block of the current finalized block -// transaction := unittest.TransactionFixture() -// transaction.SetReferenceBlockID(s.finalizedBlock.ID()) -// txId := transaction.ID() -// -// expectedMsgIndexCounter := counters.NewMonotonousCounter(0) -// -// // Create a special common function to read subscription messages from the channel and check converting it to transaction info -// // and check results for correctness -// checkNewSubscriptionMessage := func(sub subscription.Subscription, expectedTxStatus flow.TransactionStatus) { -// unittest.RequireReturnsBefore(s.T(), func() { -// v, ok := <-sub.Channel() -// require.True(s.T(), ok, -// "channel closed while waiting for transaction info:\n\t- txID %x\n\t- blockID: %x \n\t- err: %v", -// txId, s.finalizedBlock.ID(), sub.Err()) -// -// txInfo, ok := v.(*accessapi.TransactionSubscribeInfo) -// require.True(s.T(), ok, "unexpected response type: %T", v) -// -// assert.Equal(s.T(), txId, txInfo.Result.TransactionID) -// assert.Equal(s.T(), expectedTxStatus, txInfo.Result.Status) -// -// expectedMsgIndex := 
expectedMsgIndexCounter.Value() -// assert.Equal(s.T(), expectedMsgIndex, txInfo.MessageIndex) -// wasSet := expectedMsgIndexCounter.Set(expectedMsgIndex + 1) -// require.True(s.T(), wasSet) -// }, time.Second, fmt.Sprintf("timed out waiting for transaction info:\n\t- txID: %x\n\t- blockID: %x", txId, s.finalizedBlock.ID())) -// } -// -// // Subscribe to transaction status and receive the first message with pending status -// sub := s.backend.SubscribeTransactionStatuses(ctx, &transaction.TransactionBody) -// checkNewSubscriptionMessage(sub, flow.TransactionStatusPending) -// -// // Generate 600 blocks without transaction included and check, that transaction still pending -// startHeight := s.finalizedBlock.Header.Height + 1 -// lastHeight := startHeight + flow.DefaultTransactionExpiry -// -// for i := startHeight; i <= lastHeight; i++ { -// s.sealedBlock = s.finalizedBlock -// s.addNewFinalizedBlock(s.sealedBlock.Header, false) -// } -// -// // Generate final blocks and check transaction expired -// s.sealedBlock = s.finalizedBlock -// s.addNewFinalizedBlock(s.sealedBlock.Header, true) -// checkNewSubscriptionMessage(sub, flow.TransactionStatusExpired) -// -// // Ensure subscription shuts down gracefully -// unittest.RequireReturnsBefore(s.T(), func() { -// v, ok := <-sub.Channel() -// assert.Nil(s.T(), v) -// assert.False(s.T(), ok) -// assert.NoError(s.T(), sub.Err()) -// }, 100*time.Millisecond, "timed out waiting for subscription to shutdown") -//} +// TestSubscribeTransactionStatusExpired tests the functionality of the SubscribeTransactionStatuses method in the Backend +// when transaction become expired +func (s *TransactionStatusSuite) TestSubscribeTransactionStatusExpired() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + s.blocks.On("GetLastFullBlockHeight").Return(func() (uint64, error) { + return s.sealedBlock.Header.Height, nil + }, nil) + + // Generate sent transaction with ref block of the current finalized block + 
transaction := unittest.TransactionFixture() + transaction.SetReferenceBlockID(s.finalizedBlock.ID()) + txId := transaction.ID() + + // Create a special common function to read subscription messages from the channel and check converting it to transaction info + // and check results for correctness + checkNewSubscriptionMessage := func(sub subscription.Subscription, expectedTxStatus flow.TransactionStatus) { + unittest.RequireReturnsBefore(s.T(), func() { + v, ok := <-sub.Channel() + require.True(s.T(), ok, + "channel closed while waiting for transaction info:\n\t- txID %x\n\t- blockID: %x \n\t- err: %v", + txId, s.finalizedBlock.ID(), sub.Err()) + + txResults, ok := v.([]*accessapi.TransactionResult) + require.True(s.T(), ok, "unexpected response type: %T", v) + require.Len(s.T(), txResults, 1) + + result := txResults[0] + assert.Equal(s.T(), txId, result.TransactionID) + assert.Equal(s.T(), expectedTxStatus, result.Status) + }, time.Second, fmt.Sprintf("timed out waiting for transaction info:\n\t- txID: %x\n\t- blockID: %x", txId, s.finalizedBlock.ID())) + } + + // Subscribe to transaction status and receive the first message with pending status + sub := s.backend.SubscribeTransactionStatuses(ctx, &transaction.TransactionBody) + checkNewSubscriptionMessage(sub, flow.TransactionStatusPending) + + // Generate 600 blocks without transaction included and check, that transaction still pending + startHeight := s.finalizedBlock.Header.Height + 1 + lastHeight := startHeight + flow.DefaultTransactionExpiry + + for i := startHeight; i <= lastHeight; i++ { + s.sealedBlock = s.finalizedBlock + s.addNewFinalizedBlock(s.sealedBlock.Header, false) + } + + // Generate final blocks and check transaction expired + s.sealedBlock = s.finalizedBlock + s.addNewFinalizedBlock(s.sealedBlock.Header, true) + checkNewSubscriptionMessage(sub, flow.TransactionStatusExpired) + + // Ensure subscription shuts down gracefully + unittest.RequireReturnsBefore(s.T(), func() { + v, ok := 
<-sub.Channel() + assert.Nil(s.T(), v) + assert.False(s.T(), ok) + assert.NoError(s.T(), sub.Err()) + }, 100*time.Millisecond, "timed out waiting for subscription to shutdown") +} From 53199a4c8451052649872a07c1f2a200fa825a5c Mon Sep 17 00:00:00 2001 From: Andrii Slisarchuk Date: Wed, 3 Apr 2024 22:58:04 +0300 Subject: [PATCH 059/148] Added checks to integration test --- integration/go.mod | 2 ++ integration/go.sum | 4 ++-- .../tests/access/cohort1/access_api_test.go | 14 ++++++++------ 3 files changed, 12 insertions(+), 8 deletions(-) diff --git a/integration/go.mod b/integration/go.mod index 5be02d5e841..03c071a5b2f 100644 --- a/integration/go.mod +++ b/integration/go.mod @@ -361,3 +361,5 @@ require ( replace github.com/onflow/flow-go => ../ replace github.com/onflow/flow-go/insecure => ../insecure + +replace github.com/onflow/flow/protobuf/go/flow v0.3.7-0.20240305102946-3efec6679252 => github.com/The-K-R-O-K/flow/protobuf/go/flow v0.0.0-20240326125130-d668d54a6c4c diff --git a/integration/go.sum b/integration/go.sum index 871681d7240..af447da7745 100644 --- a/integration/go.sum +++ b/integration/go.sum @@ -112,6 +112,8 @@ github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMx github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= +github.com/The-K-R-O-K/flow/protobuf/go/flow v0.0.0-20240326125130-d668d54a6c4c h1:TUP9qXmzeERCNZ5HAxh99epDSnfxA7Ksyv/71n+avdw= +github.com/The-K-R-O-K/flow/protobuf/go/flow v0.0.0-20240326125130-d668d54a6c4c/go.mod h1:NA2pX2nw8zuaxfKphhKsk00kWLwfd+tv8mS23YXO4Sk= github.com/VictoriaMetrics/fastcache v1.5.3/go.mod h1:+jv9Ckb+za/P1ZRg/sulP5Ni1v49daAVERr0H3CuscE= github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40= 
github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o= @@ -1425,8 +1427,6 @@ github.com/onflow/flow-go/crypto v0.21.3/go.mod h1:vI6V4CY3R6c4JKBxdcRiR/AnjBfL8 github.com/onflow/flow-nft/lib/go/contracts v1.1.0 h1:rhUDeD27jhLwOqQKI/23008CYfnqXErrJvc4EFRP2a0= github.com/onflow/flow-nft/lib/go/contracts v1.1.0/go.mod h1:YsvzYng4htDgRB9sa9jxdwoTuuhjK8WYWXTyLkIigZY= github.com/onflow/flow/protobuf/go/flow v0.2.2/go.mod h1:gQxYqCfkI8lpnKsmIjwtN2mV/N2PIwc1I+RUK4HPIc8= -github.com/onflow/flow/protobuf/go/flow v0.3.7-0.20240305102946-3efec6679252 h1:W0xm80Qc5RkFJw7yQIj7OiMacCZw3et/tx/5N9rN2qk= -github.com/onflow/flow/protobuf/go/flow v0.3.7-0.20240305102946-3efec6679252/go.mod h1:NA2pX2nw8zuaxfKphhKsk00kWLwfd+tv8mS23YXO4Sk= github.com/onflow/go-ethereum v1.13.4 h1:iNO86fm8RbBbhZ87ZulblInqCdHnAQVY8okBrNsTevc= github.com/onflow/go-ethereum v1.13.4/go.mod h1:cE/gEUkAffhwbVmMJYz+t1dAfVNHNwZCgc3BWtZxBGY= github.com/onflow/nft-storefront/lib/go/contracts v0.0.0-20221222181731-14b90207cead h1:2j1Unqs76Z1b95Gu4C3Y28hzNUHBix7wL490e61SMSw= diff --git a/integration/tests/access/cohort1/access_api_test.go b/integration/tests/access/cohort1/access_api_test.go index 1cbf5b191c4..85de46bbd7b 100644 --- a/integration/tests/access/cohort1/access_api_test.go +++ b/integration/tests/access/cohort1/access_api_test.go @@ -283,7 +283,7 @@ func (s *AccessAPISuite) TestSendAndSubscribeTransactionStatuses() { s.Require().NoError(err) expectedCounter := uint64(0) - var finalTxStatus entities.TransactionStatus + lastReportedTxStatus := entities.TransactionStatus_UNKNOWN var txID sdk.Identifier for { @@ -297,17 +297,19 @@ func (s *AccessAPISuite) TestSendAndSubscribeTransactionStatuses() { } if txID == sdk.EmptyID { - txID = sdk.Identifier(resp.GetId()) + txID = sdk.Identifier(resp.TransactionResults.TransactionId) } s.Assert().Equal(expectedCounter, resp.GetMessageIndex()) - s.Assert().Equal(txID, sdk.Identifier(resp.GetId())) + s.Assert().Equal(txID, 
sdk.Identifier(resp.TransactionResults.TransactionId)) + // Check if all statuses received one by one. The subscription should send responses for each of the statuses, + // and the message should be sent in the order of transaction statuses. + // Expected order: pending(1) -> finalized(2) -> executed(3) -> sealed(4) + s.Assert().Equal(lastReportedTxStatus, resp.TransactionResults.Status-1) expectedCounter++ - finalTxStatus = resp.Status + lastReportedTxStatus = resp.TransactionResults.Status } - - s.Assert().Equal(entities.TransactionStatus_SEALED, finalTxStatus) } func (s *AccessAPISuite) testGetAccount(client *client.Client) { From afd1ca5a1025f7456c647b28ce2ff1a2672d9eee Mon Sep 17 00:00:00 2001 From: Andrii Slisarchuk Date: Thu, 4 Apr 2024 10:36:04 +0300 Subject: [PATCH 060/148] Fixed insecure lint --- insecure/go.mod | 2 ++ insecure/go.sum | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/insecure/go.mod b/insecure/go.mod index a60c62cf447..45a55e5a3ac 100644 --- a/insecure/go.mod +++ b/insecure/go.mod @@ -304,3 +304,5 @@ require ( ) replace github.com/onflow/flow-go => ../ + +replace github.com/onflow/flow/protobuf/go/flow v0.3.7-0.20240329135840-9f04832ba3a1 => github.com/The-K-R-O-K/flow/protobuf/go/flow v0.0.0-20240403200719-b93bb12059cf diff --git a/insecure/go.sum b/insecure/go.sum index 2697bc58643..6fd8e1063c0 100644 --- a/insecure/go.sum +++ b/insecure/go.sum @@ -101,6 +101,8 @@ github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMx github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= +github.com/The-K-R-O-K/flow/protobuf/go/flow v0.0.0-20240403200719-b93bb12059cf h1:We/0OFP0IDdtBSvULjBh7xP6BVGDWhfPGT1vgA0PKe8= +github.com/The-K-R-O-K/flow/protobuf/go/flow 
v0.0.0-20240403200719-b93bb12059cf/go.mod h1:NA2pX2nw8zuaxfKphhKsk00kWLwfd+tv8mS23YXO4Sk= github.com/VictoriaMetrics/fastcache v1.5.3/go.mod h1:+jv9Ckb+za/P1ZRg/sulP5Ni1v49daAVERr0H3CuscE= github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40= github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o= @@ -1333,8 +1335,6 @@ github.com/onflow/flow-go/crypto v0.21.3/go.mod h1:vI6V4CY3R6c4JKBxdcRiR/AnjBfL8 github.com/onflow/flow-nft/lib/go/contracts v1.1.0 h1:rhUDeD27jhLwOqQKI/23008CYfnqXErrJvc4EFRP2a0= github.com/onflow/flow-nft/lib/go/contracts v1.1.0/go.mod h1:YsvzYng4htDgRB9sa9jxdwoTuuhjK8WYWXTyLkIigZY= github.com/onflow/flow/protobuf/go/flow v0.2.2/go.mod h1:gQxYqCfkI8lpnKsmIjwtN2mV/N2PIwc1I+RUK4HPIc8= -github.com/onflow/flow/protobuf/go/flow v0.3.7-0.20240329135840-9f04832ba3a1 h1:A2TJR22bK2QzwU+qkp6KL3g4/3kCAOLAhDDCX07QBms= -github.com/onflow/flow/protobuf/go/flow v0.3.7-0.20240329135840-9f04832ba3a1/go.mod h1:NA2pX2nw8zuaxfKphhKsk00kWLwfd+tv8mS23YXO4Sk= github.com/onflow/go-ethereum v1.13.4 h1:iNO86fm8RbBbhZ87ZulblInqCdHnAQVY8okBrNsTevc= github.com/onflow/go-ethereum v1.13.4/go.mod h1:cE/gEUkAffhwbVmMJYz+t1dAfVNHNwZCgc3BWtZxBGY= github.com/onflow/sdks v0.5.0 h1:2HCRibwqDaQ1c9oUApnkZtEAhWiNY2GTpRD5+ftdkN8= From f319aa252dd6e41bd38f204b700b17f83f6783d6 Mon Sep 17 00:00:00 2001 From: Andrii Slisarchuk Date: Thu, 4 Apr 2024 11:54:18 +0300 Subject: [PATCH 061/148] updated protobuf --- go.mod | 2 +- go.sum | 4 ++-- insecure/go.mod | 2 +- insecure/go.sum | 4 ++-- integration/go.mod | 2 +- integration/go.sum | 4 ++-- 6 files changed, 9 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index a91e6cdacfe..7ec53fca0e1 100644 --- a/go.mod +++ b/go.mod @@ -321,4 +321,4 @@ require ( // Using custom fork until https://github.com/onflow/flow-go/issues/5338 is resolved replace github.com/ipfs/boxo => github.com/onflow/boxo v0.0.0-20240201202436-f2477b92f483 -replace 
github.com/onflow/flow/protobuf/go/flow v0.3.7-0.20240329135840-9f04832ba3a1 => github.com/The-K-R-O-K/flow/protobuf/go/flow v0.0.0-20240403200719-b93bb12059cf +replace github.com/onflow/flow/protobuf/go/flow v0.3.7-0.20240329135840-9f04832ba3a1 => github.com/The-K-R-O-K/flow/protobuf/go/flow v0.0.0-20240404084456-05c4e0c60d0f diff --git a/go.sum b/go.sum index a68b9049f79..65e178e2f2b 100644 --- a/go.sum +++ b/go.sum @@ -111,8 +111,8 @@ github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMx github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= -github.com/The-K-R-O-K/flow/protobuf/go/flow v0.0.0-20240403200719-b93bb12059cf h1:We/0OFP0IDdtBSvULjBh7xP6BVGDWhfPGT1vgA0PKe8= -github.com/The-K-R-O-K/flow/protobuf/go/flow v0.0.0-20240403200719-b93bb12059cf/go.mod h1:NA2pX2nw8zuaxfKphhKsk00kWLwfd+tv8mS23YXO4Sk= +github.com/The-K-R-O-K/flow/protobuf/go/flow v0.0.0-20240404084456-05c4e0c60d0f h1:RakpkBSG7W1MG/R3VP1/0e+7PCzLB0DG+M4gN66gCyQ= +github.com/The-K-R-O-K/flow/protobuf/go/flow v0.0.0-20240404084456-05c4e0c60d0f/go.mod h1:NA2pX2nw8zuaxfKphhKsk00kWLwfd+tv8mS23YXO4Sk= github.com/VictoriaMetrics/fastcache v1.5.3/go.mod h1:+jv9Ckb+za/P1ZRg/sulP5Ni1v49daAVERr0H3CuscE= github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40= github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o= diff --git a/insecure/go.mod b/insecure/go.mod index 45a55e5a3ac..49a0a0ce6c1 100644 --- a/insecure/go.mod +++ b/insecure/go.mod @@ -305,4 +305,4 @@ require ( replace github.com/onflow/flow-go => ../ -replace github.com/onflow/flow/protobuf/go/flow v0.3.7-0.20240329135840-9f04832ba3a1 => github.com/The-K-R-O-K/flow/protobuf/go/flow 
v0.0.0-20240403200719-b93bb12059cf +replace github.com/onflow/flow/protobuf/go/flow v0.3.7-0.20240329135840-9f04832ba3a1 => github.com/The-K-R-O-K/flow/protobuf/go/flow v0.0.0-20240404084456-05c4e0c60d0f diff --git a/insecure/go.sum b/insecure/go.sum index 6fd8e1063c0..3a2ec836558 100644 --- a/insecure/go.sum +++ b/insecure/go.sum @@ -101,8 +101,8 @@ github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMx github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= -github.com/The-K-R-O-K/flow/protobuf/go/flow v0.0.0-20240403200719-b93bb12059cf h1:We/0OFP0IDdtBSvULjBh7xP6BVGDWhfPGT1vgA0PKe8= -github.com/The-K-R-O-K/flow/protobuf/go/flow v0.0.0-20240403200719-b93bb12059cf/go.mod h1:NA2pX2nw8zuaxfKphhKsk00kWLwfd+tv8mS23YXO4Sk= +github.com/The-K-R-O-K/flow/protobuf/go/flow v0.0.0-20240404084456-05c4e0c60d0f h1:RakpkBSG7W1MG/R3VP1/0e+7PCzLB0DG+M4gN66gCyQ= +github.com/The-K-R-O-K/flow/protobuf/go/flow v0.0.0-20240404084456-05c4e0c60d0f/go.mod h1:NA2pX2nw8zuaxfKphhKsk00kWLwfd+tv8mS23YXO4Sk= github.com/VictoriaMetrics/fastcache v1.5.3/go.mod h1:+jv9Ckb+za/P1ZRg/sulP5Ni1v49daAVERr0H3CuscE= github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40= github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o= diff --git a/integration/go.mod b/integration/go.mod index 00305e7f512..96ad8f08ad6 100644 --- a/integration/go.mod +++ b/integration/go.mod @@ -362,4 +362,4 @@ replace github.com/onflow/flow-go => ../ replace github.com/onflow/flow-go/insecure => ../insecure -replace github.com/onflow/flow/protobuf/go/flow v0.3.7-0.20240329135840-9f04832ba3a1 => github.com/The-K-R-O-K/flow/protobuf/go/flow v0.0.0-20240403200719-b93bb12059cf +replace 
github.com/onflow/flow/protobuf/go/flow v0.3.7-0.20240329135840-9f04832ba3a1 => github.com/The-K-R-O-K/flow/protobuf/go/flow v0.0.0-20240404084456-05c4e0c60d0f diff --git a/integration/go.sum b/integration/go.sum index a6f821108e0..28521292468 100644 --- a/integration/go.sum +++ b/integration/go.sum @@ -112,8 +112,8 @@ github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMx github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= -github.com/The-K-R-O-K/flow/protobuf/go/flow v0.0.0-20240403200719-b93bb12059cf h1:We/0OFP0IDdtBSvULjBh7xP6BVGDWhfPGT1vgA0PKe8= -github.com/The-K-R-O-K/flow/protobuf/go/flow v0.0.0-20240403200719-b93bb12059cf/go.mod h1:NA2pX2nw8zuaxfKphhKsk00kWLwfd+tv8mS23YXO4Sk= +github.com/The-K-R-O-K/flow/protobuf/go/flow v0.0.0-20240404084456-05c4e0c60d0f h1:RakpkBSG7W1MG/R3VP1/0e+7PCzLB0DG+M4gN66gCyQ= +github.com/The-K-R-O-K/flow/protobuf/go/flow v0.0.0-20240404084456-05c4e0c60d0f/go.mod h1:NA2pX2nw8zuaxfKphhKsk00kWLwfd+tv8mS23YXO4Sk= github.com/VictoriaMetrics/fastcache v1.5.3/go.mod h1:+jv9Ckb+za/P1ZRg/sulP5Ni1v49daAVERr0H3CuscE= github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40= github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o= From 286bcf329f4225e13d74cdb8799fd73bc70831fc Mon Sep 17 00:00:00 2001 From: Andrii Date: Thu, 4 Apr 2024 13:00:11 +0300 Subject: [PATCH 062/148] Fixed naming --- access/handler.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/access/handler.go b/access/handler.go index aeb5c03d2ec..a191f333662 100644 --- a/access/handler.go +++ b/access/handler.go @@ -708,9 +708,9 @@ func (h *Handler) GetExecutionResultForBlockID(ctx context.Context, req *access. 
func (h *Handler) GetExecutionResultByID(ctx context.Context, req *access.GetExecutionResultByIDRequest) (*access.ExecutionResultByIDResponse, error) { metadata := h.buildMetadataResponse() - resultId := convert.MessageToIdentifier(req.GetId()) + resultID := convert.MessageToIdentifier(req.GetId()) - result, err := h.api.GetExecutionResultByID(ctx, resultId) + result, err := h.api.GetExecutionResultByID(ctx, resultID) if err != nil { return nil, err } From 7df7be72c4885cebed2a1702a764b2119ca9feb1 Mon Sep 17 00:00:00 2001 From: UlyanaAndrukhiv Date: Thu, 4 Apr 2024 13:03:56 +0300 Subject: [PATCH 063/148] Linted --- .../access/cohort3/grpc_streaming_blocks_test.go | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/integration/tests/access/cohort3/grpc_streaming_blocks_test.go b/integration/tests/access/cohort3/grpc_streaming_blocks_test.go index 950db06047f..b87dff02645 100644 --- a/integration/tests/access/cohort3/grpc_streaming_blocks_test.go +++ b/integration/tests/access/cohort3/grpc_streaming_blocks_test.go @@ -6,17 +6,20 @@ import ( "testing" "time" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "github.com/onflow/flow-go/engine/common/rpc/convert" "github.com/onflow/flow-go/integration/testnet" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/utils/unittest" + accessproto "github.com/onflow/flow/protobuf/go/flow/access" "github.com/onflow/flow/protobuf/go/flow/entities" - "github.com/rs/zerolog" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" ) func TestGrpcBlocksStream(t *testing.T) { @@ -157,7 +160,7 @@ func (s *GrpcBlocksStreamSuite) TestHappyPath() { select { case err := <-accessBlockErrs: s.Require().NoErrorf(err, "unexpected AN error") - case _ = <-observerBlockErrs: + 
case err := <-observerBlockErrs: s.Require().NoErrorf(err, "unexpected ON error") case block := <-accessBlocks: s.T().Logf("AN block received: height: %d", block.Header.Height) From fdc9b864542fd55b78896335fa807662f52eeffd Mon Sep 17 00:00:00 2001 From: Andrii Date: Thu, 4 Apr 2024 14:44:08 +0300 Subject: [PATCH 064/148] Refactored --- .../cohort2/observer_indexer_enabled_test.go | 52 ++++++++++++------- 1 file changed, 32 insertions(+), 20 deletions(-) diff --git a/integration/tests/access/cohort2/observer_indexer_enabled_test.go b/integration/tests/access/cohort2/observer_indexer_enabled_test.go index cb7273f65a6..58ac0932861 100644 --- a/integration/tests/access/cohort2/observer_indexer_enabled_test.go +++ b/integration/tests/access/cohort2/observer_indexer_enabled_test.go @@ -378,25 +378,26 @@ func (s *ObserverIndexerEnabledSuite) TestAllObserverIndexedRPCsHappyPath() { return statusErr.Code() != codes.OutOfRange }, 30*time.Second, 1*time.Second) - log := unittest.LoggerForTest(s.Suite.T(), zerolog.InfoLevel) - log.Info().Msg("================> onverted.Payload.Results") - blockWithAccount, err := observerLocal.GetBlockByID(ctx, &accessproto.GetBlockByIDRequest{ Id: accountCreationTxRes.BlockID[:], FullBlockResponse: true, }) require.NoError(t, err) - eventsByBlockID, err := observerLocal.GetEventsForBlockIDs(ctx, &accessproto.GetEventsForBlockIDsRequest{ - Type: sdk.EventAccountCreated, - BlockIds: [][]byte{blockWithAccount.Block.Id}, - EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, - }) - require.NoError(s.T(), err) + //eventsByBlockID, err := observerLocal.GetEventsForBlockIDs(ctx, &accessproto.GetEventsForBlockIDsRequest{ + // Type: sdk.EventAccountCreated, + // BlockIds: [][]byte{blockWithAccount.Block.Id}, + // EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, + //}) + //require.NoError(s.T(), err) // GetEventsForBlockIDs - s.checkGetEventsForBlockIDsRPC(ctx, eventsByBlockID, observerUpstream, accessNode, 
[][]byte{blockWithAccount.Block.Id}) + s.checkGetEventsForBlockIDsRPC(ctx, observerLocal, observerUpstream, accessNode, [][]byte{blockWithAccount.Block.Id}) + + // GetEventsForHeightRange + s.checkGetEventsForHeightRangeRPC(ctx, observerLocal, observerUpstream, accessNode, blockWithAccount.Block.Height, blockWithAccount.Block.Height) + // Making GetEventsForHeightRange call to get events and then get txIndex eventsByHeight, err := observerLocal.GetEventsForHeightRange(ctx, &accessproto.GetEventsForHeightRangeRequest{ Type: sdk.EventAccountCreated, StartHeight: blockWithAccount.Block.Height, @@ -405,12 +406,6 @@ func (s *ObserverIndexerEnabledSuite) TestAllObserverIndexedRPCsHappyPath() { }) require.NoError(s.T(), err) - // GetEventsForHeightRange - s.checkGetEventsForHeightRangeRPC(ctx, eventsByHeight, observerUpstream, accessNode, blockWithAccount.Block.Height, blockWithAccount.Block.Height) - - // validate that there is an event that we are looking for - require.Equal(t, eventsByHeight.Results, eventsByBlockID.Results) - var txIndex uint32 found := false for _, eventsInBlock := range eventsByHeight.Results { @@ -443,7 +438,7 @@ func (s *ObserverIndexerEnabledSuite) TestAllObserverIndexedRPCsHappyPath() { s.checkGetTransactionResultRPC(ctx, observerLocal, observerUpstream, accessNode, accountCreationTxRes.TransactionID.Bytes(), blockWithAccount.Block.Id, accountCreationTxRes.CollectionID.Bytes()) //GetTransactionResultByIndex - s.checkGetTransactionResultsByIndexIDRPC(ctx, observerLocal, observerUpstream, accessNode, blockWithAccount.Block.Id, txIndex) + s.checkGetTransactionResultsByIndexRPC(ctx, observerLocal, observerUpstream, accessNode, blockWithAccount.Block.Id, txIndex) // GetTransactionResultsByBlockID s.checkGetTransactionResultsByBlockIDRPC(ctx, observerLocal, observerUpstream, accessNode, blockWithAccount.Block.Id) @@ -667,11 +662,19 @@ func (s *ObserverIndexerEnabledSuite) getRestEndpoints() []RestEndpointTest { func (s *ObserverIndexerEnabledSuite) 
checkGetEventsForBlockIDsRPC( ctx context.Context, - observerLocalResponse *accessproto.EventsResponse, + observerLocal accessproto.AccessAPIClient, observerUpstream accessproto.AccessAPIClient, accessNode accessproto.AccessAPIClient, blockIds [][]byte, ) { + + observerLocalResponse, err := observerLocal.GetEventsForBlockIDs(ctx, &accessproto.GetEventsForBlockIDsRequest{ + Type: sdk.EventAccountCreated, + BlockIds: blockIds, + EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, + }) + require.NoError(s.T(), err) + observerUpstreamResponse, err := observerUpstream.GetEventsForBlockIDs(ctx, &accessproto.GetEventsForBlockIDsRequest{ Type: sdk.EventAccountCreated, BlockIds: blockIds, @@ -692,12 +695,21 @@ func (s *ObserverIndexerEnabledSuite) checkGetEventsForBlockIDsRPC( func (s *ObserverIndexerEnabledSuite) checkGetEventsForHeightRangeRPC( ctx context.Context, - observerLocalResponse *accessproto.EventsResponse, + observerLocal accessproto.AccessAPIClient, observerUpstream accessproto.AccessAPIClient, accessNode accessproto.AccessAPIClient, startHeight uint64, endHeight uint64, ) { + + observerLocalResponse, err := observerLocal.GetEventsForHeightRange(ctx, &accessproto.GetEventsForHeightRangeRequest{ + Type: sdk.EventAccountCreated, + StartHeight: startHeight, + EndHeight: endHeight, + EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, + }) + require.NoError(s.T(), err) + observerUpstreamResponse, err := observerUpstream.GetEventsForHeightRange(ctx, &accessproto.GetEventsForHeightRangeRequest{ Type: sdk.EventAccountCreated, StartHeight: startHeight, @@ -903,7 +915,7 @@ func (s *ObserverIndexerEnabledSuite) checkGetTransactionResultsByBlockIDRPC( require.Equal(s.T(), accessNodeResponse.TransactionResults, observerUpstreamResponse.TransactionResults) } -func (s *ObserverIndexerEnabledSuite) checkGetTransactionResultsByIndexIDRPC( +func (s *ObserverIndexerEnabledSuite) checkGetTransactionResultsByIndexRPC( ctx context.Context, observerLocal 
accessproto.AccessAPIClient, observerUpstream accessproto.AccessAPIClient, From be28df2df611d96ceac716c6fb51ab805e39fc3d Mon Sep 17 00:00:00 2001 From: UlyanaAndrukhiv Date: Thu, 4 Apr 2024 15:25:12 +0300 Subject: [PATCH 065/148] Updated error message for integration test --- integration/tests/access/cohort3/grpc_state_stream_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integration/tests/access/cohort3/grpc_state_stream_test.go b/integration/tests/access/cohort3/grpc_state_stream_test.go index e946c2de9ae..f8b5dda8bb8 100644 --- a/integration/tests/access/cohort3/grpc_state_stream_test.go +++ b/integration/tests/access/cohort3/grpc_state_stream_test.go @@ -355,7 +355,7 @@ func SubscribeHandler[T any, V any]( return } - sendErr(fmt.Errorf("error receiving block: %w", err)) + sendErr(fmt.Errorf("error receiving response: %w", err)) return } From 3eff3b838e9d65020d8503c00e591abde64849a4 Mon Sep 17 00:00:00 2001 From: Janez Podhostnik Date: Thu, 4 Apr 2024 15:07:34 +0200 Subject: [PATCH 066/148] fix doc string --- integration/benchmark/cmd/ci/main.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integration/benchmark/cmd/ci/main.go b/integration/benchmark/cmd/ci/main.go index 4db99ae21cd..c3917f5b161 100644 --- a/integration/benchmark/cmd/ci/main.go +++ b/integration/benchmark/cmd/ci/main.go @@ -52,7 +52,7 @@ func main() { initialTPSFlag := flag.Int("tps-initial", 10, "starting transactions per second") maxTPSFlag := flag.Int("tps-max", *initialTPSFlag, "maximum transactions per second allowed") minTPSFlag := flag.Int("tps-min", *initialTPSFlag, "minimum transactions per second allowed") - loadTypeFlag := flag.String("load-type", string(defaultLoadType), "load type (token-transfer / const-exec / evm from the load config file)") + loadTypeFlag := flag.String("load-type", string(defaultLoadType), "load type (token-transfer / const-exec / evm) from the load config file") loadConfigFileLocationFlag := 
flag.String("load-config", "", "load config file location. If not provided, default config will be used.") adjustIntervalFlag := flag.Duration("tps-adjust-interval", defaultAdjustInterval, "interval for adjusting TPS") From 204e858faa27dcd5e6ad931f9b89424e560f3187 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Thu, 4 Apr 2024 09:42:45 -0700 Subject: [PATCH 067/148] validate checkpoint head file --- ledger/complete/wal/checkpoint_v6_reader.go | 68 ++++++++++++++++++++- 1 file changed, 66 insertions(+), 2 deletions(-) diff --git a/ledger/complete/wal/checkpoint_v6_reader.go b/ledger/complete/wal/checkpoint_v6_reader.go index 460343c49b4..b6a011916ea 100644 --- a/ledger/complete/wal/checkpoint_v6_reader.go +++ b/ledger/complete/wal/checkpoint_v6_reader.go @@ -20,8 +20,17 @@ import ( // ErrEOFNotReached for indicating end of file not reached error var ErrEOFNotReached = errors.New("expect to reach EOF, but actually didn't") -// TODO: validate the header file and the sub file that contains the root hashes -var ReadTriesRootHash = readTriesRootHash +func ReadTriesRootHash(logger zerolog.Logger, dir string, fileName string) ( + trieRootsToReturn []ledger.RootHash, + errToReturn error, +) { + errToReturn = validateCheckpointFile(logger, dir, fileName) + if errToReturn != nil { + return nil, errToReturn + } + return readTriesRootHash(logger, dir, fileName) +} + var CheckpointHasRootHash = checkpointHasRootHash // readCheckpointV6 reads checkpoint file from a main file and 17 file parts. 
@@ -849,3 +858,58 @@ func ensureReachedEOF(reader io.Reader) error { return fmt.Errorf("fail to check if reached EOF: %w", err) } + +func validateCheckpointFile(logger zerolog.Logger, dir, fileName string) error { + headerPath := filePathCheckpointHeader(dir, fileName) + // validate header file + subtrieChecksums, topTrieChecksum, err := readCheckpointHeader(headerPath, logger) + if err != nil { + return err + } + + // validate subtrie files + for index, expectedSum := range subtrieChecksums { + filepath, _, err := filePathSubTries(dir, fileName, index) + if err != nil { + return err + } + err = withFile(logger, filepath, func(f *os.File) error { + _, checksum, err := readSubTriesFooter(f) + if err != nil { + return fmt.Errorf("cannot read sub trie node count: %w", err) + } + + if checksum != expectedSum { + return fmt.Errorf("mismatch checksum in subtrie file. checksum from checkpoint header %v does not "+ + "match with the checksum in subtrie file %v", checksum, expectedSum) + } + return nil + }) + + if err != nil { + return err + } + } + + // validate top trie file + filepath, _ := filePathTopTries(dir, fileName) + err = withFile(logger, filepath, func(file *os.File) error { + // read subtrie Node count and validate + _, _, checkSum, err := readTopTriesFooter(file) + if err != nil { + return err + } + + if topTrieChecksum != checkSum { + return fmt.Errorf("mismatch top trie checksum, header file has %v, toptrie file has %v", + topTrieChecksum, checkSum) + } + + return nil + }) + if err != nil { + return err + } + + return nil +} From 3c4bb931b14ee5eb25ad657a3befa7db2b9bdee7 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Thu, 4 Apr 2024 10:21:09 -0700 Subject: [PATCH 068/148] add test case --- ledger/complete/wal/checkpoint_v6_reader.go | 10 ++++---- ledger/complete/wal/checkpoint_v6_test.go | 27 +++++++++++++++++++++ 2 files changed, 32 insertions(+), 5 deletions(-) diff --git a/ledger/complete/wal/checkpoint_v6_reader.go 
b/ledger/complete/wal/checkpoint_v6_reader.go index b6a011916ea..8408b2a1683 100644 --- a/ledger/complete/wal/checkpoint_v6_reader.go +++ b/ledger/complete/wal/checkpoint_v6_reader.go @@ -21,12 +21,12 @@ import ( var ErrEOFNotReached = errors.New("expect to reach EOF, but actually didn't") func ReadTriesRootHash(logger zerolog.Logger, dir string, fileName string) ( - trieRootsToReturn []ledger.RootHash, - errToReturn error, + []ledger.RootHash, + error, ) { - errToReturn = validateCheckpointFile(logger, dir, fileName) - if errToReturn != nil { - return nil, errToReturn + err := validateCheckpointFile(logger, dir, fileName) + if err != nil { + return nil, err } return readTriesRootHash(logger, dir, fileName) } diff --git a/ledger/complete/wal/checkpoint_v6_test.go b/ledger/complete/wal/checkpoint_v6_test.go index 1bf95e17419..ded3acf3e13 100644 --- a/ledger/complete/wal/checkpoint_v6_test.go +++ b/ledger/complete/wal/checkpoint_v6_test.go @@ -608,6 +608,33 @@ func TestReadCheckpointRootHash(t *testing.T) { }) } +func TestReadCheckpointRootHashValidateChecksum(t *testing.T) { + unittest.RunWithTempDir(t, func(dir string) { + tries := createSimpleTrie(t) + fileName := "checkpoint" + logger := unittest.Logger() + require.NoErrorf(t, StoreCheckpointV6Concurrently(tries, dir, fileName, logger), "fail to store checkpoint") + + // add a wrong checksum to top trie file + topTrieFilePath, _ := filePathTopTries(dir, fileName) + file, err := os.OpenFile(topTrieFilePath, os.O_RDWR, 0644) + require.NoError(t, err) + + fileInfo, err := file.Stat() + require.NoError(t, err) + fileSize := fileInfo.Size() + + invalidSum := encodeCRC32Sum(10) + _, err = file.WriteAt(invalidSum, fileSize-crc32SumSize) + require.NoError(t, err) + require.NoError(t, file.Close()) + + // ReadTriesRootHash will first validate the checksum and detect the error + _, err = ReadTriesRootHash(logger, dir, fileName) + require.Error(t, err) + }) +} + func TestReadCheckpointRootHashMulti(t *testing.T) { 
unittest.RunWithTempDir(t, func(dir string) { tries := createMultipleRandomTries(t) From b28dcd5d4d41cf61205a78a107cdeccd76a4bb5a Mon Sep 17 00:00:00 2001 From: UlyanaAndrukhiv Date: Fri, 5 Apr 2024 00:36:56 +0300 Subject: [PATCH 069/148] Removed unnecessary comment from test --- integration/tests/access/cohort3/grpc_state_stream_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integration/tests/access/cohort3/grpc_state_stream_test.go b/integration/tests/access/cohort3/grpc_state_stream_test.go index 0bc293b2170..1f81b3e2a96 100644 --- a/integration/tests/access/cohort3/grpc_state_stream_test.go +++ b/integration/tests/access/cohort3/grpc_state_stream_test.go @@ -334,7 +334,7 @@ func (s *GrpcStateStreamSuite) getRPCs() []subscribeEventsRPCTest { type ResponseTracker[T any] struct { r map[uint64]map[string]T mu sync.RWMutex - compare func(t *testing.T, responses map[uint64]map[string]T, blockHeight uint64) error //func(control, test T) error + compare func(t *testing.T, responses map[uint64]map[string]T, blockHeight uint64) error } // NewResponseTracker creates a new ResponseTracker. 
From b4a19f0a6a469a2164fcd1fdc34485dc4898cd16 Mon Sep 17 00:00:00 2001 From: UlyanaAndrukhiv Date: Fri, 5 Apr 2024 00:41:33 +0300 Subject: [PATCH 070/148] Renamed variable in test --- integration/tests/access/cohort3/grpc_state_stream_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/integration/tests/access/cohort3/grpc_state_stream_test.go b/integration/tests/access/cohort3/grpc_state_stream_test.go index 1f81b3e2a96..beb37bb0f89 100644 --- a/integration/tests/access/cohort3/grpc_state_stream_test.go +++ b/integration/tests/access/cohort3/grpc_state_stream_test.go @@ -450,7 +450,7 @@ func SubscribeHandler[T any, V any]( defer close(errChan) for { - t, err := recv() + resp, err := recv() if err != nil { if err == io.EOF { return @@ -460,7 +460,7 @@ func SubscribeHandler[T any, V any]( return } - response, err := responseHandler(t) + response, err := responseHandler(resp) if err != nil { sendErr(fmt.Errorf("error converting response: %w", err)) return From a5c29acbf6924b8457ada96f139296a45a88dc6f Mon Sep 17 00:00:00 2001 From: Andrii Slisarchuk Date: Fri, 5 Apr 2024 01:32:35 +0300 Subject: [PATCH 071/148] Fixed issues with integration test --- engine/access/rpc/backend/backend.go | 2 +- .../backend/backend_stream_transactions.go | 37 ++++++++++--------- .../backend_stream_transactions_test.go | 10 +++-- 3 files changed, 27 insertions(+), 22 deletions(-) diff --git a/engine/access/rpc/backend/backend.go b/engine/access/rpc/backend/backend.go index 209fcff8dac..daefbd3182c 100644 --- a/engine/access/rpc/backend/backend.go +++ b/engine/access/rpc/backend/backend.go @@ -282,7 +282,7 @@ func New(params Params) (*Backend, error) { } b.backendTransactions.txErrorMessages = b - + b.backendSubscribeTransactions.backendTransactions = &b.backendTransactions retry.SetBackend(b) preferredENIdentifiers, err = identifierList(params.PreferredExecutionNodeIDs) diff --git a/engine/access/rpc/backend/backend_stream_transactions.go 
b/engine/access/rpc/backend/backend_stream_transactions.go index 2bdec081c06..17ade5d6844 100644 --- a/engine/access/rpc/backend/backend_stream_transactions.go +++ b/engine/access/rpc/backend/backend_stream_transactions.go @@ -26,6 +26,7 @@ import ( // backendSubscribeTransactions handles transaction subscriptions. type backendSubscribeTransactions struct { txLocalDataProvider *TransactionsLocalDataProvider + backendTransactions *backendTransactions executionResults storage.ExecutionResults log zerolog.Logger broadcaster *engine.Broadcaster @@ -112,7 +113,7 @@ func (b *backendSubscribeTransactions) getTransactionStatusResponse(txInfo *Tran // Check, if transaction executed and transaction result already available if txInfo.blockWithTx != nil && !txInfo.txExecuted { - txResult, err := b.searchForTransactionResult(ctx, txInfo.BlockID, txInfo.TransactionID) + txResult, err := b.searchForTransactionResult(ctx, txInfo) if err != nil { return nil, status.Errorf(codes.Internal, "failed to get execution result for block %s: %v", txInfo.BlockID, err) } @@ -171,17 +172,17 @@ func (b *backendSubscribeTransactions) generateResultsWithMissingStatuses( var results []*access.TransactionResult // If the difference between statuses' values is more than one step, fill in the missing results. 
- if txInfo.Status-prevTxStatus > 1 { + if (txInfo.Status - prevTxStatus) > 1 { for missingStatus := prevTxStatus + 1; missingStatus < txInfo.Status; missingStatus++ { - var missingTxResult *access.TransactionResult + var missingTxResult access.TransactionResult switch missingStatus { case flow.TransactionStatusPending: - missingTxResult = &access.TransactionResult{ + missingTxResult = access.TransactionResult{ Status: missingStatus, TransactionID: txInfo.TransactionID, } case flow.TransactionStatusFinalized: - missingTxResult = &access.TransactionResult{ + missingTxResult = access.TransactionResult{ Status: missingStatus, TransactionID: txInfo.TransactionID, BlockID: txInfo.BlockID, @@ -189,17 +190,16 @@ func (b *backendSubscribeTransactions) generateResultsWithMissingStatuses( CollectionID: txInfo.CollectionID, } case flow.TransactionStatusExecuted: - missingTxResult = txInfo.TransactionResult + missingTxResult = *txInfo.TransactionResult missingTxResult.Status = missingStatus default: return nil, fmt.Errorf("unexpected missing transaction status") } - results = append(results, missingTxResult) + results = append(results, &missingTxResult) } } results = append(results, txInfo.TransactionResult) - return results, nil } @@ -223,7 +223,7 @@ func (b *backendSubscribeTransactions) validateBlockHeight(height uint64) error // searchForTransactionBlockInfo searches for the block containing the specified transaction. // It retrieves the block at the given height and checks if the transaction is included in that block. 
// Expected errors: -// - subscription.ErrBlockNotReady when unable to retrieve the block or collection ID +// - ErrTransactionNotInBlock when unable to retrieve the collection // - codes.Internal when other errors occur during block or collection lookup func (b *backendSubscribeTransactions) searchForTransactionBlockInfo( height uint64, @@ -251,23 +251,24 @@ func (b *backendSubscribeTransactions) searchForTransactionBlockInfo( // - codes.Internal if an internal error occurs while retrieving execution result. func (b *backendSubscribeTransactions) searchForTransactionResult( ctx context.Context, - blockID flow.Identifier, - txID flow.Identifier, + txInfo *TransactionSubscriptionMetadata, ) (*access.TransactionResult, error) { - _, err := b.executionResults.ByBlockID(blockID) + _, err := b.executionResults.ByBlockID(txInfo.BlockID) if err != nil { if errors.Is(err, storage.ErrNotFound) { return nil, nil } - return nil, fmt.Errorf("failed to get execution result for block %s: %w", blockID, err) + return nil, fmt.Errorf("failed to get execution result for block %s: %w", txInfo.BlockID, err) } - block, err := b.txLocalDataProvider.blocks.ByID(blockID) - if err != nil { - return nil, fmt.Errorf("error looking up block: %w", err) - } + txResult, err := b.backendTransactions.GetTransactionResult( + ctx, + txInfo.TransactionID, + txInfo.BlockID, + txInfo.CollectionID, + entities.EventEncodingVersion_CCF_V0, + ) - txResult, err := b.txLocalDataProvider.GetTransactionResultFromStorage(ctx, block, txID, entities.EventEncodingVersion_CCF_V0) if err != nil { // if either the storage or execution node reported no results or there were not enough execution results if status.Code(err) == codes.NotFound { diff --git a/engine/access/rpc/backend/backend_stream_transactions_test.go b/engine/access/rpc/backend/backend_stream_transactions_test.go index d7b6cd6f181..681d86dde50 100644 --- a/engine/access/rpc/backend/backend_stream_transactions_test.go +++ 
b/engine/access/rpc/backend/backend_stream_transactions_test.go @@ -204,8 +204,10 @@ func (s *TransactionStatusSuite) backendParams() Params { ResponseLimit: subscription.DefaultResponseLimit, Broadcaster: s.broadcaster, }, - TxResultsIndex: index.NewTransactionResultsIndex(s.transactionResults), - EventsIndex: index.NewEventsIndex(s.events), + TxResultsIndex: index.NewTransactionResultsIndex(s.transactionResults), + EventQueryMode: IndexQueryModeLocalOnly, + TxResultQueryMode: IndexQueryModeLocalOnly, + EventsIndex: index.NewEventsIndex(s.events), } } @@ -251,6 +253,8 @@ func (s *TransactionStatusSuite) TestSubscribeTransactionStatusHappyCase() { // Generate sent transaction with ref block of the current finalized block transaction := unittest.TransactionFixture() transaction.SetReferenceBlockID(s.finalizedBlock.ID()) + s.transactions.On("ByID", mock.AnythingOfType("flow.Identifier")).Return(&transaction.TransactionBody, nil) + col := flow.CollectionFromTransactions([]*flow.Transaction{&transaction}) guarantee := col.Guarantee() light := col.Light() @@ -295,7 +299,7 @@ func (s *TransactionStatusSuite) TestSubscribeTransactionStatusHappyCase() { result := txResults[0] assert.Equal(s.T(), txId, result.TransactionID) assert.Equal(s.T(), expectedTxStatus, result.Status) - }, 60*time.Second, fmt.Sprintf("timed out waiting for transaction info:\n\t- txID: %x\n\t- blockID: %x", txId, s.finalizedBlock.ID())) + }, time.Second, fmt.Sprintf("timed out waiting for transaction info:\n\t- txID: %x\n\t- blockID: %x", txId, s.finalizedBlock.ID())) } // 1. 
Subscribe to transaction status and receive the first message with pending status From b27800f3cd71fffb4a4856484d057ec194a8e7d4 Mon Sep 17 00:00:00 2001 From: Andrii Slisarchuk Date: Fri, 5 Apr 2024 02:05:07 +0300 Subject: [PATCH 072/148] Added event encoding version --- access/api.go | 2 +- access/handler.go | 2 +- access/mock/api.go | 10 +++++----- .../backend/backend_stream_transactions.go | 20 ++++++++++++------- .../backend_stream_transactions_test.go | 4 ++-- go.mod | 2 +- go.sum | 4 ++-- insecure/go.mod | 2 +- insecure/go.sum | 4 ++-- integration/go.mod | 2 +- integration/go.sum | 4 ++-- .../tests/access/cohort1/access_api_test.go | 3 ++- 12 files changed, 33 insertions(+), 26 deletions(-) diff --git a/access/api.go b/access/api.go index 3201796c6ed..4a5bcbc7de3 100644 --- a/access/api.go +++ b/access/api.go @@ -197,7 +197,7 @@ type API interface { // SubscribeTransactionStatuses streams transaction statuses starting from the reference block saved in the // transaction itself until the block containing the transaction becomes sealed or expired. When the transaction // status becomes TransactionStatusSealed or TransactionStatusExpired, the subscription will automatically shut down. - SubscribeTransactionStatuses(ctx context.Context, tx *flow.TransactionBody) subscription.Subscription + SubscribeTransactionStatuses(ctx context.Context, tx *flow.TransactionBody, requiredEventEncodingVersion entities.EventEncodingVersion) subscription.Subscription } // TODO: Combine this with flow.TransactionResult? 
diff --git a/access/handler.go b/access/handler.go index adf6e944c8a..3c5e7d4a0f4 100644 --- a/access/handler.go +++ b/access/handler.go @@ -1113,7 +1113,7 @@ func (h *Handler) SendAndSubscribeTransactionStatuses( return err } - sub := h.api.SubscribeTransactionStatuses(ctx, &tx) + sub := h.api.SubscribeTransactionStatuses(ctx, &tx, request.GetEventEncodingVersion()) messageIndex := counters.NewMonotonousCounter(0) return subscription.HandleSubscription(sub, func(txResults []*TransactionResult) error { diff --git a/access/mock/api.go b/access/mock/api.go index b27e8a03580..8e6f8e53936 100644 --- a/access/mock/api.go +++ b/access/mock/api.go @@ -977,13 +977,13 @@ func (_m *API) SubscribeBlocksFromStartHeight(ctx context.Context, startHeight u return r0 } -// SubscribeTransactionStatuses provides a mock function with given fields: ctx, tx -func (_m *API) SubscribeTransactionStatuses(ctx context.Context, tx *flow.TransactionBody) subscription.Subscription { - ret := _m.Called(ctx, tx) +// SubscribeTransactionStatuses provides a mock function with given fields: ctx, tx, requiredEventEncodingVersion +func (_m *API) SubscribeTransactionStatuses(ctx context.Context, tx *flow.TransactionBody, requiredEventEncodingVersion entities.EventEncodingVersion) subscription.Subscription { + ret := _m.Called(ctx, tx, requiredEventEncodingVersion) var r0 subscription.Subscription - if rf, ok := ret.Get(0).(func(context.Context, *flow.TransactionBody) subscription.Subscription); ok { - r0 = rf(ctx, tx) + if rf, ok := ret.Get(0).(func(context.Context, *flow.TransactionBody, entities.EventEncodingVersion) subscription.Subscription); ok { + r0 = rf(ctx, tx, requiredEventEncodingVersion) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(subscription.Subscription) diff --git a/engine/access/rpc/backend/backend_stream_transactions.go b/engine/access/rpc/backend/backend_stream_transactions.go index f1e9cfeec48..08628f9b289 100644 --- a/engine/access/rpc/backend/backend_stream_transactions.go 
+++ b/engine/access/rpc/backend/backend_stream_transactions.go @@ -35,14 +35,19 @@ type backendSubscribeTransactions struct { // TransactionSubscriptionMetadata holds data representing the status state for each transaction subscription. type TransactionSubscriptionMetadata struct { *access.TransactionResult - txReferenceBlockID flow.Identifier - blockWithTx *flow.Header - txExecuted bool + txReferenceBlockID flow.Identifier + blockWithTx *flow.Header + txExecuted bool + eventEncodingVersion entities.EventEncodingVersion } // SubscribeTransactionStatuses subscribes to transaction status changes starting from the transaction reference block ID. // If invalid tx parameters will be supplied SubscribeTransactionStatuses will return a failed subscription. -func (b *backendSubscribeTransactions) SubscribeTransactionStatuses(ctx context.Context, tx *flow.TransactionBody) subscription.Subscription { +func (b *backendSubscribeTransactions) SubscribeTransactionStatuses( + ctx context.Context, + tx *flow.TransactionBody, + requiredEventEncodingVersion entities.EventEncodingVersion, +) subscription.Subscription { nextHeight, err := b.blockTracker.GetStartHeightFromBlockID(tx.ReferenceBlockID) if err != nil { return subscription.NewFailedSubscription(err, "could not get start height") @@ -54,8 +59,9 @@ func (b *backendSubscribeTransactions) SubscribeTransactionStatuses(ctx context. 
BlockID: flow.ZeroID, Status: flow.TransactionStatusUnknown, }, - txReferenceBlockID: tx.ReferenceBlockID, - blockWithTx: nil, + txReferenceBlockID: tx.ReferenceBlockID, + blockWithTx: nil, + eventEncodingVersion: requiredEventEncodingVersion, } return b.subscriptionHandler.Subscribe(ctx, nextHeight, b.getTransactionStatusResponse(&txInfo)) @@ -253,7 +259,7 @@ func (b *backendSubscribeTransactions) searchForTransactionResult( txInfo.TransactionID, txInfo.BlockID, txInfo.CollectionID, - entities.EventEncodingVersion_CCF_V0, + txInfo.eventEncodingVersion, ) if err != nil { diff --git a/engine/access/rpc/backend/backend_stream_transactions_test.go b/engine/access/rpc/backend/backend_stream_transactions_test.go index 4f26c36f2f6..598105bf7bd 100644 --- a/engine/access/rpc/backend/backend_stream_transactions_test.go +++ b/engine/access/rpc/backend/backend_stream_transactions_test.go @@ -304,7 +304,7 @@ func (s *TransactionStatusSuite) TestSubscribeTransactionStatusHappyCase() { } // 1. Subscribe to transaction status and receive the first message with pending status - sub := s.backend.SubscribeTransactionStatuses(ctx, &transaction.TransactionBody) + sub := s.backend.SubscribeTransactionStatuses(ctx, &transaction.TransactionBody, entities.EventEncodingVersion_CCF_V0) checkNewSubscriptionMessage(sub, flow.TransactionStatusPending) // 2. 
Make transaction reference block sealed, and add a new finalized block that includes the transaction @@ -374,7 +374,7 @@ func (s *TransactionStatusSuite) TestSubscribeTransactionStatusExpired() { } // Subscribe to transaction status and receive the first message with pending status - sub := s.backend.SubscribeTransactionStatuses(ctx, &transaction.TransactionBody) + sub := s.backend.SubscribeTransactionStatuses(ctx, &transaction.TransactionBody, entities.EventEncodingVersion_CCF_V0) checkNewSubscriptionMessage(sub, flow.TransactionStatusPending) // Generate 600 blocks without transaction included and check, that transaction still pending diff --git a/go.mod b/go.mod index c9c28ac961b..2481ee6bd2a 100644 --- a/go.mod +++ b/go.mod @@ -321,4 +321,4 @@ require ( // Using custom fork until https://github.com/onflow/flow-go/issues/5338 is resolved replace github.com/ipfs/boxo => github.com/onflow/boxo v0.0.0-20240201202436-f2477b92f483 -replace github.com/onflow/flow/protobuf/go/flow v0.3.7-0.20240329135840-9f04832ba3a1 => github.com/The-K-R-O-K/flow/protobuf/go/flow v0.0.0-20240404224351-97eb40508407 +replace github.com/onflow/flow/protobuf/go/flow v0.3.7-0.20240329135840-9f04832ba3a1 => github.com/The-K-R-O-K/flow/protobuf/go/flow v0.0.0-20240404225450-1c90bb644fe1 diff --git a/go.sum b/go.sum index a9e257f8719..c769fa46e98 100644 --- a/go.sum +++ b/go.sum @@ -111,8 +111,8 @@ github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMx github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= -github.com/The-K-R-O-K/flow/protobuf/go/flow v0.0.0-20240404224351-97eb40508407 h1:E/Q6mpjSjJ9ncTfGJvARlg0hTdxYnA4Bg+UYhVIEFbM= -github.com/The-K-R-O-K/flow/protobuf/go/flow v0.0.0-20240404224351-97eb40508407/go.mod 
h1:NA2pX2nw8zuaxfKphhKsk00kWLwfd+tv8mS23YXO4Sk= +github.com/The-K-R-O-K/flow/protobuf/go/flow v0.0.0-20240404225450-1c90bb644fe1 h1:T89Ybbj5UYJWBOfXA/c5NyrKmnlccP6gP6CaCu1xE6k= +github.com/The-K-R-O-K/flow/protobuf/go/flow v0.0.0-20240404225450-1c90bb644fe1/go.mod h1:NA2pX2nw8zuaxfKphhKsk00kWLwfd+tv8mS23YXO4Sk= github.com/VictoriaMetrics/fastcache v1.5.3/go.mod h1:+jv9Ckb+za/P1ZRg/sulP5Ni1v49daAVERr0H3CuscE= github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40= github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o= diff --git a/insecure/go.mod b/insecure/go.mod index d5cdedb7371..472e5d215ac 100644 --- a/insecure/go.mod +++ b/insecure/go.mod @@ -305,4 +305,4 @@ require ( replace github.com/onflow/flow-go => ../ -replace github.com/onflow/flow/protobuf/go/flow v0.3.7-0.20240329135840-9f04832ba3a1 => github.com/The-K-R-O-K/flow/protobuf/go/flow v0.0.0-20240404224351-97eb40508407 +replace github.com/onflow/flow/protobuf/go/flow v0.3.7-0.20240329135840-9f04832ba3a1 => github.com/The-K-R-O-K/flow/protobuf/go/flow v0.0.0-20240404225450-1c90bb644fe1 diff --git a/insecure/go.sum b/insecure/go.sum index 280d41ea4b7..c936850d0d3 100644 --- a/insecure/go.sum +++ b/insecure/go.sum @@ -101,8 +101,8 @@ github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMx github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= -github.com/The-K-R-O-K/flow/protobuf/go/flow v0.0.0-20240404224351-97eb40508407 h1:E/Q6mpjSjJ9ncTfGJvARlg0hTdxYnA4Bg+UYhVIEFbM= -github.com/The-K-R-O-K/flow/protobuf/go/flow v0.0.0-20240404224351-97eb40508407/go.mod h1:NA2pX2nw8zuaxfKphhKsk00kWLwfd+tv8mS23YXO4Sk= +github.com/The-K-R-O-K/flow/protobuf/go/flow 
v0.0.0-20240404225450-1c90bb644fe1 h1:T89Ybbj5UYJWBOfXA/c5NyrKmnlccP6gP6CaCu1xE6k= +github.com/The-K-R-O-K/flow/protobuf/go/flow v0.0.0-20240404225450-1c90bb644fe1/go.mod h1:NA2pX2nw8zuaxfKphhKsk00kWLwfd+tv8mS23YXO4Sk= github.com/VictoriaMetrics/fastcache v1.5.3/go.mod h1:+jv9Ckb+za/P1ZRg/sulP5Ni1v49daAVERr0H3CuscE= github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40= github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o= diff --git a/integration/go.mod b/integration/go.mod index 964914dd3a7..cffbea55a94 100644 --- a/integration/go.mod +++ b/integration/go.mod @@ -362,4 +362,4 @@ replace github.com/onflow/flow-go => ../ replace github.com/onflow/flow-go/insecure => ../insecure -replace github.com/onflow/flow/protobuf/go/flow v0.3.7-0.20240329135840-9f04832ba3a1 => github.com/The-K-R-O-K/flow/protobuf/go/flow v0.0.0-20240404224351-97eb40508407 +replace github.com/onflow/flow/protobuf/go/flow v0.3.7-0.20240329135840-9f04832ba3a1 => github.com/The-K-R-O-K/flow/protobuf/go/flow v0.0.0-20240404225450-1c90bb644fe1 diff --git a/integration/go.sum b/integration/go.sum index da9a42a02ea..cc9a18ac333 100644 --- a/integration/go.sum +++ b/integration/go.sum @@ -112,8 +112,8 @@ github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMx github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= -github.com/The-K-R-O-K/flow/protobuf/go/flow v0.0.0-20240404224351-97eb40508407 h1:E/Q6mpjSjJ9ncTfGJvARlg0hTdxYnA4Bg+UYhVIEFbM= -github.com/The-K-R-O-K/flow/protobuf/go/flow v0.0.0-20240404224351-97eb40508407/go.mod h1:NA2pX2nw8zuaxfKphhKsk00kWLwfd+tv8mS23YXO4Sk= +github.com/The-K-R-O-K/flow/protobuf/go/flow v0.0.0-20240404225450-1c90bb644fe1 
h1:T89Ybbj5UYJWBOfXA/c5NyrKmnlccP6gP6CaCu1xE6k= +github.com/The-K-R-O-K/flow/protobuf/go/flow v0.0.0-20240404225450-1c90bb644fe1/go.mod h1:NA2pX2nw8zuaxfKphhKsk00kWLwfd+tv8mS23YXO4Sk= github.com/VictoriaMetrics/fastcache v1.5.3/go.mod h1:+jv9Ckb+za/P1ZRg/sulP5Ni1v49daAVERr0H3CuscE= github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40= github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o= diff --git a/integration/tests/access/cohort1/access_api_test.go b/integration/tests/access/cohort1/access_api_test.go index 85de46bbd7b..2e89ab6a6f6 100644 --- a/integration/tests/access/cohort1/access_api_test.go +++ b/integration/tests/access/cohort1/access_api_test.go @@ -278,7 +278,8 @@ func (s *AccessAPISuite) TestSendAndSubscribeTransactionStatuses() { // Send and subscribe to the transaction status using the access API subClient, err := accessClient.SendAndSubscribeTransactionStatuses(s.ctx, &accessproto.SendAndSubscribeTransactionStatusesRequest{ - Transaction: transactionMsg, + Transaction: transactionMsg, + EventEncodingVersion: entities.EventEncodingVersion_CCF_V0, }) s.Require().NoError(err) From f32b4b63330b6704697455e82526fb8df4b4d09a Mon Sep 17 00:00:00 2001 From: Andrii Date: Fri, 5 Apr 2024 15:36:10 +0300 Subject: [PATCH 073/148] Refactored test where all rpcs is being checked --- .../cohort2/observer_indexer_enabled_test.go | 578 ++++-------------- 1 file changed, 123 insertions(+), 455 deletions(-) diff --git a/integration/tests/access/cohort2/observer_indexer_enabled_test.go b/integration/tests/access/cohort2/observer_indexer_enabled_test.go index 58ac0932861..29b7c7df3ae 100644 --- a/integration/tests/access/cohort2/observer_indexer_enabled_test.go +++ b/integration/tests/access/cohort2/observer_indexer_enabled_test.go @@ -264,7 +264,6 @@ func (s *ObserverIndexerEnabledSuite) TestObserverIndexedRPCsHappyPath() { } } require.True(t, found) - } // 
TestAllObserverIndexedRPCsHappyPath tests the observer with the indexer enabled, @@ -384,79 +383,158 @@ func (s *ObserverIndexerEnabledSuite) TestAllObserverIndexedRPCsHappyPath() { }) require.NoError(t, err) - //eventsByBlockID, err := observerLocal.GetEventsForBlockIDs(ctx, &accessproto.GetEventsForBlockIDsRequest{ - // Type: sdk.EventAccountCreated, - // BlockIds: [][]byte{blockWithAccount.Block.Id}, - // EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, - //}) - //require.NoError(s.T(), err) - - // GetEventsForBlockIDs - s.checkGetEventsForBlockIDsRPC(ctx, observerLocal, observerUpstream, accessNode, [][]byte{blockWithAccount.Block.Id}) + checkRPC := func(rpcCall func(client accessproto.AccessAPIClient) (any, error)) { + observerRes, err := rpcCall(observerLocal) + require.NoError(s.T(), err) + observerUpstreamRes, err := rpcCall(observerUpstream) + require.NoError(s.T(), err) + accessRes, err := rpcCall(accessNode) + require.NoError(s.T(), err) - // GetEventsForHeightRange - s.checkGetEventsForHeightRangeRPC(ctx, observerLocal, observerUpstream, accessNode, blockWithAccount.Block.Height, blockWithAccount.Block.Height) + require.Equal(s.T(), observerRes, observerUpstreamRes) + require.Equal(s.T(), observerRes, accessRes) + } - // Making GetEventsForHeightRange call to get events and then get txIndex - eventsByHeight, err := observerLocal.GetEventsForHeightRange(ctx, &accessproto.GetEventsForHeightRangeRequest{ - Type: sdk.EventAccountCreated, - StartHeight: blockWithAccount.Block.Height, - EndHeight: blockWithAccount.Block.Height, - EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, + // GetEventsForBlockIDs + checkRPC(func(client accessproto.AccessAPIClient) (any, error) { + res, err := client.GetEventsForBlockIDs(ctx, &accessproto.GetEventsForBlockIDsRequest{ + Type: sdk.EventAccountCreated, + BlockIds: [][]byte{blockWithAccount.Block.Id}, + EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, + }) + return 
res.Results, err }) - require.NoError(s.T(), err) var txIndex uint32 found := false - for _, eventsInBlock := range eventsByHeight.Results { - for _, event := range eventsInBlock.Events { - if event.Type == sdk.EventAccountCreated { - if bytes.Equal(event.Payload, accountCreatedPayload) { - found = true - txIndex = event.TransactionIndex + + // GetEventsForHeightRange + checkRPC(func(client accessproto.AccessAPIClient) (any, error) { + res, err := client.GetEventsForHeightRange(ctx, &accessproto.GetEventsForHeightRangeRequest{ + Type: sdk.EventAccountCreated, + StartHeight: blockWithAccount.Block.Height, + EndHeight: blockWithAccount.Block.Height, + EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, + }) + + // Iterating through response Results to get txIndex of event + for _, eventsInBlock := range res.Results { + for _, event := range eventsInBlock.Events { + if event.Type == sdk.EventAccountCreated { + if bytes.Equal(event.Payload, accountCreatedPayload) { + found = true + txIndex = event.TransactionIndex + } } } } - } - require.True(t, found) + require.True(t, found) + return res.Results, err + }) // GetSystemTransaction - s.checkGetSystemTransactionRPC(ctx, observerLocal, observerUpstream, accessNode, blockWithAccount.Block.Id) - - converted, err := convert.MessageToBlock(blockWithAccount.Block) - require.NoError(t, err) - - resultId := converted.Payload.Results[0].ID() + checkRPC(func(client accessproto.AccessAPIClient) (any, error) { + res, err := client.GetSystemTransaction(ctx, &accessproto.GetSystemTransactionRequest{ + BlockId: blockWithAccount.Block.Id, + }) + return res.Transaction, err + }) // GetExecutionResultByID - s.checkGetExecutionResultByIDRPC(ctx, observerLocal, observerUpstream, accessNode, convert.IdentifierToMessage(resultId)) + checkRPC(func(client accessproto.AccessAPIClient) (any, error) { + converted, err := convert.MessageToBlock(blockWithAccount.Block) + require.NoError(t, err) - //GetTransaction - 
s.checkGetTransactionRPC(ctx, observerLocal, observerUpstream, accessNode, accountCreationTxRes.TransactionID.Bytes(), blockWithAccount.Block.Id, nil) + resultId := converted.Payload.Results[0].ID() + res, err := client.GetExecutionResultByID(ctx, &accessproto.GetExecutionResultByIDRequest{ + Id: convert.IdentifierToMessage(resultId), + }) + return res.ExecutionResult, err + }) + + // GetTransaction + checkRPC(func(client accessproto.AccessAPIClient) (any, error) { + res, err := client.GetTransaction(ctx, &accessproto.GetTransactionRequest{ + Id: accountCreationTxRes.TransactionID.Bytes(), + BlockId: blockWithAccount.Block.Id, + CollectionId: nil, + }) + return res.Transaction, err + }) // GetTransactionResult - s.checkGetTransactionResultRPC(ctx, observerLocal, observerUpstream, accessNode, accountCreationTxRes.TransactionID.Bytes(), blockWithAccount.Block.Id, accountCreationTxRes.CollectionID.Bytes()) + checkRPC(func(client accessproto.AccessAPIClient) (any, error) { + res, err := client.GetTransactionResult(ctx, &accessproto.GetTransactionRequest{ + Id: accountCreationTxRes.TransactionID.Bytes(), + BlockId: blockWithAccount.Block.Id, + CollectionId: accountCreationTxRes.CollectionID.Bytes(), + }) + return res.Events, err + }) - //GetTransactionResultByIndex - s.checkGetTransactionResultsByIndexRPC(ctx, observerLocal, observerUpstream, accessNode, blockWithAccount.Block.Id, txIndex) + // GetTransactionResultByIndex + checkRPC(func(client accessproto.AccessAPIClient) (any, error) { + res, err := client.GetTransactionResultByIndex(ctx, &accessproto.GetTransactionByIndexRequest{ + BlockId: blockWithAccount.Block.Id, + Index: txIndex, + EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, + }) + return res.Events, err + }) // GetTransactionResultsByBlockID - s.checkGetTransactionResultsByBlockIDRPC(ctx, observerLocal, observerUpstream, accessNode, blockWithAccount.Block.Id) + checkRPC(func(client accessproto.AccessAPIClient) (any, error) { + res, err := 
client.GetTransactionResultsByBlockID(ctx, &accessproto.GetTransactionsByBlockIDRequest{ + BlockId: blockWithAccount.Block.Id, + EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, + }) + return res.TransactionResults, err + }) // GetTransactionsByBlockID - s.checkGetTransactionsByBlockIDRPC(ctx, observerLocal, observerUpstream, accessNode, blockWithAccount.Block.Id) + checkRPC(func(client accessproto.AccessAPIClient) (any, error) { + res, err := client.GetTransactionsByBlockID(ctx, &accessproto.GetTransactionsByBlockIDRequest{ + BlockId: blockWithAccount.Block.Id, + }) + return res.Transactions, err + }) // GetCollectionByID - s.checkGetCollectionByIDRPC(ctx, observerLocal, observerUpstream, accessNode, accountCreationTxRes.CollectionID.Bytes()) + checkRPC(func(client accessproto.AccessAPIClient) (any, error) { + res, err := client.GetCollectionByID(ctx, &accessproto.GetCollectionByIDRequest{ + Id: accountCreationTxRes.CollectionID.Bytes(), + }) + return res.Collection, err + }) // ExecuteScriptAtBlockHeight - s.checkExecuteScriptAtBlockHeightRPC(ctx, observerLocal, observerUpstream, accessNode, blockWithAccount.Block.Height, []byte(simpleScript)) + checkRPC(func(client accessproto.AccessAPIClient) (any, error) { + res, err := client.ExecuteScriptAtBlockHeight(ctx, &accessproto.ExecuteScriptAtBlockHeightRequest{ + BlockHeight: blockWithAccount.Block.Height, + Script: []byte(simpleScript), + Arguments: make([][]byte, 0), + }) + return res.Value, err + }) // ExecuteScriptAtBlockID - s.checkExecuteScriptAtBlockIDRPC(ctx, observerLocal, observerUpstream, accessNode, blockWithAccount.Block.Id, []byte(simpleScript)) + checkRPC(func(client accessproto.AccessAPIClient) (any, error) { + res, err := client.ExecuteScriptAtBlockID(ctx, &accessproto.ExecuteScriptAtBlockIDRequest{ + BlockId: blockWithAccount.Block.Id, + Script: []byte(simpleScript), + Arguments: make([][]byte, 0), + }) + return res.Value, err + }) // GetAccountAtBlockHeight - 
s.checkGetAccountAtBlockHeightRPC(ctx, observerLocal, observerUpstream, accessNode, newAccountAddress.Bytes(), accountCreationTxRes.BlockHeight) + checkRPC(func(client accessproto.AccessAPIClient) (any, error) { + res, err := client.GetAccountAtBlockHeight(ctx, &accessproto.GetAccountAtBlockHeightRequest{ + Address: newAccountAddress.Bytes(), + BlockHeight: accountCreationTxRes.BlockHeight, + }) + return res.Account, err + }) } func (s *ObserverIndexerEnabledSuite) getRPCs() []RPCTest { @@ -659,413 +737,3 @@ func (s *ObserverIndexerEnabledSuite) getRestEndpoints() []RestEndpointTest { }, } } - -func (s *ObserverIndexerEnabledSuite) checkGetEventsForBlockIDsRPC( - ctx context.Context, - observerLocal accessproto.AccessAPIClient, - observerUpstream accessproto.AccessAPIClient, - accessNode accessproto.AccessAPIClient, - blockIds [][]byte, -) { - - observerLocalResponse, err := observerLocal.GetEventsForBlockIDs(ctx, &accessproto.GetEventsForBlockIDsRequest{ - Type: sdk.EventAccountCreated, - BlockIds: blockIds, - EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, - }) - require.NoError(s.T(), err) - - observerUpstreamResponse, err := observerUpstream.GetEventsForBlockIDs(ctx, &accessproto.GetEventsForBlockIDsRequest{ - Type: sdk.EventAccountCreated, - BlockIds: blockIds, - EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, - }) - require.NoError(s.T(), err) - - accessNodeResponse, err := accessNode.GetEventsForBlockIDs(ctx, &accessproto.GetEventsForBlockIDsRequest{ - Type: sdk.EventAccountCreated, - BlockIds: blockIds, - EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, - }) - require.NoError(s.T(), err) - - require.Equal(s.T(), accessNodeResponse.Results, observerLocalResponse.Results) - require.Equal(s.T(), accessNodeResponse.Results, observerUpstreamResponse.Results) -} - -func (s *ObserverIndexerEnabledSuite) checkGetEventsForHeightRangeRPC( - ctx context.Context, - observerLocal accessproto.AccessAPIClient, - 
observerUpstream accessproto.AccessAPIClient, - accessNode accessproto.AccessAPIClient, - startHeight uint64, - endHeight uint64, -) { - - observerLocalResponse, err := observerLocal.GetEventsForHeightRange(ctx, &accessproto.GetEventsForHeightRangeRequest{ - Type: sdk.EventAccountCreated, - StartHeight: startHeight, - EndHeight: endHeight, - EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, - }) - require.NoError(s.T(), err) - - observerUpstreamResponse, err := observerUpstream.GetEventsForHeightRange(ctx, &accessproto.GetEventsForHeightRangeRequest{ - Type: sdk.EventAccountCreated, - StartHeight: startHeight, - EndHeight: endHeight, - EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, - }) - require.NoError(s.T(), err) - - accessNodeResponse, err := accessNode.GetEventsForHeightRange(ctx, &accessproto.GetEventsForHeightRangeRequest{ - Type: sdk.EventAccountCreated, - StartHeight: startHeight, - EndHeight: endHeight, - EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, - }) - require.NoError(s.T(), err) - - require.Equal(s.T(), accessNodeResponse.Results, observerLocalResponse.Results) - require.Equal(s.T(), accessNodeResponse.Results, observerUpstreamResponse.Results) -} - -func (s *ObserverIndexerEnabledSuite) checkGetAccountAtBlockHeightRPC( - ctx context.Context, - observerLocal accessproto.AccessAPIClient, - observerUpstream accessproto.AccessAPIClient, - accessNode accessproto.AccessAPIClient, - accountAddress []byte, - blockHeight uint64, -) { - - observerLocalResponse, err := observerLocal.GetAccountAtBlockHeight(ctx, &accessproto.GetAccountAtBlockHeightRequest{ - Address: accountAddress, - BlockHeight: blockHeight, - }) - require.NoError(s.T(), err) - - observerUpstreamResponse, err := observerUpstream.GetAccountAtBlockHeight(ctx, &accessproto.GetAccountAtBlockHeightRequest{ - Address: accountAddress, - BlockHeight: blockHeight, - }) - require.NoError(s.T(), err) - - accessNodeResponse, err := 
accessNode.GetAccountAtBlockHeight(ctx, &accessproto.GetAccountAtBlockHeightRequest{ - Address: accountAddress, - BlockHeight: blockHeight, - }) - require.NoError(s.T(), err) - - require.Equal(s.T(), accessNodeResponse.Account, observerLocalResponse.Account) - require.Equal(s.T(), accessNodeResponse.Account, observerUpstreamResponse.Account) -} - -func (s *ObserverIndexerEnabledSuite) checkGetSystemTransactionRPC( - ctx context.Context, - observerLocal accessproto.AccessAPIClient, - observerUpstream accessproto.AccessAPIClient, - accessNode accessproto.AccessAPIClient, - blockId []byte, -) { - - observerLocalResponse, err := observerLocal.GetSystemTransaction(ctx, &accessproto.GetSystemTransactionRequest{ - BlockId: blockId, - }) - require.NoError(s.T(), err) - - observerUpstreamResponse, err := observerUpstream.GetSystemTransaction(ctx, &accessproto.GetSystemTransactionRequest{ - BlockId: blockId, - }) - require.NoError(s.T(), err) - - accessNodeResponse, err := accessNode.GetSystemTransaction(ctx, &accessproto.GetSystemTransactionRequest{ - BlockId: blockId, - }) - require.NoError(s.T(), err) - - require.Equal(s.T(), accessNodeResponse.Transaction, observerLocalResponse.Transaction) - require.Equal(s.T(), accessNodeResponse.Transaction, observerUpstreamResponse.Transaction) -} - -func (s *ObserverIndexerEnabledSuite) checkGetExecutionResultByIDRPC( - ctx context.Context, - observerLocal accessproto.AccessAPIClient, - observerUpstream accessproto.AccessAPIClient, - accessNode accessproto.AccessAPIClient, - id []byte, -) { - - observerLocalResponse, err := observerLocal.GetExecutionResultByID(ctx, &accessproto.GetExecutionResultByIDRequest{ - Id: id, - }) - require.NoError(s.T(), err) - - observerUpstreamResponse, err := observerUpstream.GetExecutionResultByID(ctx, &accessproto.GetExecutionResultByIDRequest{ - Id: id, - }) - require.NoError(s.T(), err) - - accessNodeResponse, err := accessNode.GetExecutionResultByID(ctx, &accessproto.GetExecutionResultByIDRequest{ 
- Id: id, - }) - require.NoError(s.T(), err) - - require.Equal(s.T(), accessNodeResponse.ExecutionResult, observerLocalResponse.ExecutionResult) - require.Equal(s.T(), accessNodeResponse.ExecutionResult, observerUpstreamResponse.ExecutionResult) -} - -func (s *ObserverIndexerEnabledSuite) checkGetTransactionRPC( - ctx context.Context, - observerLocal accessproto.AccessAPIClient, - observerUpstream accessproto.AccessAPIClient, - accessNode accessproto.AccessAPIClient, - id []byte, - blockId []byte, - collectionId []byte, -) { - - observerLocalResponse, err := observerLocal.GetTransaction(ctx, &accessproto.GetTransactionRequest{ - Id: id, - BlockId: blockId, - CollectionId: collectionId, - }) - require.NoError(s.T(), err) - - observerUpstreamResponse, err := observerUpstream.GetTransaction(ctx, &accessproto.GetTransactionRequest{ - Id: id, - BlockId: blockId, - CollectionId: collectionId, - }) - require.NoError(s.T(), err) - - accessNodeResponse, err := accessNode.GetTransaction(ctx, &accessproto.GetTransactionRequest{ - Id: id, - BlockId: blockId, - CollectionId: collectionId, - }) - require.NoError(s.T(), err) - - require.Equal(s.T(), accessNodeResponse.Transaction, observerLocalResponse.Transaction) - require.Equal(s.T(), accessNodeResponse.Transaction, observerUpstreamResponse.Transaction) -} - -func (s *ObserverIndexerEnabledSuite) checkGetTransactionResultRPC( - ctx context.Context, - observerLocal accessproto.AccessAPIClient, - observerUpstream accessproto.AccessAPIClient, - accessNode accessproto.AccessAPIClient, - id []byte, - blockId []byte, - collectionId []byte, -) { - - observerLocalResponse, err := observerLocal.GetTransactionResult(ctx, &accessproto.GetTransactionRequest{ - Id: id, - BlockId: blockId, - CollectionId: collectionId, - }) - require.NoError(s.T(), err) - - observerUpstreamResponse, err := observerUpstream.GetTransactionResult(ctx, &accessproto.GetTransactionRequest{ - Id: id, - BlockId: blockId, - CollectionId: collectionId, - }) - 
require.NoError(s.T(), err) - - accessNodeResponse, err := accessNode.GetTransactionResult(ctx, &accessproto.GetTransactionRequest{ - Id: id, - BlockId: blockId, - CollectionId: collectionId, - }) - require.NoError(s.T(), err) - - require.Equal(s.T(), accessNodeResponse.Events, observerLocalResponse.Events) - require.Equal(s.T(), accessNodeResponse.Events, observerUpstreamResponse.Events) -} - -func (s *ObserverIndexerEnabledSuite) checkGetTransactionResultsByBlockIDRPC( - ctx context.Context, - observerLocal accessproto.AccessAPIClient, - observerUpstream accessproto.AccessAPIClient, - accessNode accessproto.AccessAPIClient, - blockId []byte, -) { - - observerLocalResponse, err := observerLocal.GetTransactionResultsByBlockID(ctx, &accessproto.GetTransactionsByBlockIDRequest{ - BlockId: blockId, - EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, - }) - require.NoError(s.T(), err) - - observerUpstreamResponse, err := observerUpstream.GetTransactionResultsByBlockID(ctx, &accessproto.GetTransactionsByBlockIDRequest{ - BlockId: blockId, - EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, - }) - require.NoError(s.T(), err) - - accessNodeResponse, err := accessNode.GetTransactionResultsByBlockID(ctx, &accessproto.GetTransactionsByBlockIDRequest{ - BlockId: blockId, - EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, - }) - require.NoError(s.T(), err) - - require.Equal(s.T(), accessNodeResponse.TransactionResults, observerLocalResponse.TransactionResults) - require.Equal(s.T(), accessNodeResponse.TransactionResults, observerUpstreamResponse.TransactionResults) -} - -func (s *ObserverIndexerEnabledSuite) checkGetTransactionResultsByIndexRPC( - ctx context.Context, - observerLocal accessproto.AccessAPIClient, - observerUpstream accessproto.AccessAPIClient, - accessNode accessproto.AccessAPIClient, - blockId []byte, - index uint32, -) { - observerLocalResponse, err := observerLocal.GetTransactionResultByIndex(ctx, 
&accessproto.GetTransactionByIndexRequest{ - BlockId: blockId, - Index: index, - EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, - }) - require.NoError(s.T(), err) - - observerUpstreamResponse, err := observerUpstream.GetTransactionResultByIndex(ctx, &accessproto.GetTransactionByIndexRequest{ - BlockId: blockId, - Index: index, - EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, - }) - require.NoError(s.T(), err) - - accessNodeResponse, err := accessNode.GetTransactionResultByIndex(ctx, &accessproto.GetTransactionByIndexRequest{ - BlockId: blockId, - Index: index, - EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, - }) - require.NoError(s.T(), err) - - require.Equal(s.T(), accessNodeResponse.Events, observerLocalResponse.Events) - require.Equal(s.T(), accessNodeResponse.Events, observerUpstreamResponse.Events) -} - -func (s *ObserverIndexerEnabledSuite) checkGetTransactionsByBlockIDRPC( - ctx context.Context, - observerLocal accessproto.AccessAPIClient, - observerUpstream accessproto.AccessAPIClient, - accessNode accessproto.AccessAPIClient, - blockId []byte, -) { - - observerLocalResponse, err := observerLocal.GetTransactionsByBlockID(ctx, &accessproto.GetTransactionsByBlockIDRequest{ - BlockId: blockId, - }) - require.NoError(s.T(), err) - - observerUpstreamResponse, err := observerUpstream.GetTransactionsByBlockID(ctx, &accessproto.GetTransactionsByBlockIDRequest{ - BlockId: blockId, - }) - require.NoError(s.T(), err) - - accessNodeResponse, err := accessNode.GetTransactionsByBlockID(ctx, &accessproto.GetTransactionsByBlockIDRequest{ - BlockId: blockId, - }) - require.NoError(s.T(), err) - - require.Equal(s.T(), accessNodeResponse.Transactions, observerLocalResponse.Transactions) - require.Equal(s.T(), accessNodeResponse.Transactions, observerUpstreamResponse.Transactions) -} - -func (s *ObserverIndexerEnabledSuite) checkGetCollectionByIDRPC( - ctx context.Context, - observerLocal accessproto.AccessAPIClient, - 
observerUpstream accessproto.AccessAPIClient, - accessNode accessproto.AccessAPIClient, - collectionId []byte, -) { - - observerLocalResponse, err := observerLocal.GetCollectionByID(ctx, &accessproto.GetCollectionByIDRequest{ - Id: collectionId, - }) - require.NoError(s.T(), err) - - observerUpstreamResponse, err := observerUpstream.GetCollectionByID(ctx, &accessproto.GetCollectionByIDRequest{ - Id: collectionId, - }) - require.NoError(s.T(), err) - - accessNodeResponse, err := accessNode.GetCollectionByID(ctx, &accessproto.GetCollectionByIDRequest{ - Id: collectionId, - }) - require.NoError(s.T(), err) - - require.Equal(s.T(), accessNodeResponse.Collection, observerLocalResponse.Collection) - require.Equal(s.T(), accessNodeResponse.Collection, observerUpstreamResponse.Collection) -} - -func (s *ObserverIndexerEnabledSuite) checkExecuteScriptAtBlockHeightRPC( - ctx context.Context, - observerLocal accessproto.AccessAPIClient, - observerUpstream accessproto.AccessAPIClient, - accessNode accessproto.AccessAPIClient, - blockHeight uint64, - script []byte, -) { - - observerLocalResponse, err := observerLocal.ExecuteScriptAtBlockHeight(ctx, &accessproto.ExecuteScriptAtBlockHeightRequest{ - BlockHeight: blockHeight, - Script: script, - Arguments: make([][]byte, 0), - }) - require.NoError(s.T(), err) - - observerUpstreamResponse, err := observerUpstream.ExecuteScriptAtBlockHeight(ctx, &accessproto.ExecuteScriptAtBlockHeightRequest{ - BlockHeight: blockHeight, - Script: script, - Arguments: make([][]byte, 0), - }) - require.NoError(s.T(), err) - - accessNodeResponse, err := accessNode.ExecuteScriptAtBlockHeight(ctx, &accessproto.ExecuteScriptAtBlockHeightRequest{ - BlockHeight: blockHeight, - Script: script, - Arguments: make([][]byte, 0), - }) - require.NoError(s.T(), err) - - require.Equal(s.T(), accessNodeResponse.Value, observerLocalResponse.Value) - require.Equal(s.T(), accessNodeResponse.Value, observerUpstreamResponse.Value) -} - -func (s 
*ObserverIndexerEnabledSuite) checkExecuteScriptAtBlockIDRPC( - ctx context.Context, - observerLocal accessproto.AccessAPIClient, - observerUpstream accessproto.AccessAPIClient, - accessNode accessproto.AccessAPIClient, - blockId []byte, - script []byte, -) { - - observerLocalResponse, err := observerLocal.ExecuteScriptAtBlockID(ctx, &accessproto.ExecuteScriptAtBlockIDRequest{ - BlockId: blockId, - Script: script, - Arguments: make([][]byte, 0), - }) - require.NoError(s.T(), err) - - observerUpstreamResponse, err := observerUpstream.ExecuteScriptAtBlockID(ctx, &accessproto.ExecuteScriptAtBlockIDRequest{ - BlockId: blockId, - Script: script, - Arguments: make([][]byte, 0), - }) - require.NoError(s.T(), err) - - accessNodeResponse, err := accessNode.ExecuteScriptAtBlockID(ctx, &accessproto.ExecuteScriptAtBlockIDRequest{ - BlockId: blockId, - Script: script, - Arguments: make([][]byte, 0), - }) - require.NoError(s.T(), err) - - require.Equal(s.T(), accessNodeResponse.Value, observerLocalResponse.Value) - require.Equal(s.T(), accessNodeResponse.Value, observerUpstreamResponse.Value) -} From eaf2bc46e490a090ac9a2f27151dd71ebc806328 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Fri, 5 Apr 2024 18:17:59 -0500 Subject: [PATCH 074/148] Add storage health check flags for atree migration --- cmd/util/cmd/execution-state-extract/cmd.go | 16 +++++ .../execution_state_extract.go | 2 + .../migrations/atree_register_migration.go | 68 +++++++++++++++++++ .../atree_register_migration_test.go | 2 + 4 files changed, 88 insertions(+) diff --git a/cmd/util/cmd/execution-state-extract/cmd.go b/cmd/util/cmd/execution-state-extract/cmd.go index f2d2cab7c76..7cd4ed7bdeb 100644 --- a/cmd/util/cmd/execution-state-extract/cmd.go +++ b/cmd/util/cmd/execution-state-extract/cmd.go @@ -34,6 +34,8 @@ var ( flagLogVerboseValidationError bool flagAllowPartialStateFromPayloads bool flagContinueMigrationOnValidationError bool + 
flagCheckStorageHealthBeforeMigration bool + flagCheckStorageHealthAfterMigration bool flagInputPayloadFileName string flagOutputPayloadFileName string flagOutputPayloadByAddresses string @@ -82,6 +84,12 @@ func init() { Cmd.Flags().BoolVar(&flagAllowPartialStateFromPayloads, "allow-partial-state-from-payload-file", false, "allow input payload file containing partial state (e.g. not all accounts)") + Cmd.Flags().BoolVar(&flagCheckStorageHealthBeforeMigration, "check-storage-health-before", false, + "check (atree) storage health before migration") + + Cmd.Flags().BoolVar(&flagCheckStorageHealthAfterMigration, "check-storage-health-after", false, + "check (atree) storage health after migration") + Cmd.Flags().BoolVar(&flagContinueMigrationOnValidationError, "continue-migration-on-validation-errors", false, "continue migration even if validation fails") @@ -248,6 +256,14 @@ func run(*cobra.Command, []string) { log.Warn().Msgf("atree migration has verbose validation error logging enabled which may increase size of log") } + if flagCheckStorageHealthBeforeMigration { + log.Warn().Msgf("--check-storage-health-before flag is enabled and will increase duration of migration") + } + + if flagCheckStorageHealthAfterMigration { + log.Warn().Msgf("--check-storage-health-after flag is enabled and will increase duration of migration") + } + var inputMsg string if len(flagInputPayloadFileName) > 0 { // Input is payloads diff --git a/cmd/util/cmd/execution-state-extract/execution_state_extract.go b/cmd/util/cmd/execution-state-extract/execution_state_extract.go index 7277a1f110a..9f302ad0f5a 100644 --- a/cmd/util/cmd/execution-state-extract/execution_state_extract.go +++ b/cmd/util/cmd/execution-state-extract/execution_state_extract.go @@ -365,6 +365,8 @@ func newMigrations( flagValidateMigration, flagLogVerboseValidationError, flagContinueMigrationOnValidationError, + flagCheckStorageHealthBeforeMigration, + flagCheckStorageHealthAfterMigration, ), 
&migrators.DeduplicateContractNamesMigration{}, diff --git a/cmd/util/ledger/migrations/atree_register_migration.go b/cmd/util/ledger/migrations/atree_register_migration.go index 97b17aca5a8..f982e961063 100644 --- a/cmd/util/ledger/migrations/atree_register_migration.go +++ b/cmd/util/ledger/migrations/atree_register_migration.go @@ -39,6 +39,8 @@ type AtreeRegisterMigrator struct { validateMigratedValues bool logVerboseValidationError bool continueMigrationOnValidationError bool + checkStorageHealthBeforeMigration bool + checkStorageHealthAfterMigration bool } var _ AccountBasedMigration = (*AtreeRegisterMigrator)(nil) @@ -49,6 +51,8 @@ func NewAtreeRegisterMigrator( validateMigratedValues bool, logVerboseValidationError bool, continueMigrationOnValidationError bool, + checkStorageHealthBeforeMigration bool, + checkStorageHealthAfterMigration bool, ) *AtreeRegisterMigrator { sampler := util2.NewTimedSampler(30 * time.Second) @@ -60,6 +64,8 @@ func NewAtreeRegisterMigrator( validateMigratedValues: validateMigratedValues, logVerboseValidationError: logVerboseValidationError, continueMigrationOnValidationError: continueMigrationOnValidationError, + checkStorageHealthBeforeMigration: checkStorageHealthBeforeMigration, + checkStorageHealthAfterMigration: checkStorageHealthAfterMigration, } return migrator @@ -94,6 +100,17 @@ func (m *AtreeRegisterMigrator) MigrateAccount( return nil, fmt.Errorf("failed to create migrator runtime: %w", err) } + // Check storage health before migration, if enabled. + if m.checkStorageHealthBeforeMigration { + err = checkStorageHealth(address, mr.Storage, oldPayloads) + if err != nil { + m.log.Warn(). + Err(err). + Str("account", address.Hex()). 
+ Msg("storage health check before migration failed") + } + } + // keep track of all storage maps that were accessed // if they are empty they won't be changed, but we still need to copy them over storageMapIds := make(map[string]struct{}) @@ -144,9 +161,60 @@ func (m *AtreeRegisterMigrator) MigrateAccount( }) } + // Check storage health after migration, if enabled. + if m.checkStorageHealthAfterMigration { + mr, err := newMigratorRuntime(address, newPayloads) + if err != nil { + return nil, fmt.Errorf("failed to create migrator runtime: %w", err) + } + + err = checkStorageHealth(address, mr.Storage, newPayloads) + if err != nil { + m.log.Warn(). + Err(err). + Str("account", address.Hex()). + Msg("storage health check after migration failed") + } + } + return newPayloads, nil } +func checkStorageHealth( + address common.Address, + storage *runtime.Storage, + payloads []*ledger.Payload, +) error { + + for _, payload := range payloads { + registerID, _, err := convert.PayloadToRegister(payload) + if err != nil { + return fmt.Errorf("failed to convert payload to register: %w", err) + } + + if !registerID.IsSlabIndex() { + continue + } + + // Convert the register ID to a storage ID. + slabID := atree.NewStorageID( + atree.Address([]byte(registerID.Owner)), + atree.StorageIndex([]byte(registerID.Key[1:]))) + + // Retrieve the slab. 
+ _, _, err = storage.Retrieve(slabID) + if err != nil { + return fmt.Errorf("failed to retrieve slab %s: %w", slabID, err) + } + } + + for _, domain := range domains { + _ = storage.GetStorageMap(address, domain, false) + } + + return storage.CheckHealth() +} + func (m *AtreeRegisterMigrator) migrateAccountStorage( mr *migratorRuntime, storageMapIds map[string]struct{}, diff --git a/cmd/util/ledger/migrations/atree_register_migration_test.go b/cmd/util/ledger/migrations/atree_register_migration_test.go index 9f4017a5a57..d593e67b4b3 100644 --- a/cmd/util/ledger/migrations/atree_register_migration_test.go +++ b/cmd/util/ledger/migrations/atree_register_migration_test.go @@ -35,6 +35,8 @@ func TestAtreeRegisterMigration(t *testing.T) { true, false, false, + false, + false, ), }, ), From b9aa85c5220930cfa618e0653689e9b7c48fbb1d Mon Sep 17 00:00:00 2001 From: UlyanaAndrukhiv Date: Mon, 8 Apr 2024 15:54:17 +0300 Subject: [PATCH 075/148] Updated integration test according to comments --- .../access/cohort3/grpc_state_stream_test.go | 74 +++++++++++++------ .../cohort3/grpc_streaming_blocks_test.go | 63 ++++++++++++---- 2 files changed, 100 insertions(+), 37 deletions(-) diff --git a/integration/tests/access/cohort3/grpc_state_stream_test.go b/integration/tests/access/cohort3/grpc_state_stream_test.go index beb37bb0f89..685f38c1770 100644 --- a/integration/tests/access/cohort3/grpc_state_stream_test.go +++ b/integration/tests/access/cohort3/grpc_state_stream_test.go @@ -8,7 +8,6 @@ import ( "log" "sync" "testing" - "time" "github.com/rs/zerolog" "github.com/stretchr/testify/require" @@ -21,7 +20,9 @@ import ( "github.com/onflow/flow-go-sdk/test" "github.com/onflow/flow-go/engine/access/state_stream/backend" "github.com/onflow/flow-go/engine/common/rpc/convert" + "github.com/onflow/flow-go/engine/ghost/client" "github.com/onflow/flow-go/integration/testnet" + "github.com/onflow/flow-go/integration/tests/lib" "github.com/onflow/flow-go/model/flow" 
"github.com/onflow/flow-go/module/counters" "github.com/onflow/flow-go/utils/unittest" @@ -48,6 +49,7 @@ func TestGrpcStateStream(t *testing.T) { type GrpcStateStreamSuite struct { suite.Suite + lib.TestnetStateTracker log zerolog.Logger @@ -59,6 +61,8 @@ type GrpcStateStreamSuite struct { // RPC methods to test testedRPCs func() []subscribeEventsRPCTest + + ghostID flow.Identifier } func (s *GrpcStateStreamSuite) TearDownTest() { @@ -99,6 +103,14 @@ func (s *GrpcStateStreamSuite) SetupTest() { testnet.WithAdditionalFlag("--event-query-mode=execution-nodes-only"), ) + // add the ghost (access) node config + s.ghostID = unittest.IdentifierFixture() + ghostNode := testnet.NewNodeConfig( + flow.RoleAccess, + testnet.WithID(s.ghostID), + testnet.WithLogLevel(zerolog.FatalLevel), + testnet.AsGhost()) + consensusConfigs := []func(config *testnet.NodeConfig){ testnet.WithAdditionalFlag("--cruise-ctl-fallback-proposal-duration=400ms"), testnet.WithAdditionalFlag(fmt.Sprintf("--required-verification-seal-approvals=%d", 1)), @@ -117,6 +129,7 @@ func (s *GrpcStateStreamSuite) SetupTest() { testnet.NewNodeConfig(flow.RoleVerification, testnet.WithLogLevel(zerolog.FatalLevel)), testANConfig, // access_1 controlANConfig, // access_2 + ghostNode, // access ghost } // add the observer node config @@ -142,6 +155,13 @@ func (s *GrpcStateStreamSuite) SetupTest() { s.testedRPCs = s.getRPCs s.net.Start(s.ctx) + s.Track(s.T(), s.ctx, s.Ghost()) +} + +func (s *GrpcStateStreamSuite) Ghost() *client.GhostClient { + client, err := s.net.ContainerByID(s.ghostID).GhostClient() + require.NoError(s.T(), err, "could not get ghost client") + return client } // TestRestEventStreaming tests gRPC event streaming @@ -158,13 +178,20 @@ func (s *GrpcStateStreamSuite) TestHappyPath() { sdkClientTestON, err := getClient(testONURL) s.Require().NoError(err) + // get the first block height + currentFinalized := s.BlockState.HighestFinalizedHeight() + blockA := 
s.BlockState.WaitForHighestFinalizedProgress(s.T(), currentFinalized) + + // Let the network run for this many blocks + blockCount := uint64(5) + // wait for the requested number of sealed blocks + s.BlockState.WaitForSealed(s.T(), blockA.Header.Height+blockCount) + txGenerator, err := s.net.ContainerByName(testnet.PrimaryAN).TestnetClient() s.Require().NoError(err) header, err := txGenerator.GetLatestSealedBlockHeader(s.ctx) s.Require().NoError(err) - time.Sleep(20 * time.Second) - var startValue interface{} txCount := 10 @@ -176,19 +203,16 @@ func (s *GrpcStateStreamSuite) TestHappyPath() { startValue = header.Height } - testANStream, err := rpc.call(s.ctx, sdkClientTestAN, startValue, &executiondata.EventFilter{}) - s.Require().NoError(err) - testANEvents, testANErrs, err := SubscribeHandler(s.ctx, testANStream.Recv, eventsResponseHandler) + testANRecv := rpc.call(s.ctx, sdkClientTestAN, startValue, &executiondata.EventFilter{}) + testANEvents, testANErrs, err := SubscribeHandler(s.ctx, testANRecv, eventsResponseHandler) s.Require().NoError(err) - controlANStream, err := rpc.call(s.ctx, sdkClientControlAN, startValue, &executiondata.EventFilter{}) - s.Require().NoError(err) - controlANEvents, controlANErrs, err := SubscribeHandler(s.ctx, controlANStream.Recv, eventsResponseHandler) + controlANRecv := rpc.call(s.ctx, sdkClientControlAN, startValue, &executiondata.EventFilter{}) + controlANEvents, controlANErrs, err := SubscribeHandler(s.ctx, controlANRecv, eventsResponseHandler) s.Require().NoError(err) - testONStream, err := rpc.call(s.ctx, sdkClientTestON, startValue, &executiondata.EventFilter{}) - s.Require().NoError(err) - testONEvents, testONErrs, err := SubscribeHandler(s.ctx, testONStream.Recv, eventsResponseHandler) + testONRecv := rpc.call(s.ctx, sdkClientTestON, startValue, &executiondata.EventFilter{}) + testONEvents, testONErrs, err := SubscribeHandler(s.ctx, testONRecv, eventsResponseHandler) s.Require().NoError(err) if rpc.generateEvents { @@ 
-272,7 +296,7 @@ func (s *GrpcStateStreamSuite) generateEvents(client *testnet.Client, txCount in type subscribeEventsRPCTest struct { name string - call func(ctx context.Context, client executiondata.ExecutionDataAPIClient, startValue interface{}, filter *executiondata.EventFilter) (executiondata.ExecutionDataAPI_SubscribeEventsClient, error) + call func(ctx context.Context, client executiondata.ExecutionDataAPIClient, startValue interface{}, filter *executiondata.EventFilter) func() (*executiondata.SubscribeEventsResponse, error) generateEvents bool // add ability to integration test generate new events or use old events to decrease running test time } @@ -280,50 +304,58 @@ func (s *GrpcStateStreamSuite) getRPCs() []subscribeEventsRPCTest { return []subscribeEventsRPCTest{ { name: "SubscribeEventsFromLatest", - call: func(ctx context.Context, client executiondata.ExecutionDataAPIClient, _ interface{}, filter *executiondata.EventFilter) (executiondata.ExecutionDataAPI_SubscribeEventsClient, error) { - return client.SubscribeEventsFromLatest(ctx, &executiondata.SubscribeEventsFromLatestRequest{ + call: func(ctx context.Context, client executiondata.ExecutionDataAPIClient, _ interface{}, filter *executiondata.EventFilter) func() (*executiondata.SubscribeEventsResponse, error) { + stream, err := client.SubscribeEventsFromLatest(ctx, &executiondata.SubscribeEventsFromLatestRequest{ EventEncodingVersion: entities.EventEncodingVersion_CCF_V0, Filter: filter, HeartbeatInterval: 1, }) + s.Require().NoError(err) + return stream.Recv }, generateEvents: true, }, { name: "SubscribeEvents", - call: func(ctx context.Context, client executiondata.ExecutionDataAPIClient, _ interface{}, filter *executiondata.EventFilter) (executiondata.ExecutionDataAPI_SubscribeEventsClient, error) { + call: func(ctx context.Context, client executiondata.ExecutionDataAPIClient, _ interface{}, filter *executiondata.EventFilter) func() (*executiondata.SubscribeEventsResponse, error) { //nolint: 
staticcheck - return client.SubscribeEvents(ctx, &executiondata.SubscribeEventsRequest{ + stream, err := client.SubscribeEvents(ctx, &executiondata.SubscribeEventsRequest{ StartBlockId: convert.IdentifierToMessage(flow.ZeroID), StartBlockHeight: 0, EventEncodingVersion: entities.EventEncodingVersion_CCF_V0, Filter: filter, HeartbeatInterval: 1, }) + s.Require().NoError(err) + return stream.Recv }, generateEvents: true, }, { name: "SubscribeEventsFromStartBlockID", - call: func(ctx context.Context, client executiondata.ExecutionDataAPIClient, startValue interface{}, filter *executiondata.EventFilter) (executiondata.ExecutionDataAPI_SubscribeEventsClient, error) { - return client.SubscribeEventsFromStartBlockID(ctx, &executiondata.SubscribeEventsFromStartBlockIDRequest{ + call: func(ctx context.Context, client executiondata.ExecutionDataAPIClient, startValue interface{}, filter *executiondata.EventFilter) func() (*executiondata.SubscribeEventsResponse, error) { + stream, err := client.SubscribeEventsFromStartBlockID(ctx, &executiondata.SubscribeEventsFromStartBlockIDRequest{ StartBlockId: startValue.([]byte), EventEncodingVersion: entities.EventEncodingVersion_CCF_V0, Filter: filter, HeartbeatInterval: 1, }) + s.Require().NoError(err) + return stream.Recv }, generateEvents: false, // use previous events }, { name: "SubscribeEventsFromStartHeight", - call: func(ctx context.Context, client executiondata.ExecutionDataAPIClient, startValue interface{}, filter *executiondata.EventFilter) (executiondata.ExecutionDataAPI_SubscribeEventsClient, error) { - return client.SubscribeEventsFromStartHeight(ctx, &executiondata.SubscribeEventsFromStartHeightRequest{ + call: func(ctx context.Context, client executiondata.ExecutionDataAPIClient, startValue interface{}, filter *executiondata.EventFilter) func() (*executiondata.SubscribeEventsResponse, error) { + stream, err := client.SubscribeEventsFromStartHeight(ctx, &executiondata.SubscribeEventsFromStartHeightRequest{ 
StartBlockHeight: startValue.(uint64), EventEncodingVersion: entities.EventEncodingVersion_CCF_V0, Filter: filter, HeartbeatInterval: 1, }) + s.Require().NoError(err) + return stream.Recv }, generateEvents: false, // use previous events }, diff --git a/integration/tests/access/cohort3/grpc_streaming_blocks_test.go b/integration/tests/access/cohort3/grpc_streaming_blocks_test.go index 7f1f196106d..75b0a4b5ffd 100644 --- a/integration/tests/access/cohort3/grpc_streaming_blocks_test.go +++ b/integration/tests/access/cohort3/grpc_streaming_blocks_test.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "testing" - "time" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" @@ -14,7 +13,9 @@ import ( "github.com/stretchr/testify/suite" "github.com/onflow/flow-go/engine/common/rpc/convert" + "github.com/onflow/flow-go/engine/ghost/client" "github.com/onflow/flow-go/integration/testnet" + "github.com/onflow/flow-go/integration/tests/lib" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/utils/unittest" @@ -28,6 +29,7 @@ func TestGrpcBlocksStream(t *testing.T) { type GrpcBlocksStreamSuite struct { suite.Suite + lib.TestnetStateTracker log zerolog.Logger @@ -39,6 +41,8 @@ type GrpcBlocksStreamSuite struct { // RPC methods to test testedRPCs func() []subscribeBlocksRPCTest + + ghostID flow.Identifier } func (s *GrpcBlocksStreamSuite) TearDownTest() { @@ -76,6 +80,14 @@ func (s *GrpcBlocksStreamSuite) SetupTest() { testnet.WithLogLevel(zerolog.FatalLevel), } + // add the ghost (access) node config + s.ghostID = unittest.IdentifierFixture() + ghostNode := testnet.NewNodeConfig( + flow.RoleAccess, + testnet.WithID(s.ghostID), + testnet.WithLogLevel(zerolog.FatalLevel), + testnet.AsGhost()) + nodeConfigs := []testnet.NodeConfig{ testnet.NewNodeConfig(flow.RoleCollection, testnet.WithLogLevel(zerolog.FatalLevel)), testnet.NewNodeConfig(flow.RoleCollection, testnet.WithLogLevel(zerolog.FatalLevel)), @@ -86,6 +98,7 @@ func (s *GrpcBlocksStreamSuite) 
SetupTest() { testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...), testnet.NewNodeConfig(flow.RoleVerification, testnet.WithLogLevel(zerolog.FatalLevel)), accessConfig, + ghostNode, // access ghost } // add the observer node config @@ -111,6 +124,13 @@ func (s *GrpcBlocksStreamSuite) SetupTest() { s.testedRPCs = s.getRPCs s.net.Start(s.ctx) + s.Track(s.T(), s.ctx, s.Ghost()) +} + +func (s *GrpcBlocksStreamSuite) Ghost() *client.GhostClient { + client, err := s.net.ContainerByID(s.ghostID).GhostClient() + require.NoError(s.T(), err, "could not get ghost client") + return client } // TestRestEventStreaming tests gRPC event streaming @@ -123,13 +143,20 @@ func (s *GrpcBlocksStreamSuite) TestHappyPath() { observerClient, err := getAccessAPIClient(observerURL) s.Require().NoError(err) + // get the first block height + currentFinalized := s.BlockState.HighestFinalizedHeight() + blockA := s.BlockState.WaitForHighestFinalizedProgress(s.T(), currentFinalized) + + // Let the network run for this many blocks + blockCount := uint64(5) + // wait for the requested number of sealed blocks + s.BlockState.WaitForSealed(s.T(), blockA.Header.Height+blockCount) + txGenerator, err := s.net.ContainerByName(testnet.PrimaryAN).TestnetClient() s.Require().NoError(err) header, err := txGenerator.GetLatestSealedBlockHeader(s.ctx) s.Require().NoError(err) - time.Sleep(20 * time.Second) - var startValue interface{} txCount := 10 @@ -141,14 +168,12 @@ func (s *GrpcBlocksStreamSuite) TestHappyPath() { startValue = header.Height } - accessStream, err := rpc.call(s.ctx, accessClient, startValue) - s.Require().NoError(err) - accessBlocks, accessBlockErrs, err := SubscribeHandler(s.ctx, accessStream.Recv, blockResponseHandler) + accessRecv := rpc.call(s.ctx, accessClient, startValue) + accessBlocks, accessBlockErrs, err := SubscribeHandler(s.ctx, accessRecv, blockResponseHandler) s.Require().NoError(err) - observerStream, err := rpc.call(s.ctx, observerClient, startValue) - 
s.Require().NoError(err) - observerBlocks, observerBlockErrs, err := SubscribeHandler(s.ctx, observerStream.Recv, blockResponseHandler) + observerRecv := rpc.call(s.ctx, observerClient, startValue) + observerBlocks, observerBlockErrs, err := SubscribeHandler(s.ctx, observerRecv, blockResponseHandler) s.Require().NoError(err) foundANTxCount := 0 @@ -211,38 +236,44 @@ func compareBlocks(t *testing.T, accessBlock *flow.Block, observerBlock *flow.Bl type subscribeBlocksRPCTest struct { name string - call func(ctx context.Context, client accessproto.AccessAPIClient, startValue interface{}) (accessproto.AccessAPI_SubscribeBlocksFromLatestClient, error) + call func(ctx context.Context, client accessproto.AccessAPIClient, startValue interface{}) func() (*accessproto.SubscribeBlocksResponse, error) } func (s *GrpcBlocksStreamSuite) getRPCs() []subscribeBlocksRPCTest { return []subscribeBlocksRPCTest{ { name: "SubscribeBlocksFromLatest", - call: func(ctx context.Context, client accessproto.AccessAPIClient, _ interface{}) (accessproto.AccessAPI_SubscribeBlocksFromLatestClient, error) { - return client.SubscribeBlocksFromLatest(ctx, &accessproto.SubscribeBlocksFromLatestRequest{ + call: func(ctx context.Context, client accessproto.AccessAPIClient, _ interface{}) func() (*accessproto.SubscribeBlocksResponse, error) { + stream, err := client.SubscribeBlocksFromLatest(ctx, &accessproto.SubscribeBlocksFromLatestRequest{ BlockStatus: entities.BlockStatus_BLOCK_FINALIZED, FullBlockResponse: true, }) + s.Require().NoError(err) + return stream.Recv }, }, { name: "SubscribeBlocksFromStartBlockID", - call: func(ctx context.Context, client accessproto.AccessAPIClient, startValue interface{}) (accessproto.AccessAPI_SubscribeBlocksFromLatestClient, error) { - return client.SubscribeBlocksFromStartBlockID(ctx, &accessproto.SubscribeBlocksFromStartBlockIDRequest{ + call: func(ctx context.Context, client accessproto.AccessAPIClient, startValue interface{}) func() 
(*accessproto.SubscribeBlocksResponse, error) { + stream, err := client.SubscribeBlocksFromStartBlockID(ctx, &accessproto.SubscribeBlocksFromStartBlockIDRequest{ StartBlockId: startValue.([]byte), BlockStatus: entities.BlockStatus_BLOCK_FINALIZED, FullBlockResponse: true, }) + s.Require().NoError(err) + return stream.Recv }, }, { name: "SubscribeBlocksFromStartHeight", - call: func(ctx context.Context, client accessproto.AccessAPIClient, startValue interface{}) (accessproto.AccessAPI_SubscribeBlocksFromLatestClient, error) { - return client.SubscribeBlocksFromStartHeight(ctx, &accessproto.SubscribeBlocksFromStartHeightRequest{ + call: func(ctx context.Context, client accessproto.AccessAPIClient, startValue interface{}) func() (*accessproto.SubscribeBlocksResponse, error) { + stream, err := client.SubscribeBlocksFromStartHeight(ctx, &accessproto.SubscribeBlocksFromStartHeightRequest{ StartBlockHeight: startValue.(uint64), BlockStatus: entities.BlockStatus_BLOCK_FINALIZED, FullBlockResponse: true, }) + s.Require().NoError(err) + return stream.Recv }, }, } From 6070f4e108b7b1fbfe2d70fb55ee9835fa32467c Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Mon, 8 Apr 2024 12:30:34 -0400 Subject: [PATCH 076/148] add integration tests skeleton - refactor epoch integration tests --- integration/testnet/network.go | 4 +- integration/tests/epochs/base_suite.go | 167 +++++++++++ .../cohort1/epoch_static_transition_test.go | 4 +- ...e.go => dynamic_epoch_transition_suite.go} | 271 ++++-------------- .../recover_epoch/recover_epoch_efm_test.go | 32 +++ .../tests/epochs/recover_epoch/suite.go | 24 ++ 6 files changed, 286 insertions(+), 216 deletions(-) create mode 100644 integration/tests/epochs/base_suite.go rename integration/tests/epochs/{suite.go => dynamic_epoch_transition_suite.go} (71%) create mode 100644 integration/tests/epochs/recover_epoch/recover_epoch_efm_test.go create mode 100644 integration/tests/epochs/recover_epoch/suite.go diff --git 
a/integration/testnet/network.go b/integration/testnet/network.go index 5a4484c39be..ab2b942055a 100644 --- a/integration/testnet/network.go +++ b/integration/testnet/network.go @@ -183,10 +183,10 @@ func (net *FlowNetwork) Identities() flow.IdentityList { } // ContainersByRole returns all the containers in the network with the specified role -func (net *FlowNetwork) ContainersByRole(role flow.Role) []*Container { +func (net *FlowNetwork) ContainersByRole(role flow.Role, ghost bool) []*Container { cl := make([]*Container, 0, len(net.Containers)) for _, c := range net.Containers { - if c.Config.Role == role { + if c.Config.Role == role && c.Config.Ghost == ghost { cl = append(cl, c) } } diff --git a/integration/tests/epochs/base_suite.go b/integration/tests/epochs/base_suite.go new file mode 100644 index 00000000000..d7a8356228f --- /dev/null +++ b/integration/tests/epochs/base_suite.go @@ -0,0 +1,167 @@ +// Package epochs contains common functionality for the epoch integration test suite. +// Individual tests exist in sub-directories of this: cohort1, cohort2... +// Each cohort is run as a separate, sequential CI job. Since the epoch tests are long +// and resource-heavy, we split them into several cohorts, which can be run in parallel. +// +// If a new cohort is added in the future, it must be added to: +// - ci.yml, flaky-test-monitor.yml, bors.toml (ensure new cohort of tests is run) +// - Makefile (include new cohort in integration-test directive, etc.) +package epochs + +import ( + "context" + "fmt" + "time" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + "github.com/onflow/flow-go/engine/ghost/client" + "github.com/onflow/flow-go/integration/testnet" + "github.com/onflow/flow-go/integration/tests/lib" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" +) + +// BaseSuite encapsulates common functionality for epoch integration tests. 
+type BaseSuite struct { + suite.Suite + lib.TestnetStateTracker + cancel context.CancelFunc + log zerolog.Logger + net *testnet.FlowNetwork + ghostID flow.Identifier + + Client *testnet.Client + Ctx context.Context + + // Epoch config (lengths in views) + StakingAuctionLen uint64 + DKGPhaseLen uint64 + EpochLen uint64 + EpochCommitSafetyThreshold uint64 + // Whether approvals are required for sealing (we only enable for VN tests because + // requiring approvals requires a longer DKG period to avoid flakiness) + RequiredSealApprovals uint // defaults to 0 (no approvals required) + // Consensus Node proposal duration + ConsensusProposalDuration time.Duration +} + +// SetupTest is run automatically by the testing framework before each test case. +func (s *BaseSuite) SetupTest() { + // If unset, use default value 100ms + if s.ConsensusProposalDuration == 0 { + s.ConsensusProposalDuration = time.Millisecond * 100 + } + + minEpochLength := s.StakingAuctionLen + s.DKGPhaseLen*3 + 20 + // ensure epoch lengths are set correctly + require.Greater(s.T(), s.EpochLen, minEpochLength+s.EpochCommitSafetyThreshold, "epoch too short") + + s.Ctx, s.cancel = context.WithCancel(context.Background()) + s.log = unittest.LoggerForTest(s.Suite.T(), zerolog.InfoLevel) + s.log.Info().Msg("================> SetupTest") + defer func() { + s.log.Info().Msg("================> Finish SetupTest") + }() + + collectionConfigs := []func(*testnet.NodeConfig){ + testnet.WithAdditionalFlag("--hotstuff-proposal-duration=100ms"), + testnet.WithLogLevel(zerolog.WarnLevel)} + + consensusConfigs := []func(config *testnet.NodeConfig){ + testnet.WithAdditionalFlag(fmt.Sprintf("--cruise-ctl-fallback-proposal-duration=%s", s.ConsensusProposalDuration)), + testnet.WithAdditionalFlag("--cruise-ctl-enabled=false"), // disable cruise control for integration tests + testnet.WithAdditionalFlag(fmt.Sprintf("--required-verification-seal-approvals=%d", s.RequiredSealApprovals)), + 
testnet.WithAdditionalFlag(fmt.Sprintf("--required-construction-seal-approvals=%d", s.RequiredSealApprovals)), + testnet.WithLogLevel(zerolog.DebugLevel)} + + // a ghost node masquerading as an access node + s.ghostID = unittest.IdentifierFixture() + ghostNode := testnet.NewNodeConfig( + flow.RoleAccess, + testnet.WithLogLevel(zerolog.FatalLevel), + testnet.WithID(s.ghostID), + testnet.AsGhost()) + + confs := []testnet.NodeConfig{ + testnet.NewNodeConfig(flow.RoleAccess, testnet.WithLogLevel(zerolog.WarnLevel)), + testnet.NewNodeConfig(flow.RoleAccess, testnet.WithLogLevel(zerolog.WarnLevel)), + testnet.NewNodeConfig(flow.RoleCollection, collectionConfigs...), + testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...), + testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...), + testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...), + testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...), + testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.WarnLevel), testnet.WithAdditionalFlag("--extensive-logging=true")), + testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.WarnLevel)), + testnet.NewNodeConfig(flow.RoleVerification, testnet.WithLogLevel(zerolog.WarnLevel)), + ghostNode, + } + + netConf := testnet.NewNetworkConfigWithEpochConfig("epochs-tests", confs, s.StakingAuctionLen, s.DKGPhaseLen, s.EpochLen, s.EpochCommitSafetyThreshold) + + // initialize the network + s.net = testnet.PrepareFlowNetwork(s.T(), netConf, flow.Localnet) + + // start the network + s.net.Start(s.Ctx) + + // start tracking blocks + s.Track(s.T(), s.Ctx, s.Ghost()) + + // use AN1 for test-related queries - the AN join/leave test will replace AN2 + client, err := s.net.ContainerByName(testnet.PrimaryAN).TestnetClient() + require.NoError(s.T(), err) + + s.Client = client + + // log network info periodically to aid in debugging future flaky tests + go lib.LogStatusPeriodically(s.T(), s.Ctx, s.log, s.Client, 5*time.Second) +} + 
+func (s *BaseSuite) Ghost() *client.GhostClient { + client, err := s.net.ContainerByID(s.ghostID).GhostClient() + require.NoError(s.T(), err, "could not get ghost Client") + return client +} + +// TimedLogf logs the message using t.Log and the suite logger, but prefixes the current time. +// This enables viewing logs inline with Docker logs as well as other test logs. +func (s *BaseSuite) TimedLogf(msg string, args ...interface{}) { + s.log.Info().Msgf(msg, args...) + args = append([]interface{}{time.Now().String()}, args...) + s.T().Logf("%s - "+msg, args...) +} + +//func (s *BaseSuite) TearDownTest() { +// s.log.Info().Msg("================> Start TearDownTest") +// s.net.Remove() +// s.cancel() +// s.log.Info().Msg("================> Finish TearDownTest") +//} + +// AwaitEpochPhase waits for the given phase, in the given epoch. +func (s *BaseSuite) AwaitEpochPhase(ctx context.Context, expectedEpoch uint64, expectedPhase flow.EpochPhase, waitFor, tick time.Duration) { + var actualEpoch uint64 + var actualPhase flow.EpochPhase + condition := func() bool { + snapshot, err := s.Client.GetLatestProtocolSnapshot(ctx) + require.NoError(s.T(), err) + + actualEpoch, err = snapshot.Epochs().Current().Counter() + require.NoError(s.T(), err) + actualPhase, err = snapshot.Phase() + require.NoError(s.T(), err) + + return actualEpoch == expectedEpoch && actualPhase == expectedPhase + } + require.Eventuallyf(s.T(), condition, waitFor, tick, "did not reach expectedEpoch %d phase %s within %s. Last saw epoch=%d and phase=%s", expectedEpoch, expectedPhase, waitFor, actualEpoch, actualPhase) +} + +// GetContainersByRole returns all containers from the network for the specified role, making sure the containers are not ghost nodes. 
+func (s *BaseSuite) GetContainersByRole(role flow.Role) []*testnet.Container { + nodes := s.net.ContainersByRole(role, false) + require.True(s.T(), len(nodes) > 0) + return nodes +} diff --git a/integration/tests/epochs/cohort1/epoch_static_transition_test.go b/integration/tests/epochs/cohort1/epoch_static_transition_test.go index ae1708f514e..6c8ab6d6d3c 100644 --- a/integration/tests/epochs/cohort1/epoch_static_transition_test.go +++ b/integration/tests/epochs/cohort1/epoch_static_transition_test.go @@ -18,7 +18,7 @@ func TestEpochStaticTransition(t *testing.T) { // StaticEpochTransitionSuite is the suite used for epoch transition tests // with a static identity table. type StaticEpochTransitionSuite struct { - epochs.Suite + epochs.DynamicEpochTransitionSuite } func (s *StaticEpochTransitionSuite) SetupTest() { @@ -30,7 +30,7 @@ func (s *StaticEpochTransitionSuite) SetupTest() { s.EpochCommitSafetyThreshold = 50 // run the generic setup, which starts up the network - s.Suite.SetupTest() + s.BaseSuite.SetupTest() } // TestStaticEpochTransition asserts epoch state transitions over full epoch diff --git a/integration/tests/epochs/suite.go b/integration/tests/epochs/dynamic_epoch_transition_suite.go similarity index 71% rename from integration/tests/epochs/suite.go rename to integration/tests/epochs/dynamic_epoch_transition_suite.go index e0efecdf80a..192d931339f 100644 --- a/integration/tests/epochs/suite.go +++ b/integration/tests/epochs/dynamic_epoch_transition_suite.go @@ -14,21 +14,17 @@ import ( "strings" "time" - "github.com/rs/zerolog" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - "github.com/onflow/cadence" "github.com/onflow/crypto" "github.com/onflow/flow-core-contracts/lib/go/templates" + "github.com/rs/zerolog" + "github.com/stretchr/testify/require" sdk "github.com/onflow/flow-go-sdk" sdkcrypto "github.com/onflow/flow-go-sdk/crypto" - "github.com/onflow/flow-go/engine/ghost/client" 
"github.com/onflow/flow-go/fvm/blueprints" "github.com/onflow/flow-go/integration/testnet" - "github.com/onflow/flow-go/integration/tests/lib" "github.com/onflow/flow-go/integration/utils" "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/encodable" @@ -44,119 +40,24 @@ import ( // NOTE: The snapshot must reference a block within the second epoch. type nodeUpdateValidation func(ctx context.Context, env templates.Environment, snapshot *inmem.Snapshot, info *StakedNodeOperationInfo) -// Suite encapsulates common functionality for epoch integration tests. -type Suite struct { - suite.Suite - lib.TestnetStateTracker - cancel context.CancelFunc - log zerolog.Logger - net *testnet.FlowNetwork - ghostID flow.Identifier - - Client *testnet.Client - Ctx context.Context - - // Epoch config (lengths in views) - StakingAuctionLen uint64 - DKGPhaseLen uint64 - EpochLen uint64 - EpochCommitSafetyThreshold uint64 - // Whether approvals are required for sealing (we only enable for VN tests because - // requiring approvals requires a longer DKG period to avoid flakiness) - RequiredSealApprovals uint // defaults to 0 (no approvals required) - // Consensus Node proposal duration - ConsensusProposalDuration time.Duration -} - -// SetupTest is run automatically by the testing framework before each test case. 
-func (s *Suite) SetupTest() { - // If unset, use default value 100ms - if s.ConsensusProposalDuration == 0 { - s.ConsensusProposalDuration = time.Millisecond * 100 - } - - minEpochLength := s.StakingAuctionLen + s.DKGPhaseLen*3 + 20 - // ensure epoch lengths are set correctly - require.Greater(s.T(), s.EpochLen, minEpochLength+s.EpochCommitSafetyThreshold, "epoch too short") - - s.Ctx, s.cancel = context.WithCancel(context.Background()) - s.log = unittest.LoggerForTest(s.Suite.T(), zerolog.InfoLevel) - s.log.Info().Msg("================> SetupTest") - defer func() { - s.log.Info().Msg("================> Finish SetupTest") - }() - - collectionConfigs := []func(*testnet.NodeConfig){ - testnet.WithAdditionalFlag("--hotstuff-proposal-duration=100ms"), - testnet.WithLogLevel(zerolog.WarnLevel)} - - consensusConfigs := []func(config *testnet.NodeConfig){ - testnet.WithAdditionalFlag(fmt.Sprintf("--cruise-ctl-fallback-proposal-duration=%s", s.ConsensusProposalDuration)), - testnet.WithAdditionalFlag("--cruise-ctl-enabled=false"), // disable cruise control for integration tests - testnet.WithAdditionalFlag(fmt.Sprintf("--required-verification-seal-approvals=%d", s.RequiredSealApprovals)), - testnet.WithAdditionalFlag(fmt.Sprintf("--required-construction-seal-approvals=%d", s.RequiredSealApprovals)), - testnet.WithLogLevel(zerolog.WarnLevel)} - - // a ghost node masquerading as an access node - s.ghostID = unittest.IdentifierFixture() - ghostNode := testnet.NewNodeConfig( - flow.RoleAccess, - testnet.WithLogLevel(zerolog.FatalLevel), - testnet.WithID(s.ghostID), - testnet.AsGhost()) - - confs := []testnet.NodeConfig{ - testnet.NewNodeConfig(flow.RoleAccess, testnet.WithLogLevel(zerolog.WarnLevel)), - testnet.NewNodeConfig(flow.RoleAccess, testnet.WithLogLevel(zerolog.WarnLevel)), - testnet.NewNodeConfig(flow.RoleCollection, collectionConfigs...), - testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...), - testnet.NewNodeConfig(flow.RoleConsensus, 
consensusConfigs...), - testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.WarnLevel), testnet.WithAdditionalFlag("--extensive-logging=true")), - testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.WarnLevel)), - testnet.NewNodeConfig(flow.RoleVerification, testnet.WithLogLevel(zerolog.WarnLevel)), - ghostNode, - } - - netConf := testnet.NewNetworkConfigWithEpochConfig("epochs-tests", confs, s.StakingAuctionLen, s.DKGPhaseLen, s.EpochLen, s.EpochCommitSafetyThreshold) - - // initialize the network - s.net = testnet.PrepareFlowNetwork(s.T(), netConf, flow.Localnet) - - // start the network - s.net.Start(s.Ctx) - - // start tracking blocks - s.Track(s.T(), s.Ctx, s.Ghost()) - - // use AN1 for test-related queries - the AN join/leave test will replace AN2 - client, err := s.net.ContainerByName(testnet.PrimaryAN).TestnetClient() - require.NoError(s.T(), err) - - s.Client = client - - // log network info periodically to aid in debugging future flaky tests - go lib.LogStatusPeriodically(s.T(), s.Ctx, s.log, s.Client, 5*time.Second) -} - -func (s *Suite) Ghost() *client.GhostClient { - client, err := s.net.ContainerByID(s.ghostID).GhostClient() - require.NoError(s.T(), err, "could not get ghost Client") - return client +// DynamicEpochTransitionSuite is the suite used for epoch transitions tests +// with a dynamic identity table. +type DynamicEpochTransitionSuite struct { + BaseSuite } -// TimedLogf logs the message using t.Log and the suite logger, but prefixes the current time. -// This enables viewing logs inline with Docker logs as well as other test logs. -func (s *Suite) TimedLogf(msg string, args ...interface{}) { - s.log.Info().Msgf(msg, args...) - args = append([]interface{}{time.Now().String()}, args...) - s.T().Logf("%s - "+msg, args...) 
-} +func (s *DynamicEpochTransitionSuite) SetupTest() { + // use a longer staking auction length to accommodate staking operations for joining/leaving nodes + // NOTE: this value is set fairly aggressively to ensure shorter test times. + // If flakiness due to failure to complete staking operations in time is observed, + // try increasing (by 10-20 views). + s.StakingAuctionLen = 50 + s.DKGPhaseLen = 50 + s.EpochLen = 250 + s.EpochCommitSafetyThreshold = 20 -func (s *Suite) TearDownTest() { - s.log.Info().Msg("================> Start TearDownTest") - s.net.Remove() - s.cancel() - s.log.Info().Msg("================> Finish TearDownTest") + // run the generic setup, which starts up the network + s.BaseSuite.SetupTest() } // StakedNodeOperationInfo struct contains all the node information needed to @@ -189,7 +90,7 @@ type StakedNodeOperationInfo struct { // NOTE 2: This function performs steps 1-6 in one custom transaction, to reduce // the time taken by each test case. Individual transactions for each step can be // found in Git history, for example: 9867056a8b7246655047bc457f9000398f6687c0. 
-func (s *Suite) StakeNode(ctx context.Context, env templates.Environment, role flow.Role) *StakedNodeOperationInfo { +func (s *DynamicEpochTransitionSuite) StakeNode(ctx context.Context, env templates.Environment, role flow.Role) *StakedNodeOperationInfo { stakingAccountKey, networkingKey, stakingKey, machineAccountKey, machineAccountPubKey := s.generateAccountKeys(role) nodeID := flow.MakeID(stakingKey.PublicKey().Encode()) @@ -257,7 +158,7 @@ func (s *Suite) StakeNode(ctx context.Context, env templates.Environment, role f } // generates initial keys needed to bootstrap account -func (s *Suite) generateAccountKeys(role flow.Role) ( +func (s *DynamicEpochTransitionSuite) generateAccountKeys(role flow.Role) ( operatorAccountKey, networkingKey, stakingKey, @@ -285,7 +186,7 @@ func (s *Suite) generateAccountKeys(role flow.Role) ( // removeNodeFromProtocol removes the given node from the protocol. // NOTE: assumes staking occurs in first epoch (counter 0) -func (s *Suite) removeNodeFromProtocol(ctx context.Context, env templates.Environment, nodeID flow.Identifier) { +func (s *DynamicEpochTransitionSuite) removeNodeFromProtocol(ctx context.Context, env templates.Environment, nodeID flow.Identifier) { result, err := s.submitAdminRemoveNodeTx(ctx, env, nodeID) require.NoError(s.T(), err) require.NoError(s.T(), result.Error) @@ -295,7 +196,7 @@ func (s *Suite) removeNodeFromProtocol(ctx context.Context, env templates.Enviro } // submitAdminRemoveNodeTx will submit the admin remove node transaction -func (s *Suite) submitAdminRemoveNodeTx(ctx context.Context, +func (s *DynamicEpochTransitionSuite) submitAdminRemoveNodeTx(ctx context.Context, env templates.Environment, nodeID flow.Identifier, ) (*sdk.TransactionResult, error) { @@ -320,14 +221,14 @@ func (s *Suite) submitAdminRemoveNodeTx(ctx context.Context, return result, nil } -func (s *Suite) ExecuteGetProposedTableScript(ctx context.Context, env templates.Environment, nodeID flow.Identifier) cadence.Value { +func (s 
*DynamicEpochTransitionSuite) ExecuteGetProposedTableScript(ctx context.Context, env templates.Environment, nodeID flow.Identifier) cadence.Value { v, err := s.Client.ExecuteScriptBytes(ctx, templates.GenerateReturnProposedTableScript(env), []cadence.Value{}) require.NoError(s.T(), err) return v } // ExecuteGetNodeInfoScript executes a script to get staking info about the given node. -func (s *Suite) ExecuteGetNodeInfoScript(ctx context.Context, env templates.Environment, nodeID flow.Identifier) cadence.Value { +func (s *DynamicEpochTransitionSuite) ExecuteGetNodeInfoScript(ctx context.Context, env templates.Environment, nodeID flow.Identifier) cadence.Value { cdcNodeID, err := cadence.NewString(nodeID.String()) require.NoError(s.T(), err) v, err := s.Client.ExecuteScriptBytes(ctx, templates.GenerateGetNodeInfoScript(env), []cadence.Value{cdcNodeID}) @@ -336,7 +237,7 @@ func (s *Suite) ExecuteGetNodeInfoScript(ctx context.Context, env templates.Envi } // SubmitSetApprovedListTx adds a node to the approved node list, this must be done when a node joins the protocol during the epoch staking phase -func (s *Suite) SubmitSetApprovedListTx(ctx context.Context, env templates.Environment, identities ...flow.Identifier) *sdk.TransactionResult { +func (s *DynamicEpochTransitionSuite) SubmitSetApprovedListTx(ctx context.Context, env templates.Environment, identities ...flow.Identifier) *sdk.TransactionResult { latestBlockID, err := s.Client.GetLatestBlockID(ctx) require.NoError(s.T(), err) @@ -362,7 +263,7 @@ func (s *Suite) SubmitSetApprovedListTx(ctx context.Context, env templates.Envir } // ExecuteReadApprovedNodesScript executes the return proposal table script and returns a list of approved nodes -func (s *Suite) ExecuteReadApprovedNodesScript(ctx context.Context, env templates.Environment) cadence.Value { +func (s *DynamicEpochTransitionSuite) ExecuteReadApprovedNodesScript(ctx context.Context, env templates.Environment) cadence.Value { v, err := 
s.Client.ExecuteScriptBytes(ctx, templates.GenerateGetApprovedNodesScript(env), []cadence.Value{}) require.NoError(s.T(), err) @@ -370,14 +271,14 @@ func (s *Suite) ExecuteReadApprovedNodesScript(ctx context.Context, env template } // getTestContainerName returns a name for a test container in the form of ${role}_${nodeID}_test -func (s *Suite) getTestContainerName(role flow.Role) string { - i := len(s.net.ContainersByRole(role)) + 1 +func (s *DynamicEpochTransitionSuite) getTestContainerName(role flow.Role) string { + i := len(s.net.ContainersByRole(role, false)) + 1 return fmt.Sprintf("%s_test_%d", role, i) } // assertNodeApprovedAndProposed executes the read approved nodes list and get proposed table scripts // and checks that the info.NodeID is in both list -func (s *Suite) assertNodeApprovedAndProposed(ctx context.Context, env templates.Environment, info *StakedNodeOperationInfo) { +func (s *DynamicEpochTransitionSuite) assertNodeApprovedAndProposed(ctx context.Context, env templates.Environment, info *StakedNodeOperationInfo) { // ensure node ID in approved list //approvedNodes := s.ExecuteReadApprovedNodesScript(Ctx, env) //require.Containsf(s.T(), approvedNodes.(cadence.Array).Values, cadence.String(info.NodeID.String()), "expected new node to be in approved nodes list: %x", info.NodeID) @@ -395,7 +296,7 @@ func (s *Suite) assertNodeApprovedAndProposed(ctx context.Context, env templates } // newTestContainerOnNetwork configures a new container on the suites network -func (s *Suite) newTestContainerOnNetwork(role flow.Role, info *StakedNodeOperationInfo) *testnet.Container { +func (s *DynamicEpochTransitionSuite) newTestContainerOnNetwork(role flow.Role, info *StakedNodeOperationInfo) *testnet.Container { containerConfigs := []func(config *testnet.NodeConfig){ testnet.WithLogLevel(zerolog.WarnLevel), testnet.WithID(info.NodeID), @@ -417,10 +318,8 @@ func (s *Suite) newTestContainerOnNetwork(role flow.Role, info *StakedNodeOperat 
nodeContainer.AddFlag("insecure-access-api", "false") accessNodeIDS := make([]string, 0) - for _, c := range s.net.ContainersByRole(flow.RoleAccess) { - if c.Config.Role == flow.RoleAccess && !c.Config.Ghost { - accessNodeIDS = append(accessNodeIDS, c.Config.NodeID.String()) - } + for _, c := range s.net.ContainersByRole(flow.RoleAccess, false) { + accessNodeIDS = append(accessNodeIDS, c.Config.NodeID.String()) } nodeContainer.AddFlag("access-node-ids", strings.Join(accessNodeIDS, ",")) } @@ -429,7 +328,7 @@ func (s *Suite) newTestContainerOnNetwork(role flow.Role, info *StakedNodeOperat } // StakeNewNode will stake a new node, and create the corresponding docker container for that node -func (s *Suite) StakeNewNode(ctx context.Context, env templates.Environment, role flow.Role) (*StakedNodeOperationInfo, *testnet.Container) { +func (s *DynamicEpochTransitionSuite) StakeNewNode(ctx context.Context, env templates.Environment, role flow.Role) (*StakedNodeOperationInfo, *testnet.Container) { // stake our new node info := s.StakeNode(ctx, env, role) @@ -442,40 +341,27 @@ func (s *Suite) StakeNewNode(ctx context.Context, env templates.Environment, rol return info, testContainer } -// getContainerToReplace return a container from the network, make sure the container is not a ghost -func (s *Suite) getContainerToReplace(role flow.Role) *testnet.Container { - nodes := s.net.ContainersByRole(role) - require.True(s.T(), len(nodes) > 0) - - for _, c := range nodes { - if !c.Config.Ghost { - return c - } - } - - return nil +// AwaitFinalizedView polls until it observes that the latest finalized block has a view +// greater than or equal to the input view. This is used to wait until when an epoch +// transition must have happened. 
+func (s *DynamicEpochTransitionSuite) AwaitFinalizedView(ctx context.Context, view uint64, waitFor, tick time.Duration) { + require.Eventually(s.T(), func() bool { + sealed := s.getLatestFinalizedHeader(ctx) + return sealed.View >= view + }, waitFor, tick) } -// AwaitEpochPhase waits for the given phase, in the given epoch. -func (s *Suite) AwaitEpochPhase(ctx context.Context, expectedEpoch uint64, expectedPhase flow.EpochPhase, waitFor, tick time.Duration) { - var actualEpoch uint64 - var actualPhase flow.EpochPhase - condition := func() bool { - snapshot, err := s.Client.GetLatestProtocolSnapshot(ctx) - require.NoError(s.T(), err) - - actualEpoch, err = snapshot.Epochs().Current().Counter() - require.NoError(s.T(), err) - actualPhase, err = snapshot.Phase() - require.NoError(s.T(), err) - - return actualEpoch == expectedEpoch && actualPhase == expectedPhase - } - require.Eventuallyf(s.T(), condition, waitFor, tick, "did not reach expectedEpoch %d phase %s within %s. Last saw epoch=%d and phase=%s", expectedEpoch, expectedPhase, waitFor, actualEpoch, actualPhase) +// getLatestFinalizedHeader retrieves the latest finalized block, as reported in LatestSnapshot. +func (s *DynamicEpochTransitionSuite) getLatestFinalizedHeader(ctx context.Context) *flow.Header { + snapshot, err := s.Client.GetLatestProtocolSnapshot(ctx) + require.NoError(s.T(), err) + finalized, err := snapshot.Head() + require.NoError(s.T(), err) + return finalized } // AssertInEpochPhase checks if we are in the phase of the given epoch. 
-func (s *Suite) AssertInEpochPhase(ctx context.Context, expectedEpoch uint64, expectedPhase flow.EpochPhase) { +func (s *DynamicEpochTransitionSuite) AssertInEpochPhase(ctx context.Context, expectedEpoch uint64, expectedPhase flow.EpochPhase) { snapshot, err := s.Client.GetLatestProtocolSnapshot(ctx) require.NoError(s.T(), err) actualEpoch, err := snapshot.Epochs().Current().Counter() @@ -491,7 +377,7 @@ func (s *Suite) AssertInEpochPhase(ctx context.Context, expectedEpoch uint64, ex } // AssertInEpoch requires actual epoch counter is equal to counter provided. -func (s *Suite) AssertInEpoch(ctx context.Context, expectedEpoch uint64) { +func (s *DynamicEpochTransitionSuite) AssertInEpoch(ctx context.Context, expectedEpoch uint64) { snapshot, err := s.Client.GetLatestProtocolSnapshot(ctx) require.NoError(s.T(), err) actualEpoch, err := snapshot.Epochs().Current().Counter() @@ -501,7 +387,7 @@ func (s *Suite) AssertInEpoch(ctx context.Context, expectedEpoch uint64) { // AssertNodeNotParticipantInEpoch asserts that the given node ID does not exist // in the epoch's identity table. -func (s *Suite) AssertNodeNotParticipantInEpoch(epoch protocol.Epoch, nodeID flow.Identifier) { +func (s *DynamicEpochTransitionSuite) AssertNodeNotParticipantInEpoch(epoch protocol.Epoch, nodeID flow.Identifier) { identities, err := epoch.InitialIdentities() require.NoError(s.T(), err) require.NotContains(s.T(), identities.NodeIDs(), nodeID) @@ -510,7 +396,7 @@ func (s *Suite) AssertNodeNotParticipantInEpoch(epoch protocol.Epoch, nodeID flo // AwaitSealedBlockHeightExceedsSnapshot polls until it observes that the latest // sealed block height has exceeded the snapshot height by numOfBlocks // the snapshot height and latest finalized height is greater than numOfBlocks. 
-func (s *Suite) AwaitSealedBlockHeightExceedsSnapshot(ctx context.Context, snapshot *inmem.Snapshot, threshold uint64, waitFor, tick time.Duration) { +func (s *DynamicEpochTransitionSuite) AwaitSealedBlockHeightExceedsSnapshot(ctx context.Context, snapshot *inmem.Snapshot, threshold uint64, waitFor, tick time.Duration) { header, err := snapshot.Head() require.NoError(s.T(), err) snapshotHeight := header.Height @@ -522,18 +408,8 @@ func (s *Suite) AwaitSealedBlockHeightExceedsSnapshot(ctx context.Context, snaps }, waitFor, tick) } -// AwaitFinalizedView polls until it observes that the latest finalized block has a view -// greater than or equal to the input view. This is used to wait until when an epoch -// transition must have happened. -func (s *Suite) AwaitFinalizedView(ctx context.Context, view uint64, waitFor, tick time.Duration) { - require.Eventually(s.T(), func() bool { - sealed := s.getLatestFinalizedHeader(ctx) - return sealed.View >= view - }, waitFor, tick) -} - // getLatestSealedHeader retrieves the latest sealed block, as reported in LatestSnapshot. -func (s *Suite) getLatestSealedHeader(ctx context.Context) *flow.Header { +func (s *DynamicEpochTransitionSuite) getLatestSealedHeader(ctx context.Context) *flow.Header { snapshot, err := s.Client.GetLatestProtocolSnapshot(ctx) require.NoError(s.T(), err) segment, err := snapshot.SealingSegment() @@ -542,18 +418,9 @@ func (s *Suite) getLatestSealedHeader(ctx context.Context) *flow.Header { return sealed.Header } -// getLatestFinalizedHeader retrieves the latest finalized block, as reported in LatestSnapshot. 
-func (s *Suite) getLatestFinalizedHeader(ctx context.Context) *flow.Header { - snapshot, err := s.Client.GetLatestProtocolSnapshot(ctx) - require.NoError(s.T(), err) - finalized, err := snapshot.Head() - require.NoError(s.T(), err) - return finalized -} - // SubmitSmokeTestTransaction will submit a create account transaction to smoke test network // This ensures a single transaction can be sealed by the network. -func (s *Suite) SubmitSmokeTestTransaction(ctx context.Context) { +func (s *DynamicEpochTransitionSuite) SubmitSmokeTestTransaction(ctx context.Context) { _, err := utils.CreateFlowAccount(ctx, s.Client) require.NoError(s.T(), err) } @@ -565,7 +432,7 @@ func (s *Suite) SubmitSmokeTestTransaction(ctx context.Context) { // 3. Check that we can execute a script on the AN // // TODO test sending and observing result of a transaction via the new AN (blocked by https://github.com/onflow/flow-go/issues/3642) -func (s *Suite) AssertNetworkHealthyAfterANChange(ctx context.Context, env templates.Environment, snapshotInSecondEpoch *inmem.Snapshot, info *StakedNodeOperationInfo) { +func (s *DynamicEpochTransitionSuite) AssertNetworkHealthyAfterANChange(ctx context.Context, env templates.Environment, snapshotInSecondEpoch *inmem.Snapshot, info *StakedNodeOperationInfo) { // get snapshot directly from new AN and compare head with head from the // snapshot that was used to bootstrap the node @@ -586,14 +453,14 @@ func (s *Suite) AssertNetworkHealthyAfterANChange(ctx context.Context, env templ // AssertNetworkHealthyAfterVNChange performs a basic network health check after replacing a verification node. // 1. 
Ensure sealing continues into the second epoch (post-replacement) by observing // at least 10 blocks of sealing progress within the epoch -func (s *Suite) AssertNetworkHealthyAfterVNChange(ctx context.Context, _ templates.Environment, snapshotInSecondEpoch *inmem.Snapshot, _ *StakedNodeOperationInfo) { +func (s *DynamicEpochTransitionSuite) AssertNetworkHealthyAfterVNChange(ctx context.Context, _ templates.Environment, snapshotInSecondEpoch *inmem.Snapshot, _ *StakedNodeOperationInfo) { s.AwaitSealedBlockHeightExceedsSnapshot(ctx, snapshotInSecondEpoch, 10, 30*time.Second, time.Millisecond*100) } // AssertNetworkHealthyAfterLNChange performs a basic network health check after replacing a collection node. // 1. Submit transaction to network that will target the newly staked LN by making // sure the reference block ID is after the first epoch. -func (s *Suite) AssertNetworkHealthyAfterLNChange(ctx context.Context, _ templates.Environment, _ *inmem.Snapshot, _ *StakedNodeOperationInfo) { +func (s *DynamicEpochTransitionSuite) AssertNetworkHealthyAfterLNChange(ctx context.Context, _ templates.Environment, _ *inmem.Snapshot, _ *StakedNodeOperationInfo) { // At this point we have reached the second epoch and our new LN is the only LN in the network. // To validate the LN joined the network successfully and is processing transactions we create // an account, which submits a transaction and verifies it is sealed. @@ -609,7 +476,7 @@ func (s *Suite) AssertNetworkHealthyAfterLNChange(ctx context.Context, _ templat // therefore the newly joined consensus node must be participating in consensus. // // In addition, here, we submit a transaction and verify that it is sealed. 
-func (s *Suite) AssertNetworkHealthyAfterSNChange(ctx context.Context, _ templates.Environment, _ *inmem.Snapshot, _ *StakedNodeOperationInfo) { +func (s *DynamicEpochTransitionSuite) AssertNetworkHealthyAfterSNChange(ctx context.Context, _ templates.Environment, _ *inmem.Snapshot, _ *StakedNodeOperationInfo) { s.SubmitSmokeTestTransaction(ctx) } @@ -621,7 +488,7 @@ func (s *Suite) AssertNetworkHealthyAfterSNChange(ctx context.Context, _ templat // * that nodes can stake and join the network at an epoch boundary // * that nodes can unstake and leave the network at an epoch boundary // * role-specific network health validation after the swap has completed -func (s *Suite) RunTestEpochJoinAndLeave(role flow.Role, checkNetworkHealth nodeUpdateValidation) { +func (s *DynamicEpochTransitionSuite) RunTestEpochJoinAndLeave(role flow.Role, checkNetworkHealth nodeUpdateValidation) { env := utils.LocalnetEnv() @@ -633,7 +500,7 @@ func (s *Suite) RunTestEpochJoinAndLeave(role flow.Role, checkNetworkHealth node require.NotNil(s.T(), containerToReplace) } else { // grab the first container of this node role type, this is the container we will replace - containerToReplace = s.getContainerToReplace(role) + containerToReplace = s.GetContainersByRole(role)[0] require.NotNil(s.T(), containerToReplace) } @@ -694,23 +561,3 @@ func (s *Suite) RunTestEpochJoinAndLeave(role flow.Role, checkNetworkHealth node // make sure the network is healthy after adding new node checkNetworkHealth(s.Ctx, env, secondEpochSnapshot, info) } - -// DynamicEpochTransitionSuite is the suite used for epoch transitions tests -// with a dynamic identity table. -type DynamicEpochTransitionSuite struct { - Suite -} - -func (s *DynamicEpochTransitionSuite) SetupTest() { - // use a longer staking auction length to accommodate staking operations for joining/leaving nodes - // NOTE: this value is set fairly aggressively to ensure shorter test times. 
- // If flakiness due to failure to complete staking operations in time is observed, - // try increasing (by 10-20 views). - s.StakingAuctionLen = 50 - s.DKGPhaseLen = 50 - s.EpochLen = 250 - s.EpochCommitSafetyThreshold = 20 - - // run the generic setup, which starts up the network - s.Suite.SetupTest() -} diff --git a/integration/tests/epochs/recover_epoch/recover_epoch_efm_test.go b/integration/tests/epochs/recover_epoch/recover_epoch_efm_test.go new file mode 100644 index 00000000000..790f4e7913c --- /dev/null +++ b/integration/tests/epochs/recover_epoch/recover_epoch_efm_test.go @@ -0,0 +1,32 @@ +package recover_epoch + +import ( + "context" + "fmt" + "github.com/onflow/flow-go/model/flow" + "github.com/stretchr/testify/suite" + "testing" + "time" +) + +func TestRecoverEpoch(t *testing.T) { + suite.Run(t, new(RecoverEpochSuite)) +} + +type RecoverEpochSuite struct { + Suite +} + +// TestRecoverEpoch ensures that the recover_epoch transaction flow works as expected. This test will simulate the network going +// into EFM by taking a consensus node offline before completing the DKG. While in EFM mode the test will execute the efm-recover-tx-args +// CLI command to generate transaction arguments to submit a recover_epoch transaction, after submitting the transaction the test will +// ensure the network is healthy. 
+func (s *RecoverEpochSuite) TestRecoverEpoch() { + s.AwaitEpochPhase(context.Background(), 0, flow.EpochPhaseSetup, 20*time.Second, time.Second) + fmt.Println("in epoch phase setup") + + sns := s.GetContainersByRole(flow.RoleConsensus) + sns[0].Pause() + + // @TODO: trigger EFM manually +} diff --git a/integration/tests/epochs/recover_epoch/suite.go b/integration/tests/epochs/recover_epoch/suite.go new file mode 100644 index 00000000000..bce14f8036c --- /dev/null +++ b/integration/tests/epochs/recover_epoch/suite.go @@ -0,0 +1,24 @@ +package recover_epoch + +import ( + "github.com/onflow/flow-go/integration/tests/epochs" +) + +// Suite encapsulates common functionality for epoch integration tests. +type Suite struct { + epochs.BaseSuite +} + +func (s *Suite) SetupTest() { + // use a longer staking auction length to accommodate staking operations for joining/leaving nodes + // NOTE: this value is set fairly aggressively to ensure shorter test times. + // If flakiness due to failure to complete staking operations in time is observed, + // try increasing (by 10-20 views). 
+ s.StakingAuctionLen = 2 + s.DKGPhaseLen = 50 + s.EpochLen = 250 + s.EpochCommitSafetyThreshold = 20 + + // run the generic setup, which starts up the network + s.BaseSuite.SetupTest() +} From b491179904a07e97077721fc0d9d2d34c32454e1 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Mon, 8 Apr 2024 12:53:34 -0400 Subject: [PATCH 077/148] Update service_events_fixtures.go --- utils/unittest/service_events_fixtures.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/unittest/service_events_fixtures.go b/utils/unittest/service_events_fixtures.go index e8fd351ab19..4d5fdeaae06 100644 --- a/utils/unittest/service_events_fixtures.go +++ b/utils/unittest/service_events_fixtures.go @@ -1078,7 +1078,7 @@ func VerifyCdcArguments(t *testing.T, expected []cadence.Value, actual []interfa require.NoError(t, err) // parse cadence value - decoded, err := json2.Decode(nil, bz) + decoded, err := jsoncdc.Decode(nil, bz) require.NoError(t, err) assert.Equal(t, expected[index], decoded) From a2242985fbf5f30ec74dfd3fea05c7b3e9f3da51 Mon Sep 17 00:00:00 2001 From: Peter Argue <89119817+peterargue@users.noreply.github.com> Date: Mon, 8 Apr 2024 13:11:26 -0700 Subject: [PATCH 078/148] [Network] Use string concatination in rpc tracer ID --- network/p2p/tracer/internal/rpc_sent_cache.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/network/p2p/tracer/internal/rpc_sent_cache.go b/network/p2p/tracer/internal/rpc_sent_cache.go index 655ddf2179f..d1f5de9c294 100644 --- a/network/p2p/tracer/internal/rpc_sent_cache.go +++ b/network/p2p/tracer/internal/rpc_sent_cache.go @@ -1,8 +1,6 @@ package internal import ( - "fmt" - "github.com/rs/zerolog" "github.com/onflow/flow-go/model/flow" @@ -79,5 +77,5 @@ func (r *rpcSentCache) size() uint { // Returns: // - flow.Identifier: the entity ID. 
func (r *rpcSentCache) rpcSentEntityID(messageId string, controlMsgType p2pmsg.ControlMessageType) flow.Identifier { - return flow.MakeIDFromFingerPrint([]byte(fmt.Sprintf("%s%s", messageId, controlMsgType))) + return flow.MakeIDFromFingerPrint([]byte(messageId + string(controlMsgType))) } From 3baf7c07cb98b325b1cc21e30251ea8e433aaeba Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Mon, 8 Apr 2024 15:19:25 -0700 Subject: [PATCH 079/148] update comments --- engine/execution/ingestion/throttle.go | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/engine/execution/ingestion/throttle.go b/engine/execution/ingestion/throttle.go index 16ca08797e9..480388acb88 100644 --- a/engine/execution/ingestion/throttle.go +++ b/engine/execution/ingestion/throttle.go @@ -17,7 +17,7 @@ import ( // blocks until the execution has caught up const CatchUpThreshold = 500 -// BlockThrottle is a helper struct that throttles the unexecuted blocks to be sent +// BlockThrottle is a helper struct that helps throttle the unexecuted blocks to be sent // to the block queue for execution. 
// It is useful for case when execution is falling far behind the finalization, in which case // we want to throttle the blocks to be sent to the block queue for fetching data to execute @@ -68,7 +68,7 @@ func NewBlockThrottle( executed: executed, finalized: finalized, - log: log.With().Str("component", "throttle").Logger(), + log: log.With().Str("component", "block_throttle").Logger(), state: state, headers: headers, }, nil @@ -77,11 +77,13 @@ func NewBlockThrottle( func (c *BlockThrottle) Init(processables chan<- flow.Identifier) error { c.mu.Lock() defer c.mu.Unlock() + c.log.Info().Msgf("initializing block throttle") if c.inited { return fmt.Errorf("throttle already inited") } c.inited = true + c.processables = processables var unexecuted []flow.Identifier var err error @@ -90,17 +92,21 @@ func (c *BlockThrottle) Init(processables chan<- flow.Identifier) error { if err != nil { return err } + c.log.Info().Msgf("loaded %d unexecuted blocks", len(unexecuted)) } else { unexecuted, err = findFinalized(c.state, c.headers, c.executed, c.executed+500) if err != nil { return err } + c.log.Info().Msgf("loaded %d unexecuted finalized blocks", len(unexecuted)) } for _, id := range unexecuted { c.processables <- id } + c.log.Info().Msgf("throttle initialized with %d unexecuted blocks", len(unexecuted)) + return nil } @@ -147,6 +153,7 @@ func (c *BlockThrottle) OnBlockExecuted(_ flow.Identifier, executed uint64) erro func (c *BlockThrottle) OnBlock(blockID flow.Identifier) error { c.mu.Lock() defer c.mu.Unlock() + c.log.Debug().Msgf("recieved block (%v)", blockID) if !c.inited { return fmt.Errorf("throttle not inited") @@ -159,6 +166,8 @@ func (c *BlockThrottle) OnBlock(blockID flow.Identifier) error { // if has caught up, then process the block c.processables <- blockID + c.log.Debug().Msgf("processed block (%v)", blockID) + return nil } From fb3ec2cbb20ef5d7e2a4379912d5596447567ce2 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Mon, 8 Apr 2024 18:46:57 -0400 Subject: 
[PATCH 080/148] lint fix --- cmd/util/cmd/common/clusters.go | 1 + cmd/util/cmd/common/utils.go | 2 +- cmd/util/cmd/epochs/cmd/recover.go | 16 ++++++++-------- utils/unittest/service_events_fixtures.go | 2 +- 4 files changed, 11 insertions(+), 10 deletions(-) diff --git a/cmd/util/cmd/common/clusters.go b/cmd/util/cmd/common/clusters.go index 669cec40283..0224dd529e1 100644 --- a/cmd/util/cmd/common/clusters.go +++ b/cmd/util/cmd/common/clusters.go @@ -3,6 +3,7 @@ package common import ( "errors" "fmt" + "github.com/rs/zerolog" "github.com/onflow/cadence" diff --git a/cmd/util/cmd/common/utils.go b/cmd/util/cmd/common/utils.go index 6ee6b2460ac..1014d845d47 100644 --- a/cmd/util/cmd/common/utils.go +++ b/cmd/util/cmd/common/utils.go @@ -8,9 +8,9 @@ import ( "path/filepath" "strconv" + "github.com/multiformats/go-multiaddr" "github.com/rs/zerolog" - "github.com/multiformats/go-multiaddr" "github.com/onflow/crypto" "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/encodable" diff --git a/cmd/util/cmd/epochs/cmd/recover.go b/cmd/util/cmd/epochs/cmd/recover.go index b8d7086d2ed..2e5533641c0 100644 --- a/cmd/util/cmd/epochs/cmd/recover.go +++ b/cmd/util/cmd/epochs/cmd/recover.go @@ -20,20 +20,20 @@ import ( // EpochRecover event to be emitted. This command retrieves the current protocol state identities, computes the cluster assignment using those // identities, generates the cluster QC's and retrieves the DKG key vector of the last successful epoch. // This recovery process has some constraints: -// - The RecoveryEpoch must have exactly the same consensus committee as participated in the most recent successful DKG. -// - The RecoveryEpoch must contain enough "internal" collection nodes so that all clusters contain a supermajority of "internal" collection nodes (same constraint as sporks) +// - The RecoveryEpoch must have exactly the same consensus committee as participated in the most recent successful DKG. 
+// - The RecoveryEpoch must contain enough "internal" collection nodes so that all clusters contain a supermajority of "internal" collection nodes (same constraint as sporks) var ( generateRecoverEpochTxArgsCmd = &cobra.Command{ Use: "efm-recover-tx-args", Short: "Generates recover epoch transaction arguments", - Long: ` + Long: ` Generates transaction arguments for the epoch recovery transaction. The epoch recovery transaction is used to recover from any failure in the epoch transition process without requiring a spork. This recovery process has some constraints: - The RecoveryEpoch must have exactly the same consensus committee as participated in the most recent successful DKG. - The RecoveryEpoch must contain enough "internal" collection nodes so that all clusters contain a supermajority of "internal" collection nodes (same constraint as sporks) `, - Run: generateRecoverEpochTxArgs(getSnapshot), + Run: generateRecoverEpochTxArgs(getSnapshot), } flagAnAddress string @@ -62,7 +62,7 @@ func addGenerateRecoverEpochTxArgsCmdFlags() { generateRecoverEpochTxArgsCmd.Flags().Uint64Var(&flagNumViewsInEpoch, "epoch-length", 4000, "length of each epoch measured in views") generateRecoverEpochTxArgsCmd.Flags().Uint64Var(&flagNumViewsInStakingAuction, "epoch-staking-phase-length", 100, "length of the epoch staking phase measured in views") generateRecoverEpochTxArgsCmd.Flags().Uint64Var(&flagEpochCounter, "epoch-counter", 0, "the epoch counter used to generate the root cluster block") - + generateRecoverEpochTxArgsCmd.MarkFlagRequired("epoch-length") generateRecoverEpochTxArgsCmd.MarkFlagRequired("epoch-staking-phase-length") generateRecoverEpochTxArgsCmd.MarkFlagRequired("epoch-counter") @@ -121,7 +121,7 @@ func extractRecoverEpochArgs(snapshot *inmem.Snapshot) []cadence.Value { } // separate collector nodes by internal and partner nodes - collectors := ids.Filter(filter.HasRole[flow.Identity](flow.RoleCollection)) + collectors := 
currentEpochIdentities.Filter(filter.HasRole[flow.Identity](flow.RoleCollection)) internalCollectors := make(flow.IdentityList, 0) partnerCollectors := make(flow.IdentityList, 0) @@ -133,7 +133,7 @@ func extractRecoverEpochArgs(snapshot *inmem.Snapshot) []cadence.Value { internalNodesMap := make(map[flow.Identifier]struct{}) for _, node := range internalNodes { - if !ids.Exists(node.Identity()) { + if !currentEpochIdentities.Exists(node.Identity()) { log.Fatal().Msg(fmt.Sprintf("node ID found in internal node infos missing from protocol snapshot identities: %s", node.NodeID)) } internalNodesMap[node.NodeID] = struct{}{} @@ -179,7 +179,7 @@ func extractRecoverEpochArgs(snapshot *inmem.Snapshot) []cadence.Value { log.Fatal().Err(cdcErr).Msg("failed to get dkg group key cadence string") } dkgPubKeys = append(dkgPubKeys, dkgGroupKeyCdc) - for _, id := range ids { + for _, id := range currentEpochIdentities { if id.GetRole() == flow.RoleConsensus { dkgPubKey, keyShareErr := currentEpochDKG.KeyShare(id.GetNodeID()) if keyShareErr != nil { diff --git a/utils/unittest/service_events_fixtures.go b/utils/unittest/service_events_fixtures.go index 4d5fdeaae06..f6bf988b339 100644 --- a/utils/unittest/service_events_fixtures.go +++ b/utils/unittest/service_events_fixtures.go @@ -6,12 +6,12 @@ import ( "encoding/json" "testing" - jsoncdc "github.com/onflow/cadence/encoding/json" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/onflow/cadence" "github.com/onflow/cadence/encoding/ccf" + jsoncdc "github.com/onflow/cadence/encoding/json" "github.com/onflow/cadence/runtime/common" "github.com/onflow/crypto" "github.com/onflow/flow-go/fvm/systemcontracts" From a8cfa20026c747f6df7e806171a588cc4b022ec7 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Mon, 8 Apr 2024 18:49:57 -0400 Subject: [PATCH 081/148] Update recover.go --- cmd/util/cmd/epochs/cmd/recover.go | 25 +++++++++++++++++++------ 1 file changed, 19 insertions(+), 6 deletions(-) diff 
--git a/cmd/util/cmd/epochs/cmd/recover.go b/cmd/util/cmd/epochs/cmd/recover.go index 2e5533641c0..e6067ddefe5 100644 --- a/cmd/util/cmd/epochs/cmd/recover.go +++ b/cmd/util/cmd/epochs/cmd/recover.go @@ -48,10 +48,10 @@ This recovery process has some constraints: func init() { rootCmd.AddCommand(generateRecoverEpochTxArgsCmd) - addGenerateRecoverEpochTxArgsCmdFlags() + err := addGenerateRecoverEpochTxArgsCmdFlags() } -func addGenerateRecoverEpochTxArgsCmdFlags() { +func addGenerateRecoverEpochTxArgsCmdFlags() error { generateRecoverEpochTxArgsCmd.Flags().IntVar(&flagCollectionClusters, "collection-clusters", 3, "number of collection clusters") // required parameters for network configuration and generation of root node identities @@ -63,10 +63,23 @@ func addGenerateRecoverEpochTxArgsCmdFlags() { generateRecoverEpochTxArgsCmd.Flags().Uint64Var(&flagNumViewsInStakingAuction, "epoch-staking-phase-length", 100, "length of the epoch staking phase measured in views") generateRecoverEpochTxArgsCmd.Flags().Uint64Var(&flagEpochCounter, "epoch-counter", 0, "the epoch counter used to generate the root cluster block") - generateRecoverEpochTxArgsCmd.MarkFlagRequired("epoch-length") - generateRecoverEpochTxArgsCmd.MarkFlagRequired("epoch-staking-phase-length") - generateRecoverEpochTxArgsCmd.MarkFlagRequired("epoch-counter") - generateRecoverEpochTxArgsCmd.MarkFlagRequired("collection-clusters") + err := generateRecoverEpochTxArgsCmd.MarkFlagRequired("epoch-length") + if err != nil { + return fmt.Errorf("failed to mark epoch-length flag as required") + } + err = generateRecoverEpochTxArgsCmd.MarkFlagRequired("epoch-staking-phase-length") + if err != nil { + return fmt.Errorf("failed to mark epoch-staking-phase-length flag as required") + } + err = generateRecoverEpochTxArgsCmd.MarkFlagRequired("epoch-counter") + if err != nil { + return fmt.Errorf("failed to mark epoch-counter flag as required") + } + err = generateRecoverEpochTxArgsCmd.MarkFlagRequired("collection-clusters") + 
if err != nil { + return fmt.Errorf("failed to mark collection-clusters flag as required") + } + return nil } func getSnapshot() *inmem.Snapshot { From 22511abefb4961a9242256f0790cb62caa5d3320 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Mon, 8 Apr 2024 18:50:47 -0400 Subject: [PATCH 082/148] fix lint --- cmd/bootstrap/cmd/final_list.go | 1 + cmd/bootstrap/cmd/key.go | 1 + cmd/dynamic_startup.go | 1 + cmd/util/cmd/common/clusters.go | 1 + cmd/util/cmd/common/utils.go | 1 + cmd/util/cmd/epochs/cmd/recover.go | 3 +++ cmd/utils.go | 1 + utils/unittest/service_events_fixtures.go | 1 + 8 files changed, 10 insertions(+) diff --git a/cmd/bootstrap/cmd/final_list.go b/cmd/bootstrap/cmd/final_list.go index 709e041ff63..6f72461087a 100644 --- a/cmd/bootstrap/cmd/final_list.go +++ b/cmd/bootstrap/cmd/final_list.go @@ -2,6 +2,7 @@ package cmd import ( "fmt" + "github.com/spf13/cobra" "github.com/onflow/flow-go/cmd" diff --git a/cmd/bootstrap/cmd/key.go b/cmd/bootstrap/cmd/key.go index 8265b93dd6c..34551b28869 100644 --- a/cmd/bootstrap/cmd/key.go +++ b/cmd/bootstrap/cmd/key.go @@ -2,6 +2,7 @@ package cmd import ( "fmt" + "github.com/onflow/crypto" "github.com/spf13/cobra" diff --git a/cmd/dynamic_startup.go b/cmd/dynamic_startup.go index 89b6d0a2054..616773c1e00 100644 --- a/cmd/dynamic_startup.go +++ b/cmd/dynamic_startup.go @@ -9,6 +9,7 @@ import ( "strings" "github.com/onflow/crypto" + "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/flow" diff --git a/cmd/util/cmd/common/clusters.go b/cmd/util/cmd/common/clusters.go index 0224dd529e1..ffb822ba332 100644 --- a/cmd/util/cmd/common/clusters.go +++ b/cmd/util/cmd/common/clusters.go @@ -7,6 +7,7 @@ import ( "github.com/rs/zerolog" "github.com/onflow/cadence" + "github.com/onflow/flow-go/cmd/bootstrap/run" "github.com/onflow/flow-go/model/bootstrap" model "github.com/onflow/flow-go/model/bootstrap" diff --git a/cmd/util/cmd/common/utils.go 
b/cmd/util/cmd/common/utils.go index 1014d845d47..f5b9570071e 100644 --- a/cmd/util/cmd/common/utils.go +++ b/cmd/util/cmd/common/utils.go @@ -12,6 +12,7 @@ import ( "github.com/rs/zerolog" "github.com/onflow/crypto" + "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/encodable" "github.com/onflow/flow-go/model/flow" diff --git a/cmd/util/cmd/epochs/cmd/recover.go b/cmd/util/cmd/epochs/cmd/recover.go index e6067ddefe5..bc97fc622ec 100644 --- a/cmd/util/cmd/epochs/cmd/recover.go +++ b/cmd/util/cmd/epochs/cmd/recover.go @@ -49,6 +49,9 @@ This recovery process has some constraints: func init() { rootCmd.AddCommand(generateRecoverEpochTxArgsCmd) err := addGenerateRecoverEpochTxArgsCmdFlags() + if err != nil { + panic(err) + } } func addGenerateRecoverEpochTxArgsCmdFlags() error { diff --git a/cmd/utils.go b/cmd/utils.go index 713a58375c8..a3464bceb7b 100644 --- a/cmd/utils.go +++ b/cmd/utils.go @@ -11,6 +11,7 @@ import ( "github.com/rs/zerolog" "github.com/onflow/crypto" + "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" diff --git a/utils/unittest/service_events_fixtures.go b/utils/unittest/service_events_fixtures.go index f6bf988b339..9248968fc39 100644 --- a/utils/unittest/service_events_fixtures.go +++ b/utils/unittest/service_events_fixtures.go @@ -14,6 +14,7 @@ import ( jsoncdc "github.com/onflow/cadence/encoding/json" "github.com/onflow/cadence/runtime/common" "github.com/onflow/crypto" + "github.com/onflow/flow-go/fvm/systemcontracts" "github.com/onflow/flow-go/model/flow" ) From 2eb6b1149ef814ca3320c6917c627553a6dd5881 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Mon, 8 Apr 2024 18:51:36 -0400 Subject: [PATCH 083/148] fix imports --- cmd/util/cmd/epochs/cmd/recover.go | 2 ++ cmd/util/cmd/epochs/cmd/recover_test.go | 3 ++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/cmd/util/cmd/epochs/cmd/recover.go b/cmd/util/cmd/epochs/cmd/recover.go index 
bc97fc622ec..d365b5daad9 100644 --- a/cmd/util/cmd/epochs/cmd/recover.go +++ b/cmd/util/cmd/epochs/cmd/recover.go @@ -3,9 +3,11 @@ package cmd import ( "context" "fmt" + "github.com/spf13/cobra" "github.com/onflow/cadence" + "github.com/onflow/flow-go/cmd/bootstrap/run" "github.com/onflow/flow-go/cmd/util/cmd/common" epochcmdutil "github.com/onflow/flow-go/cmd/util/cmd/epochs/utils" diff --git a/cmd/util/cmd/epochs/cmd/recover_test.go b/cmd/util/cmd/epochs/cmd/recover_test.go index 0a5eec84788..980a9788a55 100644 --- a/cmd/util/cmd/epochs/cmd/recover_test.go +++ b/cmd/util/cmd/epochs/cmd/recover_test.go @@ -3,9 +3,10 @@ package cmd import ( "bytes" "encoding/json" + "testing" + "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/model/flow" - "testing" "github.com/stretchr/testify/require" From ad9cedeb5f2ae79bd5f4b7dbd8d857d183784e45 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Tue, 9 Apr 2024 00:23:05 -0400 Subject: [PATCH 084/148] fix cmd unit tests add missing "wrote file" logs --- cmd/bootstrap/cmd/db_encryption_key.go | 2 +- cmd/bootstrap/cmd/dkg.go | 2 ++ cmd/bootstrap/cmd/final_list.go | 1 + cmd/bootstrap/cmd/finalize.go | 1 + cmd/bootstrap/cmd/finalize_test.go | 2 -- cmd/bootstrap/cmd/genconfig.go | 1 + cmd/bootstrap/cmd/key.go | 8 ++++++-- cmd/bootstrap/cmd/keygen.go | 1 + cmd/bootstrap/cmd/machine_account.go | 1 + cmd/bootstrap/cmd/machine_account_key.go | 1 + cmd/bootstrap/cmd/partner_infos.go | 2 ++ cmd/bootstrap/cmd/qc.go | 1 + cmd/bootstrap/cmd/rootblock.go | 3 +++ .../tests/epochs/cohort2/epoch_join_and_leave_vn_test.go | 2 +- 14 files changed, 22 insertions(+), 6 deletions(-) diff --git a/cmd/bootstrap/cmd/db_encryption_key.go b/cmd/bootstrap/cmd/db_encryption_key.go index 560d6d17c02..897a7099c90 100644 --- a/cmd/bootstrap/cmd/db_encryption_key.go +++ b/cmd/bootstrap/cmd/db_encryption_key.go @@ -56,5 +56,5 @@ func dbEncryptionKeyRun(_ *cobra.Command, _ []string) { log.Fatal().Err(err).Msg("failed to write file") } - 
log.Info().Msgf("wrote file %v", dbEncryptionKeyPath) + log.Info().Msgf("wrote file %s/%s", flagOutdir, dbEncryptionKeyPath) } diff --git a/cmd/bootstrap/cmd/dkg.go b/cmd/bootstrap/cmd/dkg.go index 38c9626c340..44805407e4e 100644 --- a/cmd/bootstrap/cmd/dkg.go +++ b/cmd/bootstrap/cmd/dkg.go @@ -43,6 +43,7 @@ func runBeaconKG(nodes []model.NodeInfo) dkg.DKGData { if err != nil { log.Fatal().Err(err).Msg("failed to write json") } + log.Info().Msgf("wrote file %s/%s", flagOutdir, fmt.Sprintf(model.PathRandomBeaconPriv, nodeID)) } // write full DKG info that will be used to construct QC @@ -56,6 +57,7 @@ func runBeaconKG(nodes []model.NodeInfo) dkg.DKGData { if err != nil { log.Fatal().Err(err).Msg("failed to write json") } + log.Info().Msgf("wrote file %s/%s", flagOutdir, model.PathRootDKGData) return dkgData } diff --git a/cmd/bootstrap/cmd/final_list.go b/cmd/bootstrap/cmd/final_list.go index 6f72461087a..d2e078cb4b4 100644 --- a/cmd/bootstrap/cmd/final_list.go +++ b/cmd/bootstrap/cmd/final_list.go @@ -72,6 +72,7 @@ func finalList(cmd *cobra.Command, args []string) { if err != nil { log.Fatal().Err(err).Msg("failed to write json") } + log.Info().Msgf("wrote file %s/%s", flagOutdir, model.PathFinallist) } func validateNodes(localNodes []model.NodeInfo, registeredNodes []model.NodeInfo) { diff --git a/cmd/bootstrap/cmd/finalize.go b/cmd/bootstrap/cmd/finalize.go index 86b95e0d75a..62bc9213006 100644 --- a/cmd/bootstrap/cmd/finalize.go +++ b/cmd/bootstrap/cmd/finalize.go @@ -203,6 +203,7 @@ func finalize(cmd *cobra.Command, args []string) { if err != nil { log.Fatal().Err(err).Msg("failed to write json") } + log.Info().Msgf("wrote file %s/%s", flagOutdir, model.PathRootProtocolStateSnapshot) log.Info().Msg("") // read snapshot and verify consistency diff --git a/cmd/bootstrap/cmd/finalize_test.go b/cmd/bootstrap/cmd/finalize_test.go index 8d5f15ad19b..89088898c7d 100644 --- a/cmd/bootstrap/cmd/finalize_test.go +++ b/cmd/bootstrap/cmd/finalize_test.go @@ -2,7 +2,6 @@ 
package cmd import ( "encoding/hex" - "fmt" "math/rand" "path/filepath" "regexp" @@ -93,7 +92,6 @@ func TestFinalize_HappyPath(t *testing.T) { log = log.Hook(hook) finalize(nil, nil) - fmt.Println(hook.logs.String()) assert.Regexp(t, finalizeHappyPathRegex, hook.logs.String()) hook.logs.Reset() diff --git a/cmd/bootstrap/cmd/genconfig.go b/cmd/bootstrap/cmd/genconfig.go index f8b565ca704..f1902778f3a 100644 --- a/cmd/bootstrap/cmd/genconfig.go +++ b/cmd/bootstrap/cmd/genconfig.go @@ -61,6 +61,7 @@ func genconfigCmdRun(_ *cobra.Command, _ []string) { if err != nil { log.Fatal().Err(err).Msg("failed to write json") } + log.Info().Msgf("wrote file %s/%s", flagOutdir, flagConfig) } // genconfigCmd represents the genconfig command diff --git a/cmd/bootstrap/cmd/key.go b/cmd/bootstrap/cmd/key.go index 34551b28869..7ef97a19a8e 100644 --- a/cmd/bootstrap/cmd/key.go +++ b/cmd/bootstrap/cmd/key.go @@ -98,23 +98,26 @@ func keyCmdRun(_ *cobra.Command, _ []string) { if err != nil { log.Fatal().Err(err).Msg("failed to write file") } - log.Info().Msgf("wrote file %v", model.PathNodeID) + log.Info().Msgf("wrote file %s/%s", flagOutdir, model.PathNodeID) err = common.WriteJSON(fmt.Sprintf(model.PathNodeInfoPriv, nodeInfo.NodeID), flagOutdir, private) if err != nil { log.Fatal().Err(err).Msg("failed to write json") } + log.Info().Msgf("wrote file %s/%s", flagOutdir, model.PathNodeInfoPriv) err = common.WriteText(fmt.Sprintf(model.PathSecretsEncryptionKey, nodeInfo.NodeID), flagOutdir, secretsDBKey) if err != nil { log.Fatal().Err(err).Msg("failed to write file") } - log.Info().Msgf("wrote file %v", model.PathSecretsEncryptionKey) + log.Info().Msgf("wrote file %s/%s", flagOutdir, model.PathSecretsEncryptionKey) err = common.WriteJSON(fmt.Sprintf(model.PathNodeInfoPub, nodeInfo.NodeID), flagOutdir, nodeInfo.Public()) if err != nil { log.Fatal().Err(err).Msg("failed to write json") } + log.Info().Msgf("wrote file %s/%s", flagOutdir, model.PathNodeInfoPub) + // write machine account 
info if role == flow.RoleCollection || role == flow.RoleConsensus { @@ -131,6 +134,7 @@ func keyCmdRun(_ *cobra.Command, _ []string) { if err != nil { log.Fatal().Err(err).Msg("failed to write json") } + log.Info().Msgf("wrote file %s/%s", flagOutdir, model.PathNodeMachineAccountPrivateKey) } } diff --git a/cmd/bootstrap/cmd/keygen.go b/cmd/bootstrap/cmd/keygen.go index 72de2201d97..43da4d6cf90 100644 --- a/cmd/bootstrap/cmd/keygen.go +++ b/cmd/bootstrap/cmd/keygen.go @@ -129,4 +129,5 @@ func genNodePubInfo(nodes []model.NodeInfo) { if err != nil { log.Fatal().Err(err).Msg("failed to write json") } + log.Info().Msgf("wrote file %s/%s", flagOutdir, model.PathInternalNodeInfosPub) } diff --git a/cmd/bootstrap/cmd/machine_account.go b/cmd/bootstrap/cmd/machine_account.go index bc16565e267..bdaa7a08922 100644 --- a/cmd/bootstrap/cmd/machine_account.go +++ b/cmd/bootstrap/cmd/machine_account.go @@ -85,6 +85,7 @@ func machineAccountRun(_ *cobra.Command, _ []string) { if err != nil { log.Fatal().Err(err).Msg("failed to write json") } + log.Info().Msgf("wrote file %s/%s", flagOutdir, fmt.Sprintf(model.PathNodeMachineAccountInfoPriv, nodeID)) } // readMachineAccountPriv reads the machine account private key files in the bootstrap dir diff --git a/cmd/bootstrap/cmd/machine_account_key.go b/cmd/bootstrap/cmd/machine_account_key.go index ee7c01ebad2..14bdef868df 100644 --- a/cmd/bootstrap/cmd/machine_account_key.go +++ b/cmd/bootstrap/cmd/machine_account_key.go @@ -61,4 +61,5 @@ func machineAccountKeyRun(_ *cobra.Command, _ []string) { if err != nil { log.Fatal().Err(err).Msg("failed to write json") } + log.Info().Msg(fmt.Sprintf("wrote file %s/%s", flagOutdir, machineAccountKeyPath)) } diff --git a/cmd/bootstrap/cmd/partner_infos.go b/cmd/bootstrap/cmd/partner_infos.go index 68bee2a8430..653ee861ff7 100644 --- a/cmd/bootstrap/cmd/partner_infos.go +++ b/cmd/bootstrap/cmd/partner_infos.go @@ -207,6 +207,7 @@ func writeNodePubInfoFile(info *bootstrap.NodeInfoPub) { if err != nil 
{ log.Fatal().Err(err).Msg("failed to write json") } + log.Info().Msgf("wrote file %s/%s", flagOutdir, fileOutputPath) } // writePartnerWeightsFile writes the partner weights file @@ -215,6 +216,7 @@ func writePartnerWeightsFile(partnerWeights common.PartnerWeights) { if err != nil { log.Fatal().Err(err).Msg("failed to write json") } + log.Info().Msgf("wrote file %s/%s", flagOutdir, bootstrap.FileNamePartnerWeights) } func printNodeCounts(numOfNodesByType map[flow.Role]int, totalNumOfPartnerNodes, skippedNodes int) { diff --git a/cmd/bootstrap/cmd/qc.go b/cmd/bootstrap/cmd/qc.go index 6b47bf6cf6f..22474ed1d19 100644 --- a/cmd/bootstrap/cmd/qc.go +++ b/cmd/bootstrap/cmd/qc.go @@ -53,5 +53,6 @@ func constructRootVotes(block *flow.Block, allNodes, internalNodes []bootstrap.N if err != nil { log.Fatal().Err(err).Msg("failed to write json") } + log.Info().Msgf("wrote file %s/%s", flagOutdir, path) } } diff --git a/cmd/bootstrap/cmd/rootblock.go b/cmd/bootstrap/cmd/rootblock.go index 7b2f97c8923..9810834c2e4 100644 --- a/cmd/bootstrap/cmd/rootblock.go +++ b/cmd/bootstrap/cmd/rootblock.go @@ -169,6 +169,7 @@ func rootBlock(cmd *cobra.Command, args []string) { if err != nil { log.Fatal().Err(err).Msg("failed to write json") } + log.Info().Msgf("wrote file %s/%s", flagOutdir, model.PathNodeInfosPub) log.Info().Msg("") log.Info().Msg("running DKG for consensus nodes") @@ -221,6 +222,7 @@ func rootBlock(cmd *cobra.Command, args []string) { if err != nil { log.Fatal().Err(err).Msg("failed to write json") } + log.Info().Msgf("wrote file %s/%s", flagOutdir, model.PathIntermediaryBootstrappingData) log.Info().Msg("") log.Info().Msg("constructing root block") @@ -229,6 +231,7 @@ func rootBlock(cmd *cobra.Command, args []string) { if err != nil { log.Fatal().Err(err).Msg("failed to write json") } + log.Info().Msgf("wrote file %s/%s", flagOutdir, model.PathRootBlockData) log.Info().Msg("") log.Info().Msg("constructing and writing votes") diff --git 
a/integration/tests/epochs/cohort2/epoch_join_and_leave_vn_test.go b/integration/tests/epochs/cohort2/epoch_join_and_leave_vn_test.go index ed8f7ef1ae1..f94066eb14e 100644 --- a/integration/tests/epochs/cohort2/epoch_join_and_leave_vn_test.go +++ b/integration/tests/epochs/cohort2/epoch_join_and_leave_vn_test.go @@ -32,7 +32,7 @@ func (s *EpochJoinAndLeaveVNSuite) SetupTest() { s.DKGPhaseLen = 100 s.EpochLen = 450 s.EpochCommitSafetyThreshold = 20 - s.Suite.SetupTest() + s.BaseSuite.SetupTest() } // TestEpochJoinAndLeaveVN should update verification nodes and assert healthy network conditions From 27011027a7b7e7d769067c607c58ad74a28e50bc Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Tue, 9 Apr 2024 00:26:07 -0400 Subject: [PATCH 085/148] remove extra SN nodes --- integration/tests/epochs/base_suite.go | 2 -- .../tests/epochs/recover_epoch/recover_epoch_efm_test.go | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/integration/tests/epochs/base_suite.go b/integration/tests/epochs/base_suite.go index d7a8356228f..d1e10ee1d20 100644 --- a/integration/tests/epochs/base_suite.go +++ b/integration/tests/epochs/base_suite.go @@ -91,8 +91,6 @@ func (s *BaseSuite) SetupTest() { testnet.NewNodeConfig(flow.RoleCollection, collectionConfigs...), testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...), testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...), - testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...), - testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...), testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.WarnLevel), testnet.WithAdditionalFlag("--extensive-logging=true")), testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.WarnLevel)), testnet.NewNodeConfig(flow.RoleVerification, testnet.WithLogLevel(zerolog.WarnLevel)), diff --git a/integration/tests/epochs/recover_epoch/recover_epoch_efm_test.go b/integration/tests/epochs/recover_epoch/recover_epoch_efm_test.go index 
790f4e7913c..84937385779 100644 --- a/integration/tests/epochs/recover_epoch/recover_epoch_efm_test.go +++ b/integration/tests/epochs/recover_epoch/recover_epoch_efm_test.go @@ -26,7 +26,7 @@ func (s *RecoverEpochSuite) TestRecoverEpoch() { fmt.Println("in epoch phase setup") sns := s.GetContainersByRole(flow.RoleConsensus) - sns[0].Pause() + _ = sns[0].Pause() // @TODO: trigger EFM manually } From e07c96a62da8c57b4afd16c98de70f19998a18b9 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Tue, 9 Apr 2024 00:31:19 -0400 Subject: [PATCH 086/148] Update recover_epoch_efm_test.go --- .../tests/epochs/recover_epoch/recover_epoch_efm_test.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/integration/tests/epochs/recover_epoch/recover_epoch_efm_test.go b/integration/tests/epochs/recover_epoch/recover_epoch_efm_test.go index 84937385779..6de2caaba21 100644 --- a/integration/tests/epochs/recover_epoch/recover_epoch_efm_test.go +++ b/integration/tests/epochs/recover_epoch/recover_epoch_efm_test.go @@ -3,10 +3,12 @@ package recover_epoch import ( "context" "fmt" - "github.com/onflow/flow-go/model/flow" - "github.com/stretchr/testify/suite" "testing" "time" + + "github.com/stretchr/testify/suite" + + "github.com/onflow/flow-go/model/flow" ) func TestRecoverEpoch(t *testing.T) { From 6ab8300125afbdf45d6d275d8e7628da9c2f0bb9 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Tue, 9 Apr 2024 11:08:21 -0500 Subject: [PATCH 087/148] Move reusable migration funcs to migrations/utils.go --- .../migrations/atree_register_migration.go | 55 ----------------- cmd/util/ledger/migrations/utils.go | 59 +++++++++++++++++++ 2 files changed, 59 insertions(+), 55 deletions(-) diff --git a/cmd/util/ledger/migrations/atree_register_migration.go b/cmd/util/ledger/migrations/atree_register_migration.go index f982e961063..6722216e34a 100644 --- a/cmd/util/ledger/migrations/atree_register_migration.go +++ 
b/cmd/util/ledger/migrations/atree_register_migration.go @@ -14,7 +14,6 @@ import ( "github.com/onflow/cadence/runtime" "github.com/onflow/cadence/runtime/common" "github.com/onflow/cadence/runtime/interpreter" - "github.com/onflow/cadence/runtime/stdlib" "github.com/onflow/flow-go/cmd/util/ledger/reporters" "github.com/onflow/flow-go/fvm/environment" @@ -180,41 +179,6 @@ func (m *AtreeRegisterMigrator) MigrateAccount( return newPayloads, nil } -func checkStorageHealth( - address common.Address, - storage *runtime.Storage, - payloads []*ledger.Payload, -) error { - - for _, payload := range payloads { - registerID, _, err := convert.PayloadToRegister(payload) - if err != nil { - return fmt.Errorf("failed to convert payload to register: %w", err) - } - - if !registerID.IsSlabIndex() { - continue - } - - // Convert the register ID to a storage ID. - slabID := atree.NewStorageID( - atree.Address([]byte(registerID.Owner)), - atree.StorageIndex([]byte(registerID.Key[1:]))) - - // Retrieve the slab. 
- _, _, err = storage.Retrieve(slabID) - if err != nil { - return fmt.Errorf("failed to retrieve slab %s: %w", slabID, err) - } - } - - for _, domain := range domains { - _ = storage.GetStorageMap(address, domain, false) - } - - return storage.CheckHealth() -} - func (m *AtreeRegisterMigrator) migrateAccountStorage( mr *migratorRuntime, storageMapIds map[string]struct{}, @@ -494,25 +458,6 @@ func capturePanic(f func()) (err error) { return } -// convert all domains -var domains = []string{ - common.PathDomainStorage.Identifier(), - common.PathDomainPrivate.Identifier(), - common.PathDomainPublic.Identifier(), - runtime.StorageDomainContract, - stdlib.InboxStorageDomain, - stdlib.CapabilityControllerStorageDomain, -} - -var domainsLookupMap = map[string]struct{}{ - common.PathDomainStorage.Identifier(): {}, - common.PathDomainPrivate.Identifier(): {}, - common.PathDomainPublic.Identifier(): {}, - runtime.StorageDomainContract: {}, - stdlib.InboxStorageDomain: {}, - stdlib.CapabilityControllerStorageDomain: {}, -} - // migrationProblem is a struct for reporting errors type migrationProblem struct { Address string diff --git a/cmd/util/ledger/migrations/utils.go b/cmd/util/ledger/migrations/utils.go index e747b3dc508..ea790bf3ff1 100644 --- a/cmd/util/ledger/migrations/utils.go +++ b/cmd/util/ledger/migrations/utils.go @@ -4,8 +4,13 @@ import ( "fmt" "github.com/onflow/atree" + "github.com/onflow/cadence/runtime" + "github.com/onflow/cadence/runtime/common" + "github.com/onflow/cadence/runtime/stdlib" "github.com/onflow/flow-go/fvm/environment" + "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/ledger/common/convert" "github.com/onflow/flow-go/model/flow" ) @@ -59,3 +64,57 @@ func (a *AccountsAtreeLedger) AllocateStorageIndex(owner []byte) (atree.StorageI } return v, nil } + +func checkStorageHealth( + address common.Address, + storage *runtime.Storage, + payloads []*ledger.Payload, +) error { + + for _, payload := range payloads { + registerID, _, err 
:= convert.PayloadToRegister(payload) + if err != nil { + return fmt.Errorf("failed to convert payload to register: %w", err) + } + + if !registerID.IsSlabIndex() { + continue + } + + // Convert the register ID to a storage ID. + slabID := atree.NewStorageID( + atree.Address([]byte(registerID.Owner)), + atree.StorageIndex([]byte(registerID.Key[1:]))) + + // Retrieve the slab. + _, _, err = storage.Retrieve(slabID) + if err != nil { + return fmt.Errorf("failed to retrieve slab %s: %w", slabID, err) + } + } + + for _, domain := range domains { + _ = storage.GetStorageMap(address, domain, false) + } + + return storage.CheckHealth() +} + +// convert all domains +var domains = []string{ + common.PathDomainStorage.Identifier(), + common.PathDomainPrivate.Identifier(), + common.PathDomainPublic.Identifier(), + runtime.StorageDomainContract, + stdlib.InboxStorageDomain, + stdlib.CapabilityControllerStorageDomain, +} + +var domainsLookupMap = map[string]struct{}{ + common.PathDomainStorage.Identifier(): {}, + common.PathDomainPrivate.Identifier(): {}, + common.PathDomainPublic.Identifier(): {}, + runtime.StorageDomainContract: {}, + stdlib.InboxStorageDomain: {}, + stdlib.CapabilityControllerStorageDomain: {}, +} From d1f78a8a5a68c6f1698251343c60a64c403dcbdf Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Tue, 26 Mar 2024 16:42:47 -0700 Subject: [PATCH 088/148] add ingestion core --- engine/execution/ingestion/core.go | 461 +++++++++++++++++++++++++++++ 1 file changed, 461 insertions(+) create mode 100644 engine/execution/ingestion/core.go diff --git a/engine/execution/ingestion/core.go b/engine/execution/ingestion/core.go new file mode 100644 index 00000000000..56b7c0084f1 --- /dev/null +++ b/engine/execution/ingestion/core.go @@ -0,0 +1,461 @@ +package ingestion + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/engine" + "github.com/onflow/flow-go/engine/execution" + 
"github.com/onflow/flow-go/engine/execution/ingestion/block_queue" + "github.com/onflow/flow-go/engine/execution/ingestion/stop" + "github.com/onflow/flow-go/engine/execution/state" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/mempool/entity" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/utils/logging" +) + +// Core connects the execution components +// when it receives blocks and collections, it forwards them to the block queue. +// when the block queue decides to execute blocks, it forwards to the executor for execution +// when the block queue decides to fetch missing collections, it forwards to the collection fetcher +// when a block is executed, it notifies the block queue and forwards to execution state to save them. +type Core struct { + unit *engine.Unit // for async block execution + + log zerolog.Logger + + // state machine + blockQueue *block_queue.BlockQueue + throttle Throttle // for throttling blocks to be added to the block queue + execState state.ExecutionState + stopControl *stop.StopControl // decide whether to execute a block or not + + // data storage + headers storage.Headers + blocks storage.Blocks + collections storage.Collections + + // computation, data fetching, events + executor BlockExecutor + collectionFetcher CollectionFetcher + eventConsumer EventConsumer +} + +type Throttle interface { + Init(processables chan<- flow.Identifier) error + OnBlock(blockID flow.Identifier) error + OnBlockExecuted(blockID flow.Identifier, height uint64) error +} + +type BlockExecutor interface { + ExecuteBlock(ctx context.Context, block *entity.ExecutableBlock) (*execution.ComputationResult, error) +} + +type EventConsumer interface { + BeforeComputationResultSaved(ctx context.Context, result *execution.ComputationResult) + OnComputationResultSaved(ctx context.Context, result *execution.ComputationResult) string +} + +func NewCore( + logger zerolog.Logger, + throttle Throttle, + execState 
state.ExecutionState, + stopControl *stop.StopControl, + headers storage.Headers, + blocks storage.Blocks, + collections storage.Collections, + executor BlockExecutor, + collectionFetcher CollectionFetcher, + eventConsumer EventConsumer, +) *Core { + return &Core{ + log: logger.With().Str("engine", "ingestion_core").Logger(), + unit: engine.NewUnit(), + throttle: throttle, + execState: execState, + blockQueue: block_queue.NewBlockQueue(logger), + stopControl: stopControl, + headers: headers, + blocks: blocks, + collections: collections, + executor: executor, + collectionFetcher: collectionFetcher, + eventConsumer: eventConsumer, + } +} + +func (e *Core) Ready() <-chan struct{} { + if e.stopControl.IsExecutionStopped() { + return e.unit.Ready() + } + + e.launchWorkerToConsumeThrottledBlocks() + + return e.unit.Ready() +} + +func (e *Core) Done() <-chan struct{} { + return e.unit.Done() +} + +func (e *Core) OnBlock(header *flow.Header, qc *flow.QuorumCertificate) { + // qc.Block is equivalent to header.ID() + err := e.throttle.OnBlock(qc.BlockID) + if err != nil { + e.log.Fatal().Err(err).Msgf("error processing block %v (%v)", header.Height, header.ID()) + } +} + +func (e *Core) OnCollection(col *flow.Collection) { + err := e.onCollection(col) + if err != nil { + e.log.Fatal().Err(err).Msgf("error processing collection: %v", col.ID()) + } +} + +func (e *Core) launchWorkerToConsumeThrottledBlocks() { + // processables are throttled blocks + processables := make(chan flow.Identifier, 10000) + + // running worker in the background to consume + // processables blocks which are throttled, + // and forward them to the block queue for processing + e.unit.Launch(func() { + e.log.Info().Msgf("starting worker to consume throttled blocks") + err := e.forwardProcessableToHandler(processables) + if err != nil { + e.log.Fatal().Err(err).Msg("fail to process block") + } + }) + + e.log.Info().Msg("initializing throttle engine") + + err := e.throttle.Init(processables) + if err != 
nil { + e.log.Fatal().Err(err).Msg("fail to initialize throttle engine") + } + + e.log.Info().Msgf("throttle engine initialized") +} + +func (e *Core) forwardProcessableToHandler( + processables <-chan flow.Identifier, +) error { + for blockID := range processables { + err := e.onProcessableBlock(blockID) + if err != nil { + return fmt.Errorf("could not process block: %w", err) + } + } + + return nil +} + +func (e *Core) onProcessableBlock(blockID flow.Identifier) error { + header, err := e.headers.ByBlockID(blockID) + if err != nil { + return fmt.Errorf("could not get block: %w", err) + } + + // skip if stopControl tells to skip + if !e.stopControl.ShouldExecuteBlock(header) { + return nil + } + + executed, err := e.execState.IsBlockExecuted(header.Height, blockID) + if err != nil { + return fmt.Errorf("could not check whether block %v is executed: %w", blockID, err) + } + + if executed { + e.log.Debug().Msg("block has been executed already") + return nil + } + + block, err := e.blocks.ByID(blockID) + if err != nil { + return fmt.Errorf("failed to get block %s: %w", blockID, err) + } + + missingColls, executables, err := e.enqueuBlock(block, blockID) + if err != nil { + return fmt.Errorf("failed to enqueue block %v: %w", blockID, err) + } + + e.executeConcurrently(executables) + + err = e.fetch(missingColls) + if err != nil { + return fmt.Errorf("failed to fetch missing collections: %w", err) + } + + return nil +} + +func (e *Core) enqueuBlock(block *flow.Block, blockID flow.Identifier) ( + []*block_queue.MissingCollection, []*entity.ExecutableBlock, error) { + lg := e.log.With(). + Hex("block_id", blockID[:]). + Uint64("height", block.Header.Height). + Logger() + + lg.Info().Msg("handling new block") + + parentCommitment, err := e.execState.StateCommitmentByBlockID(block.Header.ParentID) + + if err == nil { + // the parent block is an executed block. 
+ missingColls, executables, err := e.blockQueue.HandleBlock(block, &parentCommitment) + if err != nil { + return nil, nil, fmt.Errorf("unexpected error while adding block to block queue: %w", err) + } + + lg.Info().Bool("parent_is_executed", true). + Int("missing_col", len(missingColls)). + Int("executables", len(executables)). + Msgf("block is enqueued") + + return missingColls, executables, nil + } + + // handle exception + if !errors.Is(err, storage.ErrNotFound) { + return nil, nil, fmt.Errorf("failed to get parent state commitment for block %v: %w", block.Header.ParentID, err) + } + + // the parent block is an unexecuted block. + // we can enqueue the block without providing the state commitment + missingColls, executables, err := e.blockQueue.HandleBlock(block, nil) + if err != nil { + if !errors.Is(err, block_queue.ErrMissingParent) { + return nil, nil, fmt.Errorf("unexpected error while adding block to block queue: %w", err) + } + + // if parent is missing, there are two possibilities: + // 1) parent was never enqueued to block queue + // 2) parent was enqueued, but it has been executed and removed from the block queue + // however, actually 1) is not possible 2) is the only possible case here, why? + // because forwardProcessableToHandler guarantees we always enqueue a block before its child, + // which means when HandleBlock is called with a block, then its parent block must have been + // called with HandleBlock already. Therefore, 1) is not possible. + // And the reason 2) is possible is because the fact that its parent block is missing + // might be outdated since OnBlockExecuted might be called concurrently in a different thread. + // it means OnBlockExecuted is called in a different thread after us getting the parent commit + // and before HandleBlock was called, therefore, we should re-enqueue the block with the + // parent commit. It's necessary to check again whether the parent block is executed after the call. 
+ lg.Warn().Msgf( + "block is missing parent block, re-enqueueing %v(parent: %v)", + blockID, block.Header.ParentID, + ) + + parentCommitment, err := e.execState.StateCommitmentByBlockID(block.Header.ParentID) + if err != nil { + return nil, nil, fmt.Errorf("failed to get parent state commitment when re-enqueue block %v (parent:%v): %w", + blockID, block.Header.ParentID, err) + } + + // now re-enqueue the block with parent commit + missing, execs, err := e.blockQueue.HandleBlock(block, &parentCommitment) + if err != nil { + return nil, nil, fmt.Errorf("unexpected error while reenqueue block to block queue: %w", err) + } + + missingColls = deduplicate(append(missingColls, missing...)) + executables = deduplicate(append(executables, execs...)) + } + + lg.Info().Bool("parent_is_executed", false). + Int("missing_col", len(missingColls)). + Int("executables", len(executables)). + Msgf("block is enqueued") + + return missingColls, executables, nil +} + +func (e *Core) onBlockExecuted( + block *entity.ExecutableBlock, + computationResult *execution.ComputationResult, + startedAt time.Time, +) error { + commit := computationResult.CurrentEndState() + + wg := sync.WaitGroup{} + wg.Add(1) + defer wg.Wait() + + go func() { + defer wg.Done() + e.eventConsumer.BeforeComputationResultSaved(e.unit.Ctx(), computationResult) + }() + + err := e.execState.SaveExecutionResults(e.unit.Ctx(), computationResult) + if err != nil { + return fmt.Errorf("cannot persist execution state: %w", err) + } + + // must call OnBlockExecuted AFTER saving the execution result to storage + // because when enqueuing a block, we rely on execState.StateCommitmentByBlockID + // to determine whether a block has been executed or not. 
+ executables, err := e.blockQueue.OnBlockExecuted(block.ID(), commit) + if err != nil { + return fmt.Errorf("unexpected error while marking block as executed: %w", err) + } + + e.stopControl.OnBlockExecuted(block.Block.Header) + + // notify event consumer so that the event consumer can do tasks + // such as broadcasting or uploading the result + logs := e.eventConsumer.OnComputationResultSaved(e.unit.Ctx(), computationResult) + + receipt := computationResult.ExecutionReceipt + e.log.Info(). + Hex("block_id", logging.Entity(block)). + Uint64("height", block.Block.Header.Height). + Int("collections", len(block.CompleteCollections)). + Hex("parent_block", block.Block.Header.ParentID[:]). + Int("collections", len(block.Block.Payload.Guarantees)). + Hex("start_state", block.StartState[:]). + Hex("final_state", commit[:]). + Hex("receipt_id", logging.Entity(receipt)). + Hex("result_id", logging.Entity(receipt.ExecutionResult)). + Hex("execution_data_id", receipt.ExecutionResult.ExecutionDataID[:]). + Bool("state_changed", commit != *block.StartState). + Uint64("num_txs", nonSystemTransactionCount(receipt.ExecutionResult)). + Int64("timeSpentInMS", time.Since(startedAt).Milliseconds()). + Str("logs", logs). // broadcasted + Msgf("block executed") + + // we ensures that the child blocks are only executed after the execution result of + // its parent block has been successfully saved to storage. + // this ensures OnBlockExecuted would not be called with blocks in a wrong order, such as + // OnBlockExecuted(childBlock) being called before OnBlockExecuted(parentBlock). + e.executeConcurrently(executables) + + return nil +} + +func (e *Core) onCollection(col *flow.Collection) error { + // EN might request a collection from multiple collection nodes, + // therefore might receive multiple copies of the same collection. + // we only need to store it once. 
+ err := storeCollectionIfMissing(e.collections, col) + if err != nil { + return fmt.Errorf("failed to store collection %v: %w", col.ID(), err) + } + + // if the collection is a duplication, it's still good to add it to the block queue, + // because chances are the collection was stored before a restart, and + // is not in the queue after the restart. + // adding it to the queue ensures we don't miss any collection. + // since the queue's state is in memory, processing a duplicated collection should be + // a fast no-op, and won't return any executable blocks. + executables, err := e.blockQueue.HandleCollection(col) + if err != nil { + return fmt.Errorf("unexpected error while adding collection to block queue") + } + + e.executeConcurrently(executables) + + return nil +} + +func storeCollectionIfMissing(collections storage.Collections, col *flow.Collection) error { + _, err := collections.ByID(col.ID()) + if err != nil { + if !errors.Is(err, storage.ErrNotFound) { + return fmt.Errorf("failed to get collection %v: %w", col.ID(), err) + } + + err := collections.Store(col) + if err != nil { + return fmt.Errorf("failed to store collection %v: %w", col.ID(), err) + } + } + + return nil +} + +// execute block concurrently +func (e *Core) executeConcurrently(executables []*entity.ExecutableBlock) { + for _, executable := range executables { + func(executable *entity.ExecutableBlock) { + e.unit.Launch(func() { + e.log.Info().Msgf("starting worker to consume throttled blocks") + err := e.execute(executable) + if err != nil { + e.log.Error().Err(err).Msgf("failed to execute block %v", executable.Block.ID()) + } + }) + }(executable) + } +} + +func (e *Core) execute(executable *entity.ExecutableBlock) error { + if !e.stopControl.ShouldExecuteBlock(executable.Block.Header) { + return nil + } + + e.log.Info(). + Hex("block_id", logging.Entity(executable)). + Uint64("height", executable.Block.Header.Height). + Int("collections", len(executable.CompleteCollections)). 
+ Msgf("executing block") + + startedAt := time.Now() + + result, err := e.executor.ExecuteBlock(e.unit.Ctx(), executable) + if err != nil { + return fmt.Errorf("failed to execute block %v: %w", executable.Block.ID(), err) + } + + err = e.onBlockExecuted(executable, result, startedAt) + if err != nil { + return fmt.Errorf("failed to handle execution result of block %v: %w", executable.Block.ID(), err) + } + + return nil +} + +func (e *Core) fetch(missingColls []*block_queue.MissingCollection) error { + for _, col := range missingColls { + err := e.collectionFetcher.FetchCollection(col.BlockID, col.Height, col.Guarantee) + if err != nil { + return fmt.Errorf("failed to fetch collection %v for block %v (height: %v): %w", + col.Guarantee.ID(), col.BlockID, col.Height, err) + } + } + + if len(missingColls) > 0 { + e.collectionFetcher.Force() + } + + return nil +} + +type IDEntity interface { + ID() flow.Identifier +} + +// deduplicate entities in a slice by the ID method +func deduplicate[T IDEntity](entities []T) []T { + seen := make(map[flow.Identifier]struct{}, len(entities)) + result := make([]T, 0, len(entities)) + + for _, entity := range entities { + id := entity.ID() + if _, ok := seen[id]; ok { + continue + } + + seen[id] = struct{}{} + result = append(result, entity) + } + + return result +} From 13cfccdbef1cc269ead7369e5f9b2a7b1e48bb44 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Mon, 1 Apr 2024 15:16:01 -0700 Subject: [PATCH 089/148] address review comments --- engine/execution/ingestion/core.go | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/engine/execution/ingestion/core.go b/engine/execution/ingestion/core.go index 56b7c0084f1..b32493b53a6 100644 --- a/engine/execution/ingestion/core.go +++ b/engine/execution/ingestion/core.go @@ -108,7 +108,8 @@ func (e *Core) OnBlock(header *flow.Header, qc *flow.QuorumCertificate) { // qc.Block is equivalent to header.ID() err := e.throttle.OnBlock(qc.BlockID) if 
err != nil { - e.log.Fatal().Err(err).Msgf("error processing block %v (%v)", header.Height, header.ID()) + e.log.Fatal().Err(err).Msgf("error processing block %v (qc.BlockID: %v, blockID: %v)", + header.Height, qc.BlockID, header.ID()) } } @@ -199,7 +200,10 @@ func (e *Core) onProcessableBlock(blockID flow.Identifier) error { } func (e *Core) enqueuBlock(block *flow.Block, blockID flow.Identifier) ( - []*block_queue.MissingCollection, []*entity.ExecutableBlock, error) { + []*block_queue.MissingCollection, + []*entity.ExecutableBlock, + error, +) { lg := e.log.With(). Hex("block_id", blockID[:]). Uint64("height", block.Header.Height). @@ -226,7 +230,8 @@ func (e *Core) enqueuBlock(block *flow.Block, blockID flow.Identifier) ( // handle exception if !errors.Is(err, storage.ErrNotFound) { - return nil, nil, fmt.Errorf("failed to get parent state commitment for block %v: %w", block.Header.ParentID, err) + return nil, nil, fmt.Errorf("failed to get state commitment for parent block %v of block %v (height: %v): %w", + block.Header.ParentID, blockID, block.Header.Height, err) } // the parent block is an unexecuted block. @@ -250,13 +255,13 @@ func (e *Core) enqueuBlock(block *flow.Block, blockID flow.Identifier) ( // and before HandleBlock was called, therefore, we should re-enqueue the block with the // parent commit. It's necessary to check again whether the parent block is executed after the call. 
lg.Warn().Msgf( - "block is missing parent block, re-enqueueing %v(parent: %v)", + "block is missing parent block, re-enqueueing %v (parent: %v)", blockID, block.Header.ParentID, ) parentCommitment, err := e.execState.StateCommitmentByBlockID(block.Header.ParentID) if err != nil { - return nil, nil, fmt.Errorf("failed to get parent state commitment when re-enqueue block %v (parent:%v): %w", + return nil, nil, fmt.Errorf("failed to get parent state commitment when re-enqueue block %v (parent: %v): %w", blockID, block.Header.ParentID, err) } From e2a16a6cd8075ddfe56c19fe7f589a21d560c1da Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Mon, 1 Apr 2024 16:48:20 -0700 Subject: [PATCH 090/148] move deduplicate to flow package --- engine/execution/ingestion/core.go | 26 ++------------------------ model/flow/entity.go | 29 +++++++++++++++++++++++++---- 2 files changed, 27 insertions(+), 28 deletions(-) diff --git a/engine/execution/ingestion/core.go b/engine/execution/ingestion/core.go index b32493b53a6..ae53a0a14ba 100644 --- a/engine/execution/ingestion/core.go +++ b/engine/execution/ingestion/core.go @@ -271,8 +271,8 @@ func (e *Core) enqueuBlock(block *flow.Block, blockID flow.Identifier) ( return nil, nil, fmt.Errorf("unexpected error while reenqueue block to block queue: %w", err) } - missingColls = deduplicate(append(missingColls, missing...)) - executables = deduplicate(append(executables, execs...)) + missingColls = flow.Deduplicate(append(missingColls, missing...)) + executables = flow.Deduplicate(append(executables, execs...)) } lg.Info().Bool("parent_is_executed", false). 
@@ -442,25 +442,3 @@ func (e *Core) fetch(missingColls []*block_queue.MissingCollection) error { return nil } - -type IDEntity interface { - ID() flow.Identifier -} - -// deduplicate entities in a slice by the ID method -func deduplicate[T IDEntity](entities []T) []T { - seen := make(map[flow.Identifier]struct{}, len(entities)) - result := make([]T, 0, len(entities)) - - for _, entity := range entities { - id := entity.ID() - if _, ok := seen[id]; ok { - continue - } - - seen[id] = struct{}{} - result = append(result, entity) - } - - return result -} diff --git a/model/flow/entity.go b/model/flow/entity.go index 963d0b15791..9708b48cd7d 100644 --- a/model/flow/entity.go +++ b/model/flow/entity.go @@ -1,5 +1,11 @@ package flow +type IDEntity interface { + // ID returns a unique id for this entity using a hash of the immutable + // fields of the entity. + ID() Identifier +} + // Entity defines how flow entities should be defined // Entities are flat data structures holding multiple data fields. // Entities don't include nested entities, they only include pointers to @@ -7,10 +13,7 @@ package flow // of keeping a slice of entity object itself. This simplifies storage, signature and validation // of entities. type Entity interface { - - // ID returns a unique id for this entity using a hash of the immutable - // fields of the entity. - ID() Identifier + IDEntity // Checksum returns a unique checksum for the entity, including the mutable // data such as signatures. 
@@ -24,3 +27,21 @@ func EntitiesToIDs[T Entity](entities []T) []Identifier { } return ids } + +// Deduplicate entities in a slice by the ID method +func Deduplicate[T IDEntity](entities []T) []T { + seen := make(map[Identifier]struct{}, len(entities)) + result := make([]T, 0, len(entities)) + + for _, entity := range entities { + id := entity.ID() + if _, ok := seen[id]; ok { + continue + } + + seen[id] = struct{}{} + result = append(result, entity) + } + + return result +} From 9eb8a810fa1b14502695a20c6d8d2b054554f2b9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20M=C3=BCller?= Date: Tue, 9 Apr 2024 17:17:01 -0700 Subject: [PATCH 091/148] simplify migration runtime, no need for interface and environment --- .../ledger/migrations/migrator_runtime.go | 24 +- .../util/migration_runtime_interface.go | 295 ------------------ 2 files changed, 4 insertions(+), 315 deletions(-) delete mode 100644 cmd/util/ledger/util/migration_runtime_interface.go diff --git a/cmd/util/ledger/migrations/migrator_runtime.go b/cmd/util/ledger/migrations/migrator_runtime.go index 940a0074933..2fb8ef80783 100644 --- a/cmd/util/ledger/migrations/migrator_runtime.go +++ b/cmd/util/ledger/migrations/migrator_runtime.go @@ -31,29 +31,13 @@ func newMigratorRuntime( accountsAtreeLedger := util.NewAccountsAtreeLedger(accounts) storage := runtime.NewStorage(accountsAtreeLedger, nil) - ri := &util.MigrationRuntimeInterface{ - Accounts: accounts, - } - - env := runtime.NewBaseInterpreterEnvironment(runtime.Config{ - AccountLinkingEnabled: true, - // Attachments are enabled everywhere except for Mainnet - AttachmentsEnabled: true, - // Capability Controllers are enabled everywhere except for Mainnet - CapabilityControllersEnabled: true, - }) - - env.Configure( - ri, - runtime.NewCodesAndPrograms(), - storage, - runtime.NewCoverageReport(), - ) - inter, err := interpreter.NewInterpreter( nil, nil, - env.InterpreterConfig) + &interpreter.Config{ + Storage: storage, + }, + ) if err != nil { return nil, 
err } diff --git a/cmd/util/ledger/util/migration_runtime_interface.go b/cmd/util/ledger/util/migration_runtime_interface.go deleted file mode 100644 index c72d8493095..00000000000 --- a/cmd/util/ledger/util/migration_runtime_interface.go +++ /dev/null @@ -1,295 +0,0 @@ -package util - -import ( - "fmt" - "time" - - "go.opentelemetry.io/otel/attribute" - - "github.com/onflow/atree" - "github.com/onflow/cadence" - "github.com/onflow/cadence/runtime" - "github.com/onflow/cadence/runtime/common" - "github.com/onflow/cadence/runtime/interpreter" - - "github.com/onflow/flow-go/fvm/environment" - "github.com/onflow/flow-go/model/flow" -) - -// MigrationRuntimeInterface is a runtime interface that can be used in migrations. -type MigrationRuntimeInterface struct { - Accounts environment.Accounts - Programs *environment.Programs - - // GetOrLoadProgramFunc allows for injecting extra logic - GetOrLoadProgramFunc func(location runtime.Location, load func() (*interpreter.Program, error)) (*interpreter.Program, error) -} - -func (m MigrationRuntimeInterface) ResolveLocation( - identifiers []runtime.Identifier, - location runtime.Location, -) ([]runtime.ResolvedLocation, error) { - - addressLocation, isAddress := location.(common.AddressLocation) - - // if the location is not an address location, e.g. an identifier location (`import Crypto`), - // then return a single resolved location which declares all identifiers. 
- if !isAddress { - return []runtime.ResolvedLocation{ - { - Location: location, - Identifiers: identifiers, - }, - }, nil - } - - // if the location is an address, - // and no specific identifiers where requested in the import statement, - // then fetch all identifiers at this address - if len(identifiers) == 0 { - address := flow.Address(addressLocation.Address) - - contractNames, err := m.Accounts.GetContractNames(address) - if err != nil { - return nil, fmt.Errorf("ResolveLocation failed: %w", err) - } - - // if there are no contractNames deployed, - // then return no resolved locations - if len(contractNames) == 0 { - return nil, nil - } - - identifiers = make([]runtime.Identifier, len(contractNames)) - - for i := range identifiers { - identifiers[i] = runtime.Identifier{ - Identifier: contractNames[i], - } - } - } - - // return one resolved location per identifier. - // each resolved location is an address contract location - resolvedLocations := make([]runtime.ResolvedLocation, len(identifiers)) - for i := range resolvedLocations { - identifier := identifiers[i] - resolvedLocations[i] = runtime.ResolvedLocation{ - Location: common.AddressLocation{ - Address: addressLocation.Address, - Name: identifier.Identifier, - }, - Identifiers: []runtime.Identifier{identifier}, - } - } - - return resolvedLocations, nil -} - -func (m MigrationRuntimeInterface) GetCode(location runtime.Location) ([]byte, error) { - contractLocation, ok := location.(common.AddressLocation) - if !ok { - return nil, fmt.Errorf("GetCode failed: expected AddressLocation") - } - - add, err := m.Accounts.GetContract(contractLocation.Name, flow.Address(contractLocation.Address)) - if err != nil { - return nil, fmt.Errorf("GetCode failed: %w", err) - } - - return add, nil -} - -func (m MigrationRuntimeInterface) GetAccountContractCode( - l common.AddressLocation, -) (code []byte, err error) { - return m.Accounts.GetContract(l.Name, flow.Address(l.Address)) -} - -func (m MigrationRuntimeInterface) 
GetOrLoadProgram(location runtime.Location, load func() (*interpreter.Program, error)) (*interpreter.Program, error) { - if m.GetOrLoadProgramFunc != nil { - return m.GetOrLoadProgramFunc(location, load) - } - - return m.Programs.GetOrLoadProgram(location, load) -} - -func (m MigrationRuntimeInterface) MeterMemory(_ common.MemoryUsage) error { - return nil -} - -func (m MigrationRuntimeInterface) MeterComputation(_ common.ComputationKind, _ uint) error { - return nil -} - -func (m MigrationRuntimeInterface) GetValue(_, _ []byte) (value []byte, err error) { - panic("unexpected GetValue call") -} - -func (m MigrationRuntimeInterface) SetValue(_, _, _ []byte) (err error) { - panic("unexpected SetValue call") -} - -func (m MigrationRuntimeInterface) CreateAccount(_ runtime.Address) (address runtime.Address, err error) { - panic("unexpected CreateAccount call") -} - -func (m MigrationRuntimeInterface) AddEncodedAccountKey(_ runtime.Address, _ []byte) error { - panic("unexpected AddEncodedAccountKey call") -} - -func (m MigrationRuntimeInterface) RevokeEncodedAccountKey(_ runtime.Address, _ int) (publicKey []byte, err error) { - panic("unexpected RevokeEncodedAccountKey call") -} - -func (m MigrationRuntimeInterface) AddAccountKey(_ runtime.Address, _ *runtime.PublicKey, _ runtime.HashAlgorithm, _ int) (*runtime.AccountKey, error) { - panic("unexpected AddAccountKey call") -} - -func (m MigrationRuntimeInterface) GetAccountKey(_ runtime.Address, _ int) (*runtime.AccountKey, error) { - panic("unexpected GetAccountKey call") -} - -func (m MigrationRuntimeInterface) RevokeAccountKey(_ runtime.Address, _ int) (*runtime.AccountKey, error) { - panic("unexpected RevokeAccountKey call") -} - -func (m MigrationRuntimeInterface) UpdateAccountContractCode(_ common.AddressLocation, _ []byte) (err error) { - panic("unexpected UpdateAccountContractCode call") -} - -func (m MigrationRuntimeInterface) RemoveAccountContractCode(common.AddressLocation) (err error) { - panic("unexpected 
RemoveAccountContractCode call") -} - -func (m MigrationRuntimeInterface) GetSigningAccounts() ([]runtime.Address, error) { - panic("unexpected GetSigningAccounts call") -} - -func (m MigrationRuntimeInterface) ProgramLog(_ string) error { - panic("unexpected ProgramLog call") -} - -func (m MigrationRuntimeInterface) EmitEvent(_ cadence.Event) error { - panic("unexpected EmitEvent call") -} - -func (m MigrationRuntimeInterface) ValueExists(_, _ []byte) (exists bool, err error) { - panic("unexpected ValueExists call") -} - -func (m MigrationRuntimeInterface) GenerateUUID() (uint64, error) { - panic("unexpected GenerateUUID call") -} - -func (m MigrationRuntimeInterface) GetComputationLimit() uint64 { - panic("unexpected GetComputationLimit call") -} - -func (m MigrationRuntimeInterface) SetComputationUsed(_ uint64) error { - panic("unexpected SetComputationUsed call") -} - -func (m MigrationRuntimeInterface) DecodeArgument(_ []byte, _ cadence.Type) (cadence.Value, error) { - panic("unexpected DecodeArgument call") -} - -func (m MigrationRuntimeInterface) GetCurrentBlockHeight() (uint64, error) { - panic("unexpected GetCurrentBlockHeight call") -} - -func (m MigrationRuntimeInterface) GetBlockAtHeight(_ uint64) (block runtime.Block, exists bool, err error) { - panic("unexpected GetBlockAtHeight call") -} - -func (m MigrationRuntimeInterface) ReadRandom([]byte) error { - panic("unexpected ReadRandom call") -} - -func (m MigrationRuntimeInterface) VerifySignature(_ []byte, _ string, _ []byte, _ []byte, _ runtime.SignatureAlgorithm, _ runtime.HashAlgorithm) (bool, error) { - panic("unexpected VerifySignature call") -} - -func (m MigrationRuntimeInterface) Hash(_ []byte, _ string, _ runtime.HashAlgorithm) ([]byte, error) { - panic("unexpected Hash call") -} - -func (m MigrationRuntimeInterface) GetAccountBalance(_ common.Address) (value uint64, err error) { - panic("unexpected GetAccountBalance call") -} - -func (m MigrationRuntimeInterface) GetAccountAvailableBalance(_ 
common.Address) (value uint64, err error) { - panic("unexpected GetAccountAvailableBalance call") -} - -func (m MigrationRuntimeInterface) GetStorageUsed(_ runtime.Address) (value uint64, err error) { - panic("unexpected GetStorageUsed call") -} - -func (m MigrationRuntimeInterface) GetStorageCapacity(_ runtime.Address) (value uint64, err error) { - panic("unexpected GetStorageCapacity call") -} - -func (m MigrationRuntimeInterface) ImplementationDebugLog(_ string) error { - panic("unexpected ImplementationDebugLog call") -} - -func (m MigrationRuntimeInterface) ValidatePublicKey(_ *runtime.PublicKey) error { - panic("unexpected ValidatePublicKey call") -} - -func (m MigrationRuntimeInterface) GetAccountContractNames(_ runtime.Address) ([]string, error) { - panic("unexpected GetAccountContractNames call") -} - -func (m MigrationRuntimeInterface) AllocateStorageIndex(_ []byte) (atree.StorageIndex, error) { - panic("unexpected AllocateStorageIndex call") -} - -func (m MigrationRuntimeInterface) ComputationUsed() (uint64, error) { - panic("unexpected ComputationUsed call") -} - -func (m MigrationRuntimeInterface) MemoryUsed() (uint64, error) { - panic("unexpected MemoryUsed call") -} - -func (m MigrationRuntimeInterface) InteractionUsed() (uint64, error) { - panic("unexpected InteractionUsed call") -} - -func (m MigrationRuntimeInterface) SetInterpreterSharedState(_ *interpreter.SharedState) { - panic("unexpected SetInterpreterSharedState call") -} - -func (m MigrationRuntimeInterface) GetInterpreterSharedState() *interpreter.SharedState { - panic("unexpected GetInterpreterSharedState call") -} - -func (m MigrationRuntimeInterface) AccountKeysCount(_ runtime.Address) (uint64, error) { - panic("unexpected AccountKeysCount call") -} - -func (m MigrationRuntimeInterface) BLSVerifyPOP(_ *runtime.PublicKey, _ []byte) (bool, error) { - panic("unexpected BLSVerifyPOP call") -} - -func (m MigrationRuntimeInterface) BLSAggregateSignatures(_ [][]byte) ([]byte, error) { - 
panic("unexpected BLSAggregateSignatures call") -} - -func (m MigrationRuntimeInterface) BLSAggregatePublicKeys(_ []*runtime.PublicKey) (*runtime.PublicKey, error) { - panic("unexpected BLSAggregatePublicKeys call") -} - -func (m MigrationRuntimeInterface) ResourceOwnerChanged(_ *interpreter.Interpreter, _ *interpreter.CompositeValue, _ common.Address, _ common.Address) { - panic("unexpected ResourceOwnerChanged call") -} - -func (m MigrationRuntimeInterface) GenerateAccountID(_ common.Address) (uint64, error) { - panic("unexpected GenerateAccountID call") -} - -func (m MigrationRuntimeInterface) RecordTrace(_ string, _ runtime.Location, _ time.Duration, _ []attribute.KeyValue) { - panic("unexpected RecordTrace call") -} From 9882d2374675c13f5392bc40d24332eeda382ee3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20M=C3=BCller?= Date: Tue, 9 Apr 2024 17:17:55 -0700 Subject: [PATCH 092/148] simplify validation runtime: no need for interface and environment --- .../migrations/cadence_value_validation.go | 238 ++---------------- .../ledger/migrations/migrator_runtime.go | 4 - 2 files changed, 18 insertions(+), 224 deletions(-) diff --git a/cmd/util/ledger/migrations/cadence_value_validation.go b/cmd/util/ledger/migrations/cadence_value_validation.go index e72985dac95..755b5ffd538 100644 --- a/cmd/util/ledger/migrations/cadence_value_validation.go +++ b/cmd/util/ledger/migrations/cadence_value_validation.go @@ -3,15 +3,11 @@ package migrations import ( "fmt" "strings" - "time" - "github.com/onflow/atree" - "github.com/onflow/cadence" "github.com/onflow/cadence/runtime" "github.com/onflow/cadence/runtime/common" "github.com/onflow/cadence/runtime/interpreter" "github.com/rs/zerolog" - "go.opentelemetry.io/otel/attribute" "github.com/onflow/flow-go/cmd/util/ledger/util" "github.com/onflow/flow-go/ledger" @@ -99,7 +95,12 @@ func validateStorageDomain( newValue := newStorageMap.ReadValue(nil, mapKey) - err := cadenceValueEqual(oldRuntime.Interpreter, oldValue, 
newRuntime.Interpreter, newValue) + err := cadenceValueEqual( + oldRuntime.Interpreter, + oldValue, + newRuntime.Interpreter, + newValue, + ) if err != nil { if verboseLogging { log.Info(). @@ -112,7 +113,13 @@ func validateStorageDomain( Msgf("failed to validate value") } - return fmt.Errorf("failed to validate value for address %s, domain %s, key %s: %s", address.Hex(), domain, key, err.Error()) + return fmt.Errorf( + "failed to validate value for address %s, domain %s, key %s: %s", + address.Hex(), + domain, + key, + err.Error(), + ) } } @@ -380,22 +387,13 @@ func newReadonlyStorageRuntime(payloads []*ledger.Payload) ( storage := runtime.NewStorage(readonlyLedger, nil) - env := runtime.NewBaseInterpreterEnvironment(runtime.Config{ - AccountLinkingEnabled: true, - // Attachments are enabled everywhere except for Mainnet - AttachmentsEnabled: true, - // Capability Controllers are enabled everywhere except for Mainnet - CapabilityControllersEnabled: true, - }) - - env.Configure( - &NoopRuntimeInterface{}, - runtime.NewCodesAndPrograms(), - storage, + inter, err := interpreter.NewInterpreter( nil, + nil, + &interpreter.Config{ + Storage: storage, + }, ) - - inter, err := interpreter.NewInterpreter(nil, nil, env.InterpreterConfig) if err != nil { return nil, err } @@ -405,203 +403,3 @@ func newReadonlyStorageRuntime(payloads []*ledger.Payload) ( Storage: storage, }, nil } - -// NoopRuntimeInterface is a runtime interface that can be used in migrations. 
-type NoopRuntimeInterface struct { -} - -func (NoopRuntimeInterface) ResolveLocation(_ []runtime.Identifier, _ runtime.Location) ([]runtime.ResolvedLocation, error) { - panic("unexpected ResolveLocation call") -} - -func (NoopRuntimeInterface) GetCode(_ runtime.Location) ([]byte, error) { - panic("unexpected GetCode call") -} - -func (NoopRuntimeInterface) GetAccountContractCode(_ common.AddressLocation) ([]byte, error) { - panic("unexpected GetAccountContractCode call") -} - -func (NoopRuntimeInterface) GetOrLoadProgram(_ runtime.Location, _ func() (*interpreter.Program, error)) (*interpreter.Program, error) { - panic("unexpected GetOrLoadProgram call") -} - -func (NoopRuntimeInterface) MeterMemory(_ common.MemoryUsage) error { - return nil -} - -func (NoopRuntimeInterface) MeterComputation(_ common.ComputationKind, _ uint) error { - return nil -} - -func (NoopRuntimeInterface) GetValue(_, _ []byte) (value []byte, err error) { - panic("unexpected GetValue call") -} - -func (NoopRuntimeInterface) SetValue(_, _, _ []byte) (err error) { - panic("unexpected SetValue call") -} - -func (NoopRuntimeInterface) CreateAccount(_ runtime.Address) (address runtime.Address, err error) { - panic("unexpected CreateAccount call") -} - -func (NoopRuntimeInterface) AddEncodedAccountKey(_ runtime.Address, _ []byte) error { - panic("unexpected AddEncodedAccountKey call") -} - -func (NoopRuntimeInterface) RevokeEncodedAccountKey(_ runtime.Address, _ int) (publicKey []byte, err error) { - panic("unexpected RevokeEncodedAccountKey call") -} - -func (NoopRuntimeInterface) AddAccountKey(_ runtime.Address, _ *runtime.PublicKey, _ runtime.HashAlgorithm, _ int) (*runtime.AccountKey, error) { - panic("unexpected AddAccountKey call") -} - -func (NoopRuntimeInterface) GetAccountKey(_ runtime.Address, _ int) (*runtime.AccountKey, error) { - panic("unexpected GetAccountKey call") -} - -func (NoopRuntimeInterface) RevokeAccountKey(_ runtime.Address, _ int) (*runtime.AccountKey, error) { - 
panic("unexpected RevokeAccountKey call") -} - -func (NoopRuntimeInterface) UpdateAccountContractCode(_ common.AddressLocation, _ []byte) (err error) { - panic("unexpected UpdateAccountContractCode call") -} - -func (NoopRuntimeInterface) RemoveAccountContractCode(common.AddressLocation) (err error) { - panic("unexpected RemoveAccountContractCode call") -} - -func (NoopRuntimeInterface) GetSigningAccounts() ([]runtime.Address, error) { - panic("unexpected GetSigningAccounts call") -} - -func (NoopRuntimeInterface) ProgramLog(_ string) error { - panic("unexpected ProgramLog call") -} - -func (NoopRuntimeInterface) EmitEvent(_ cadence.Event) error { - panic("unexpected EmitEvent call") -} - -func (NoopRuntimeInterface) ValueExists(_, _ []byte) (exists bool, err error) { - panic("unexpected ValueExists call") -} - -func (NoopRuntimeInterface) GenerateUUID() (uint64, error) { - panic("unexpected GenerateUUID call") -} - -func (NoopRuntimeInterface) GetComputationLimit() uint64 { - panic("unexpected GetComputationLimit call") -} - -func (NoopRuntimeInterface) SetComputationUsed(_ uint64) error { - panic("unexpected SetComputationUsed call") -} - -func (NoopRuntimeInterface) DecodeArgument(_ []byte, _ cadence.Type) (cadence.Value, error) { - panic("unexpected DecodeArgument call") -} - -func (NoopRuntimeInterface) GetCurrentBlockHeight() (uint64, error) { - panic("unexpected GetCurrentBlockHeight call") -} - -func (NoopRuntimeInterface) GetBlockAtHeight(_ uint64) (block runtime.Block, exists bool, err error) { - panic("unexpected GetBlockAtHeight call") -} - -func (NoopRuntimeInterface) ReadRandom([]byte) error { - panic("unexpected ReadRandom call") -} - -func (NoopRuntimeInterface) VerifySignature(_ []byte, _ string, _ []byte, _ []byte, _ runtime.SignatureAlgorithm, _ runtime.HashAlgorithm) (bool, error) { - panic("unexpected VerifySignature call") -} - -func (NoopRuntimeInterface) Hash(_ []byte, _ string, _ runtime.HashAlgorithm) ([]byte, error) { - panic("unexpected 
Hash call") -} - -func (NoopRuntimeInterface) GetAccountBalance(_ common.Address) (value uint64, err error) { - panic("unexpected GetAccountBalance call") -} - -func (NoopRuntimeInterface) GetAccountAvailableBalance(_ common.Address) (value uint64, err error) { - panic("unexpected GetAccountAvailableBalance call") -} - -func (NoopRuntimeInterface) GetStorageUsed(_ runtime.Address) (value uint64, err error) { - panic("unexpected GetStorageUsed call") -} - -func (NoopRuntimeInterface) GetStorageCapacity(_ runtime.Address) (value uint64, err error) { - panic("unexpected GetStorageCapacity call") -} - -func (NoopRuntimeInterface) ImplementationDebugLog(_ string) error { - panic("unexpected ImplementationDebugLog call") -} - -func (NoopRuntimeInterface) ValidatePublicKey(_ *runtime.PublicKey) error { - panic("unexpected ValidatePublicKey call") -} - -func (NoopRuntimeInterface) GetAccountContractNames(_ runtime.Address) ([]string, error) { - panic("unexpected GetAccountContractNames call") -} - -func (NoopRuntimeInterface) AllocateStorageIndex(_ []byte) (atree.StorageIndex, error) { - panic("unexpected AllocateStorageIndex call") -} - -func (NoopRuntimeInterface) ComputationUsed() (uint64, error) { - panic("unexpected ComputationUsed call") -} - -func (NoopRuntimeInterface) MemoryUsed() (uint64, error) { - panic("unexpected MemoryUsed call") -} - -func (NoopRuntimeInterface) InteractionUsed() (uint64, error) { - panic("unexpected InteractionUsed call") -} - -func (NoopRuntimeInterface) SetInterpreterSharedState(_ *interpreter.SharedState) { - panic("unexpected SetInterpreterSharedState call") -} - -func (NoopRuntimeInterface) GetInterpreterSharedState() *interpreter.SharedState { - panic("unexpected GetInterpreterSharedState call") -} - -func (NoopRuntimeInterface) AccountKeysCount(_ runtime.Address) (uint64, error) { - panic("unexpected AccountKeysCount call") -} - -func (NoopRuntimeInterface) BLSVerifyPOP(_ *runtime.PublicKey, _ []byte) (bool, error) { - 
panic("unexpected BLSVerifyPOP call") -} - -func (NoopRuntimeInterface) BLSAggregateSignatures(_ [][]byte) ([]byte, error) { - panic("unexpected BLSAggregateSignatures call") -} - -func (NoopRuntimeInterface) BLSAggregatePublicKeys(_ []*runtime.PublicKey) (*runtime.PublicKey, error) { - panic("unexpected BLSAggregatePublicKeys call") -} - -func (NoopRuntimeInterface) ResourceOwnerChanged(_ *interpreter.Interpreter, _ *interpreter.CompositeValue, _ common.Address, _ common.Address) { - panic("unexpected ResourceOwnerChanged call") -} - -func (NoopRuntimeInterface) GenerateAccountID(_ common.Address) (uint64, error) { - panic("unexpected GenerateAccountID call") -} - -func (NoopRuntimeInterface) RecordTrace(_ string, _ runtime.Location, _ time.Duration, _ []attribute.KeyValue) { - panic("unexpected RecordTrace call") -} diff --git a/cmd/util/ledger/migrations/migrator_runtime.go b/cmd/util/ledger/migrations/migrator_runtime.go index 2fb8ef80783..6fe67c9017a 100644 --- a/cmd/util/ledger/migrations/migrator_runtime.go +++ b/cmd/util/ledger/migrations/migrator_runtime.go @@ -62,7 +62,3 @@ type migratorRuntime struct { Address common.Address Accounts *util.AccountsAtreeLedger } - -func (mr *migratorRuntime) GetReadOnlyStorage() *runtime.Storage { - return runtime.NewStorage(util.NewPayloadsReadonlyLedger(mr.Snapshot), nil) -} From e25a0c85ebcaae55dccc3035857078c17cf14f4e Mon Sep 17 00:00:00 2001 From: Andrii Slisarchuk Date: Wed, 10 Apr 2024 15:39:37 +0300 Subject: [PATCH 093/148] Fixed remarks --- engine/access/rpc/backend/backend.go | 76 ++++++++++--------- .../backend/backend_stream_transactions.go | 44 ++++++----- .../tests/access/cohort1/access_api_test.go | 3 + 3 files changed, 67 insertions(+), 56 deletions(-) diff --git a/engine/access/rpc/backend/backend.go b/engine/access/rpc/backend/backend.go index 436f3eaba65..cc9da264c49 100644 --- a/engine/access/rpc/backend/backend.go +++ b/engine/access/rpc/backend/backend.go @@ -162,15 +162,6 @@ func New(params Params) 
(*Backend, error) { // initialize node version info nodeInfo := getNodeVersionInfo(params.State.Params()) - transactionsLocalDataProvider := &TransactionsLocalDataProvider{ - state: params.State, - collections: params.Collections, - blocks: params.Blocks, - eventsIndex: params.EventsIndex, - txResultsIndex: params.TxResultsIndex, - systemTxID: systemTxID, - } - b := &Backend{ state: params.State, BlockTracker: params.BlockTracker, @@ -187,25 +178,6 @@ func New(params Params) (*Backend, error) { scriptExecutor: params.ScriptExecutor, scriptExecMode: params.ScriptExecutionMode, }, - backendTransactions: backendTransactions{ - TransactionsLocalDataProvider: transactionsLocalDataProvider, - log: params.Log, - staticCollectionRPC: params.CollectionRPC, - chainID: params.ChainID, - transactions: params.Transactions, - executionReceipts: params.ExecutionReceipts, - transactionValidator: configureTransactionValidator(params.State, params.ChainID), - transactionMetrics: params.AccessMetrics, - retry: retry, - connFactory: params.ConnFactory, - previousAccessNodes: params.HistoricalAccessNodes, - nodeCommunicator: params.Communicator, - txResultCache: txResCache, - txErrorMessagesCache: txErrorMessagesCache, - txResultQueryMode: params.TxResultQueryMode, - systemTx: systemTx, - systemTxID: systemTxID, - }, backendEvents: backendEvents{ log: params.Log, chain: params.ChainID.Chain(), @@ -253,13 +225,7 @@ func New(params Params) (*Backend, error) { subscriptionHandler: params.SubscriptionHandler, blockTracker: params.BlockTracker, }, - backendSubscribeTransactions: backendSubscribeTransactions{ - txLocalDataProvider: transactionsLocalDataProvider, - log: params.Log, - executionResults: params.ExecutionResults, - subscriptionHandler: params.SubscriptionHandler, - blockTracker: params.BlockTracker, - }, + collections: params.Collections, executionReceipts: params.ExecutionReceipts, connFactory: params.ConnFactory, @@ -267,8 +233,46 @@ func New(params Params) (*Backend, error) { 
nodeInfo: nodeInfo, } + transactionsLocalDataProvider := &TransactionsLocalDataProvider{ + state: params.State, + collections: params.Collections, + blocks: params.Blocks, + eventsIndex: params.EventsIndex, + txResultsIndex: params.TxResultsIndex, + systemTxID: systemTxID, + } + + b.backendTransactions = backendTransactions{ + TransactionsLocalDataProvider: transactionsLocalDataProvider, + log: params.Log, + staticCollectionRPC: params.CollectionRPC, + chainID: params.ChainID, + transactions: params.Transactions, + executionReceipts: params.ExecutionReceipts, + transactionValidator: configureTransactionValidator(params.State, params.ChainID), + transactionMetrics: params.AccessMetrics, + retry: retry, + connFactory: params.ConnFactory, + previousAccessNodes: params.HistoricalAccessNodes, + nodeCommunicator: params.Communicator, + txResultCache: txResCache, + txErrorMessagesCache: txErrorMessagesCache, + txResultQueryMode: params.TxResultQueryMode, + systemTx: systemTx, + systemTxID: systemTxID, + } + b.backendTransactions.txErrorMessages = b - b.backendSubscribeTransactions.backendTransactions = &b.backendTransactions + + b.backendSubscribeTransactions = backendSubscribeTransactions{ + txLocalDataProvider: transactionsLocalDataProvider, + backendTransactions: &b.backendTransactions, + log: params.Log, + executionResults: params.ExecutionResults, + subscriptionHandler: params.SubscriptionHandler, + blockTracker: params.BlockTracker, + } + retry.SetBackend(b) preferredENIdentifiers, err = identifierList(params.PreferredExecutionNodeIDs) diff --git a/engine/access/rpc/backend/backend_stream_transactions.go b/engine/access/rpc/backend/backend_stream_transactions.go index 08628f9b289..846ceb38cb1 100644 --- a/engine/access/rpc/backend/backend_stream_transactions.go +++ b/engine/access/rpc/backend/backend_stream_transactions.go @@ -71,7 +71,7 @@ func (b *backendSubscribeTransactions) SubscribeTransactionStatuses( // subscription responses based on new blocks. 
func (b *backendSubscribeTransactions) getTransactionStatusResponse(txInfo *TransactionSubscriptionMetadata) func(context.Context, uint64) (interface{}, error) { return func(ctx context.Context, height uint64) (interface{}, error) { - err := b.validateBlockHeight(height) + err := b.checkBlockReady(height) if err != nil { return nil, err } @@ -122,11 +122,11 @@ func (b *backendSubscribeTransactions) getTransactionStatusResponse(txInfo *Tran // If block with transaction was not found, get transaction status to check if it different from last status if txInfo.blockWithTx == nil { txInfo.Status, err = b.txLocalDataProvider.DeriveUnknownTransactionStatus(txInfo.txReferenceBlockID) - } else { - //If transaction result was not found, get transaction status to check if it different from last status - if txInfo.Status == prevTxStatus { - txInfo.Status, err = b.txLocalDataProvider.DeriveTransactionStatus(txInfo.BlockID, txInfo.blockWithTx.Height, txInfo.txExecuted) - } + } else if txInfo.Status == prevTxStatus { + // When a block with the transaction is available, it is possible to receive a new transaction status while + // searching for the transaction result. Otherwise, it remains unchanged. So, if the old and new transaction + // statuses are the same, the current transaction status should be retrieved. + txInfo.Status, err = b.txLocalDataProvider.DeriveTransactionStatus(txInfo.BlockID, txInfo.blockWithTx.Height, txInfo.txExecuted) } if err != nil { if !errors.Is(err, state.ErrUnknownSnapshotReference) { @@ -135,7 +135,8 @@ func (b *backendSubscribeTransactions) getTransactionStatusResponse(txInfo *Tran return nil, rpc.ConvertStorageError(err) } - // The same transaction status should not be reported, so return here with no response + // If the old and new transaction statuses are still the same, the status change should not be reported, so + // return here with no response. 
if prevTxStatus == txInfo.Status { return nil, nil } @@ -154,12 +155,16 @@ func (b *backendSubscribeTransactions) generateResultsWithMissingStatuses( txInfo *TransactionSubscriptionMetadata, prevTxStatus flow.TransactionStatus, ) ([]*access.TransactionResult, error) { - - // If the status is expired, which is the last status, return its result. + // If the previous status is pending and the new status is expired, which is the last status, return its result. + // If the previous status is anything other than pending, return an error, as this transition is unexpected. if txInfo.Status == flow.TransactionStatusExpired { - return []*access.TransactionResult{ - txInfo.TransactionResult, - }, nil + if prevTxStatus == flow.TransactionStatusPending { + return []*access.TransactionResult{ + txInfo.TransactionResult, + }, nil + } else { + return nil, fmt.Errorf("unexpected transition from %s to %s transaction status", prevTxStatus.String(), txInfo.Status.String()) + } } var results []*access.TransactionResult @@ -167,28 +172,27 @@ func (b *backendSubscribeTransactions) generateResultsWithMissingStatuses( // If the difference between statuses' values is more than one step, fill in the missing results. 
if (txInfo.Status - prevTxStatus) > 1 { for missingStatus := prevTxStatus + 1; missingStatus < txInfo.Status; missingStatus++ { - var missingTxResult access.TransactionResult switch missingStatus { case flow.TransactionStatusPending: - missingTxResult = access.TransactionResult{ + results = append(results, &access.TransactionResult{ Status: missingStatus, TransactionID: txInfo.TransactionID, - } + }) case flow.TransactionStatusFinalized: - missingTxResult = access.TransactionResult{ + results = append(results, &access.TransactionResult{ Status: missingStatus, TransactionID: txInfo.TransactionID, BlockID: txInfo.BlockID, BlockHeight: txInfo.BlockHeight, CollectionID: txInfo.CollectionID, - } + }) case flow.TransactionStatusExecuted: - missingTxResult = *txInfo.TransactionResult + missingTxResult := *txInfo.TransactionResult missingTxResult.Status = missingStatus + results = append(results, &missingTxResult) default: return nil, fmt.Errorf("unexpected missing transaction status") } - results = append(results, &missingTxResult) } } @@ -196,7 +200,7 @@ func (b *backendSubscribeTransactions) generateResultsWithMissingStatuses( return results, nil } -func (b *backendSubscribeTransactions) validateBlockHeight(height uint64) error { +func (b *backendSubscribeTransactions) checkBlockReady(height uint64) error { // Get the highest available finalized block height highestHeight, err := b.blockTracker.GetHighestHeight(flow.BlockStatusFinalized) if err != nil { diff --git a/integration/tests/access/cohort1/access_api_test.go b/integration/tests/access/cohort1/access_api_test.go index 2e89ab6a6f6..e3ad3369c43 100644 --- a/integration/tests/access/cohort1/access_api_test.go +++ b/integration/tests/access/cohort1/access_api_test.go @@ -311,6 +311,9 @@ func (s *AccessAPISuite) TestSendAndSubscribeTransactionStatuses() { expectedCounter++ lastReportedTxStatus = resp.TransactionResults.Status } + + // Check, if the final transaction status is sealed. 
+ s.Assert().Equal(entities.TransactionStatus_SEALED, lastReportedTxStatus) } func (s *AccessAPISuite) testGetAccount(client *client.Client) { From cd6a7a3bab4e19f8b3c1e8b6591fc0672bc69edc Mon Sep 17 00:00:00 2001 From: UlyanaAndrukhiv Date: Wed, 10 Apr 2024 16:14:06 +0300 Subject: [PATCH 094/148] Added AssertAllResponsesHandled to ResponseTracker --- .../access/cohort3/grpc_state_stream_test.go | 62 ++++++++++++------- .../cohort3/grpc_streaming_blocks_test.go | 25 +++----- 2 files changed, 45 insertions(+), 42 deletions(-) diff --git a/integration/tests/access/cohort3/grpc_state_stream_test.go b/integration/tests/access/cohort3/grpc_state_stream_test.go index 685f38c1770..be6f0840b99 100644 --- a/integration/tests/access/cohort3/grpc_state_stream_test.go +++ b/integration/tests/access/cohort3/grpc_state_stream_test.go @@ -10,6 +10,7 @@ import ( "testing" "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "google.golang.org/grpc" @@ -189,8 +190,6 @@ func (s *GrpcStateStreamSuite) TestHappyPath() { txGenerator, err := s.net.ContainerByName(testnet.PrimaryAN).TestnetClient() s.Require().NoError(err) - header, err := txGenerator.GetLatestSealedBlockHeader(s.ctx) - s.Require().NoError(err) var startValue interface{} txCount := 10 @@ -198,9 +197,9 @@ func (s *GrpcStateStreamSuite) TestHappyPath() { for _, rpc := range s.testedRPCs() { s.T().Run(rpc.name, func(t *testing.T) { if rpc.name == "SubscribeEventsFromStartBlockID" { - startValue = header.ID.Bytes() + startValue = convert.IdentifierToMessage(blockA.ID()) } else { - startValue = header.Height + startValue = blockA.Header.Height } testANRecv := rpc.call(s.ctx, sdkClientTestAN, startValue, &executiondata.EventFilter{}) @@ -237,7 +236,7 @@ func (s *GrpcStateStreamSuite) TestHappyPath() { foundONTxCount := 0 messageIndex := counters.NewMonotonousCounter(0) - r := NewResponseTracker(compareEventsResponse) + r := 
NewResponseTracker(compareEventsResponse, 3) for { select { @@ -274,6 +273,8 @@ func (s *GrpcStateStreamSuite) TestHappyPath() { break } } + + r.AssertAllResponsesHandled(t, txCount) }) } } @@ -364,21 +365,39 @@ func (s *GrpcStateStreamSuite) getRPCs() []subscribeEventsRPCTest { // ResponseTracker is a generic tracker for responses. type ResponseTracker[T any] struct { - r map[uint64]map[string]T - mu sync.RWMutex - compare func(t *testing.T, responses map[uint64]map[string]T, blockHeight uint64) error + r map[uint64]map[string]T + mu sync.RWMutex + compare func(t *testing.T, responses map[uint64]map[string]T, blockHeight uint64) error + checkCount int // actual common count of responses we want to check + responsesCountToCompare int // count of responses that we want to compare with each other } // NewResponseTracker creates a new ResponseTracker. func NewResponseTracker[T any]( compare func(t *testing.T, responses map[uint64]map[string]T, blockHeight uint64) error, + responsesCountToCompare int, ) *ResponseTracker[T] { return &ResponseTracker[T]{ - r: make(map[uint64]map[string]T), - compare: compare, + r: make(map[uint64]map[string]T), + compare: compare, + responsesCountToCompare: responsesCountToCompare, } } +func (r *ResponseTracker[T]) AssertAllResponsesHandled(t *testing.T, expectedCheckCount int) { + assert.Equal(t, expectedCheckCount, r.checkCount) + + // we check if response tracker has some responses which were not checked, but should be checked + hasNotComparedResponses := false + for _, valueMap := range r.r { + if len(valueMap) == r.responsesCountToCompare { + hasNotComparedResponses = true + break + } + } + assert.False(t, hasNotComparedResponses) +} + func (r *ResponseTracker[T]) Add(t *testing.T, blockHeight uint64, name string, response T) { r.mu.Lock() defer r.mu.Unlock() @@ -388,6 +407,11 @@ func (r *ResponseTracker[T]) Add(t *testing.T, blockHeight uint64, name string, } r.r[blockHeight][name] = response + if len(r.r[blockHeight]) != 
r.responsesCountToCompare { + return + } + + r.checkCount += 1 err := r.compare(t, r.r, blockHeight) if err != nil { log.Fatalf("comparison error at block height %d: %v", blockHeight, err) @@ -411,29 +435,21 @@ func eventsResponseHandler(msg *executiondata.SubscribeEventsResponse) (*Subscri } func compareEventsResponse(t *testing.T, responses map[uint64]map[string]*SubscribeEventsResponse, blockHeight uint64) error { - if len(responses[blockHeight]) != 3 { - return nil - } + accessControlData := responses[blockHeight]["access_control"] accessTestData := responses[blockHeight]["access_test"] observerTestData := responses[blockHeight]["observer_test"] // Compare access_control with access_test - err := compareEvents(t, accessControlData, accessTestData) - if err != nil { - return fmt.Errorf("failure comparing access and access data: %d: %v", blockHeight, err) - } + compareEvents(t, accessControlData, accessTestData) // Compare access_control with observer_test - err = compareEvents(t, accessControlData, observerTestData) - if err != nil { - return fmt.Errorf("failure comparing access and observer data: %d: %v", blockHeight, err) - } + compareEvents(t, accessControlData, observerTestData) return nil } -func compareEvents(t *testing.T, controlData, testData *SubscribeEventsResponse) error { +func compareEvents(t *testing.T, controlData, testData *SubscribeEventsResponse) { require.Equal(t, controlData.BlockID, testData.BlockID) require.Equal(t, controlData.Height, testData.Height) require.Equal(t, controlData.BlockTimestamp, testData.BlockTimestamp) @@ -447,8 +463,6 @@ func compareEvents(t *testing.T, controlData, testData *SubscribeEventsResponse) require.Equal(t, controlData.Events[i].EventIndex, testData.Events[i].EventIndex) require.True(t, bytes.Equal(controlData.Events[i].Payload, testData.Events[i].Payload)) } - - return nil } // TODO: switch to SDK versions once crypto library is fixed to support the latest SDK version diff --git 
a/integration/tests/access/cohort3/grpc_streaming_blocks_test.go b/integration/tests/access/cohort3/grpc_streaming_blocks_test.go index 75b0a4b5ffd..82e1c23cf28 100644 --- a/integration/tests/access/cohort3/grpc_streaming_blocks_test.go +++ b/integration/tests/access/cohort3/grpc_streaming_blocks_test.go @@ -152,20 +152,15 @@ func (s *GrpcBlocksStreamSuite) TestHappyPath() { // wait for the requested number of sealed blocks s.BlockState.WaitForSealed(s.T(), blockA.Header.Height+blockCount) - txGenerator, err := s.net.ContainerByName(testnet.PrimaryAN).TestnetClient() - s.Require().NoError(err) - header, err := txGenerator.GetLatestSealedBlockHeader(s.ctx) - s.Require().NoError(err) - var startValue interface{} txCount := 10 for _, rpc := range s.testedRPCs() { s.T().Run(rpc.name, func(t *testing.T) { if rpc.name == "SubscribeBlocksFromStartBlockID" { - startValue = header.ID.Bytes() + startValue = convert.IdentifierToMessage(blockA.ID()) } else { - startValue = header.Height + startValue = blockA.Header.Height } accessRecv := rpc.call(s.ctx, accessClient, startValue) @@ -179,7 +174,7 @@ func (s *GrpcBlocksStreamSuite) TestHappyPath() { foundANTxCount := 0 foundONTxCount := 0 - r := NewResponseTracker(compareBlocksResponse) + r := NewResponseTracker(compareBlocksResponse, 2) for { select { @@ -201,6 +196,8 @@ func (s *GrpcBlocksStreamSuite) TestHappyPath() { break } } + + r.AssertAllResponsesHandled(t, txCount) }) } } @@ -210,28 +207,20 @@ func blockResponseHandler(msg *accessproto.SubscribeBlocksResponse) (*flow.Block } func compareBlocksResponse(t *testing.T, responses map[uint64]map[string]*flow.Block, blockHeight uint64) error { - if len(responses[blockHeight]) != 2 { - return nil - } accessData := responses[blockHeight]["access"] observerData := responses[blockHeight]["observer"] // Compare access with observer - err := compareBlocks(t, accessData, observerData) - if err != nil { - return fmt.Errorf("failure comparing access and observer data: %d: %v", 
blockHeight, err) - } + compareBlocks(t, accessData, observerData) return nil } -func compareBlocks(t *testing.T, accessBlock *flow.Block, observerBlock *flow.Block) error { +func compareBlocks(t *testing.T, accessBlock *flow.Block, observerBlock *flow.Block) { require.Equal(t, accessBlock.ID(), observerBlock.ID()) require.Equal(t, accessBlock.Header.Height, observerBlock.Header.Height) require.Equal(t, accessBlock.Header.Timestamp, observerBlock.Header.Timestamp) require.Equal(t, accessBlock.Payload.Hash(), observerBlock.Payload.Hash()) - - return nil } type subscribeBlocksRPCTest struct { From 44a7ca656de046c4543fb3c3c5b17a41ef7d859f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20M=C3=BCller?= Date: Wed, 10 Apr 2024 10:45:13 -0700 Subject: [PATCH 095/148] improve naming for migrator runtime --- .../migrations/atree_register_migration.go | 12 +++--- ....go => atree_register_migrator_runtime.go} | 38 +++++++++---------- .../cadence_value_validation_test.go | 4 +- 3 files changed, 27 insertions(+), 27 deletions(-) rename cmd/util/ledger/migrations/{migrator_runtime.go => atree_register_migrator_runtime.go} (55%) diff --git a/cmd/util/ledger/migrations/atree_register_migration.go b/cmd/util/ledger/migrations/atree_register_migration.go index 6722216e34a..82674a0f766 100644 --- a/cmd/util/ledger/migrations/atree_register_migration.go +++ b/cmd/util/ledger/migrations/atree_register_migration.go @@ -94,7 +94,7 @@ func (m *AtreeRegisterMigrator) MigrateAccount( oldPayloads []*ledger.Payload, ) ([]*ledger.Payload, error) { // create all the runtime components we need for the migration - mr, err := newMigratorRuntime(address, oldPayloads) + mr, err := NewAtreeRegisterMigratorRuntime(address, oldPayloads) if err != nil { return nil, fmt.Errorf("failed to create migrator runtime: %w", err) } @@ -162,7 +162,7 @@ func (m *AtreeRegisterMigrator) MigrateAccount( // Check storage health after migration, if enabled. 
if m.checkStorageHealthAfterMigration { - mr, err := newMigratorRuntime(address, newPayloads) + mr, err := NewAtreeRegisterMigratorRuntime(address, newPayloads) if err != nil { return nil, fmt.Errorf("failed to create migrator runtime: %w", err) } @@ -180,7 +180,7 @@ func (m *AtreeRegisterMigrator) MigrateAccount( } func (m *AtreeRegisterMigrator) migrateAccountStorage( - mr *migratorRuntime, + mr *AtreeRegisterMigratorRuntime, storageMapIds map[string]struct{}, ) (map[flow.RegisterID]flow.RegisterValue, error) { @@ -207,7 +207,7 @@ func (m *AtreeRegisterMigrator) migrateAccountStorage( } func (m *AtreeRegisterMigrator) convertStorageDomain( - mr *migratorRuntime, + mr *AtreeRegisterMigratorRuntime, storageMapIds map[string]struct{}, domain string, ) error { @@ -285,7 +285,7 @@ func (m *AtreeRegisterMigrator) convertStorageDomain( } func (m *AtreeRegisterMigrator) validateChangesAndCreateNewRegisters( - mr *migratorRuntime, + mr *AtreeRegisterMigratorRuntime, changes map[flow.RegisterID]flow.RegisterValue, storageMapIds map[string]struct{}, ) ([]*ledger.Payload, error) { @@ -420,7 +420,7 @@ func (m *AtreeRegisterMigrator) validateChangesAndCreateNewRegisters( } func (m *AtreeRegisterMigrator) cloneValue( - mr *migratorRuntime, + mr *AtreeRegisterMigratorRuntime, value interpreter.Value, ) (interpreter.Value, error) { diff --git a/cmd/util/ledger/migrations/migrator_runtime.go b/cmd/util/ledger/migrations/atree_register_migrator_runtime.go similarity index 55% rename from cmd/util/ledger/migrations/migrator_runtime.go rename to cmd/util/ledger/migrations/atree_register_migrator_runtime.go index 6fe67c9017a..77f52d9198f 100644 --- a/cmd/util/ledger/migrations/migrator_runtime.go +++ b/cmd/util/ledger/migrations/atree_register_migrator_runtime.go @@ -13,12 +13,12 @@ import ( "github.com/onflow/flow-go/ledger" ) -// migratorRuntime is a runtime that can be used to run a migration on a single account -func newMigratorRuntime( +// NewAtreeRegisterMigratorRuntime returns 
a new runtime to be used with the AtreeRegisterMigrator. +func NewAtreeRegisterMigratorRuntime( address common.Address, payloads []*ledger.Payload, ) ( - *migratorRuntime, + *AtreeRegisterMigratorRuntime, error, ) { snapshot, err := util.NewPayloadSnapshot(payloads) @@ -42,23 +42,23 @@ func newMigratorRuntime( return nil, err } - return &migratorRuntime{ - Address: address, - Payloads: payloads, - Snapshot: snapshot, - TransactionState: transactionState, - Interpreter: inter, - Storage: storage, - Accounts: accountsAtreeLedger, + return &AtreeRegisterMigratorRuntime{ + Address: address, + Payloads: payloads, + Snapshot: snapshot, + TransactionState: transactionState, + Interpreter: inter, + Storage: storage, + AccountsAtreeLedger: accountsAtreeLedger, }, nil } -type migratorRuntime struct { - Snapshot *util.PayloadSnapshot - TransactionState state.NestedTransactionPreparer - Interpreter *interpreter.Interpreter - Storage *runtime.Storage - Payloads []*ledger.Payload - Address common.Address - Accounts *util.AccountsAtreeLedger +type AtreeRegisterMigratorRuntime struct { + Snapshot *util.PayloadSnapshot + TransactionState state.NestedTransactionPreparer + Interpreter *interpreter.Interpreter + Storage *runtime.Storage + Payloads []*ledger.Payload + Address common.Address + AccountsAtreeLedger *util.AccountsAtreeLedger } diff --git a/cmd/util/ledger/migrations/cadence_value_validation_test.go b/cmd/util/ledger/migrations/cadence_value_validation_test.go index 6ba449b987d..117e27ea761 100644 --- a/cmd/util/ledger/migrations/cadence_value_validation_test.go +++ b/cmd/util/ledger/migrations/cadence_value_validation_test.go @@ -52,7 +52,7 @@ func TestValidateCadenceValues(t *testing.T) { accountStatus.ToBytes(), ) - mr, err := newMigratorRuntime(address, []*ledger.Payload{accountStatusPayload}) + mr, err := NewAtreeRegisterMigratorRuntime(address, []*ledger.Payload{accountStatusPayload}) require.NoError(t, err) // Create new storage map @@ -140,7 +140,7 @@ func 
createTestPayloads(t *testing.T, address common.Address, domain string) []* accountStatus.ToBytes(), ) - mr, err := newMigratorRuntime(address, []*ledger.Payload{accountStatusPayload}) + mr, err := NewAtreeRegisterMigratorRuntime(address, []*ledger.Payload{accountStatusPayload}) require.NoError(t, err) // Create new storage map From b171fa82c51c40c510383146c628ede2e5168807 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20M=C3=BCller?= Date: Wed, 10 Apr 2024 12:03:20 -0700 Subject: [PATCH 096/148] remove unnecessary report writer factory field --- cmd/util/ledger/migrations/atree_register_migration.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/cmd/util/ledger/migrations/atree_register_migration.go b/cmd/util/ledger/migrations/atree_register_migration.go index 82674a0f766..f565e958256 100644 --- a/cmd/util/ledger/migrations/atree_register_migration.go +++ b/cmd/util/ledger/migrations/atree_register_migration.go @@ -31,7 +31,6 @@ type AtreeRegisterMigrator struct { sampler zerolog.Sampler rw reporters.ReportWriter - rwf reporters.ReportWriterFactory nWorkers int @@ -58,7 +57,6 @@ func NewAtreeRegisterMigrator( migrator := &AtreeRegisterMigrator{ sampler: sampler, - rwf: rwf, rw: rwf.ReportWriter("atree-register-migrator"), validateMigratedValues: validateMigratedValues, logVerboseValidationError: logVerboseValidationError, From a350225092bdb38f1e303875647b802679a70ab4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20M=C3=BCller?= Date: Wed, 10 Apr 2024 13:28:53 -0700 Subject: [PATCH 097/148] update to Cadence v0.42.10 --- go.mod | 2 +- go.sum | 4 ++-- insecure/go.mod | 2 +- insecure/go.sum | 4 ++-- integration/go.mod | 2 +- integration/go.sum | 4 ++-- 6 files changed, 9 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index ce632fb3e6b..8ddab239286 100644 --- a/go.mod +++ b/go.mod @@ -51,7 +51,7 @@ require ( github.com/multiformats/go-multiaddr-dns v0.3.1 github.com/multiformats/go-multihash v0.2.3 github.com/onflow/atree v0.6.0 - 
github.com/onflow/cadence v0.42.9 + github.com/onflow/cadence v0.42.10 github.com/onflow/crypto v0.25.1 github.com/onflow/flow v0.3.4 github.com/onflow/flow-core-contracts/lib/go/contracts v0.15.1 diff --git a/go.sum b/go.sum index 32e504a9426..e38d2ea2a20 100644 --- a/go.sum +++ b/go.sum @@ -1351,8 +1351,8 @@ github.com/onflow/atree v0.6.0/go.mod h1:gBHU0M05qCbv9NN0kijLWMgC47gHVNBIp4KmsVF github.com/onflow/boxo v0.0.0-20240201202436-f2477b92f483 h1:LpiQhTAfM9CAmNVEs0n//cBBgCg+vJSiIxTHYUklZ84= github.com/onflow/boxo v0.0.0-20240201202436-f2477b92f483/go.mod h1:pIZgTWdm3k3pLF9Uq6MB8JEcW07UDwNJjlXW1HELW80= github.com/onflow/cadence v0.20.1/go.mod h1:7mzUvPZUIJztIbr9eTvs+fQjWWHTF8veC+yk4ihcNIA= -github.com/onflow/cadence v0.42.9 h1:EX+eak/Jjy9PyKcAEmOViGOHMyP/nCOwJO+deodZlJE= -github.com/onflow/cadence v0.42.9/go.mod h1:1wFd+LiNiN6qoZXof3MBdpM6d8BsxbVIxOA77LbIYmE= +github.com/onflow/cadence v0.42.10 h1:3oC5ceeXhdCrhHcf9H0yYXQKW3Tw/vkSXLe+PUZa4i0= +github.com/onflow/cadence v0.42.10/go.mod h1:1wFd+LiNiN6qoZXof3MBdpM6d8BsxbVIxOA77LbIYmE= github.com/onflow/crypto v0.25.1 h1:0txy2PKPMM873JbpxQNbJmuOJtD56bfs48RQfm0ts5A= github.com/onflow/crypto v0.25.1/go.mod h1:C8FbaX0x8y+FxWjbkHy0Q4EASCDR9bSPWZqlpCLYyVI= github.com/onflow/flow v0.3.4 h1:FXUWVdYB90f/rjNcY0Owo30gL790tiYff9Pb/sycXYE= diff --git a/insecure/go.mod b/insecure/go.mod index 60e6b23d162..3195eeaa334 100644 --- a/insecure/go.mod +++ b/insecure/go.mod @@ -204,7 +204,7 @@ require ( github.com/multiformats/go-varint v0.0.7 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect github.com/onflow/atree v0.6.0 // indirect - github.com/onflow/cadence v0.42.9 // indirect + github.com/onflow/cadence v0.42.10 // indirect github.com/onflow/flow-core-contracts/lib/go/contracts v0.15.1 // indirect github.com/onflow/flow-core-contracts/lib/go/templates v0.15.1 // indirect github.com/onflow/flow-ft/lib/go/contracts v0.7.1-0.20230711213910-baad011d2b13 // indirect diff --git a/insecure/go.sum b/insecure/go.sum index 
f584e0922cf..29793ab10f4 100644 --- a/insecure/go.sum +++ b/insecure/go.sum @@ -1316,8 +1316,8 @@ github.com/onflow/atree v0.1.0-beta1.0.20211027184039-559ee654ece9/go.mod h1:+6x github.com/onflow/atree v0.6.0 h1:j7nQ2r8npznx4NX39zPpBYHmdy45f4xwoi+dm37Jk7c= github.com/onflow/atree v0.6.0/go.mod h1:gBHU0M05qCbv9NN0kijLWMgC47gHVNBIp4KmsVFi0tc= github.com/onflow/cadence v0.20.1/go.mod h1:7mzUvPZUIJztIbr9eTvs+fQjWWHTF8veC+yk4ihcNIA= -github.com/onflow/cadence v0.42.9 h1:EX+eak/Jjy9PyKcAEmOViGOHMyP/nCOwJO+deodZlJE= -github.com/onflow/cadence v0.42.9/go.mod h1:1wFd+LiNiN6qoZXof3MBdpM6d8BsxbVIxOA77LbIYmE= +github.com/onflow/cadence v0.42.10 h1:3oC5ceeXhdCrhHcf9H0yYXQKW3Tw/vkSXLe+PUZa4i0= +github.com/onflow/cadence v0.42.10/go.mod h1:1wFd+LiNiN6qoZXof3MBdpM6d8BsxbVIxOA77LbIYmE= github.com/onflow/crypto v0.25.1 h1:0txy2PKPMM873JbpxQNbJmuOJtD56bfs48RQfm0ts5A= github.com/onflow/crypto v0.25.1/go.mod h1:C8FbaX0x8y+FxWjbkHy0Q4EASCDR9bSPWZqlpCLYyVI= github.com/onflow/flow-core-contracts/lib/go/contracts v0.15.1 h1:xF5wHug6H8vKfz7p1LYy9jck6eD9K1HLjTdi6o4kg1k= diff --git a/integration/go.mod b/integration/go.mod index d7bfe88b5f5..8da959fe004 100644 --- a/integration/go.mod +++ b/integration/go.mod @@ -20,7 +20,7 @@ require ( github.com/ipfs/go-ds-badger2 v0.1.3 github.com/ipfs/go-ipfs-blockstore v1.3.0 github.com/libp2p/go-libp2p v0.32.2 - github.com/onflow/cadence v0.42.9 + github.com/onflow/cadence v0.42.10 github.com/onflow/crypto v0.25.1 github.com/onflow/flow-core-contracts/lib/go/contracts v0.15.1 github.com/onflow/flow-core-contracts/lib/go/templates v0.15.1 diff --git a/integration/go.sum b/integration/go.sum index f5bac96f1bd..3cc3f03a335 100644 --- a/integration/go.sum +++ b/integration/go.sum @@ -1406,8 +1406,8 @@ github.com/onflow/atree v0.1.0-beta1.0.20211027184039-559ee654ece9/go.mod h1:+6x github.com/onflow/atree v0.6.1-0.20230711151834-86040b30171f h1:Z8/PgTqOgOg02MTRpTBYO2k16FE6z4wEOtaC2WBR9Xo= github.com/onflow/atree v0.6.1-0.20230711151834-86040b30171f/go.mod 
h1:xvP61FoOs95K7IYdIYRnNcYQGf4nbF/uuJ0tHf4DRuM= github.com/onflow/cadence v0.20.1/go.mod h1:7mzUvPZUIJztIbr9eTvs+fQjWWHTF8veC+yk4ihcNIA= -github.com/onflow/cadence v0.42.9 h1:EX+eak/Jjy9PyKcAEmOViGOHMyP/nCOwJO+deodZlJE= -github.com/onflow/cadence v0.42.9/go.mod h1:1wFd+LiNiN6qoZXof3MBdpM6d8BsxbVIxOA77LbIYmE= +github.com/onflow/cadence v0.42.10 h1:3oC5ceeXhdCrhHcf9H0yYXQKW3Tw/vkSXLe+PUZa4i0= +github.com/onflow/cadence v0.42.10/go.mod h1:1wFd+LiNiN6qoZXof3MBdpM6d8BsxbVIxOA77LbIYmE= github.com/onflow/crypto v0.25.1 h1:0txy2PKPMM873JbpxQNbJmuOJtD56bfs48RQfm0ts5A= github.com/onflow/crypto v0.25.1/go.mod h1:C8FbaX0x8y+FxWjbkHy0Q4EASCDR9bSPWZqlpCLYyVI= github.com/onflow/flow-core-contracts/lib/go/contracts v0.15.1 h1:xF5wHug6H8vKfz7p1LYy9jck6eD9K1HLjTdi6o4kg1k= From 0a4cfb1ed4bb9b5aeb0c26031142f3678cb70393 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20M=C3=BCller?= Date: Wed, 10 Apr 2024 15:46:33 -0700 Subject: [PATCH 098/148] remove unused AccountsAtreeLedger --- cmd/util/ledger/migrations/utils.go | 53 ----------------------------- 1 file changed, 53 deletions(-) diff --git a/cmd/util/ledger/migrations/utils.go b/cmd/util/ledger/migrations/utils.go index ea790bf3ff1..2f7ca8ab6c9 100644 --- a/cmd/util/ledger/migrations/utils.go +++ b/cmd/util/ledger/migrations/utils.go @@ -8,63 +8,10 @@ import ( "github.com/onflow/cadence/runtime/common" "github.com/onflow/cadence/runtime/stdlib" - "github.com/onflow/flow-go/fvm/environment" "github.com/onflow/flow-go/ledger" "github.com/onflow/flow-go/ledger/common/convert" - "github.com/onflow/flow-go/model/flow" ) -type AccountsAtreeLedger struct { - Accounts environment.Accounts -} - -func NewAccountsAtreeLedger(accounts environment.Accounts) *AccountsAtreeLedger { - return &AccountsAtreeLedger{Accounts: accounts} -} - -var _ atree.Ledger = &AccountsAtreeLedger{} - -func (a *AccountsAtreeLedger) GetValue(owner, key []byte) ([]byte, error) { - v, err := a.Accounts.GetValue( - flow.NewRegisterID( - flow.BytesToAddress(owner), - 
string(key))) - if err != nil { - return nil, fmt.Errorf("getting value failed: %w", err) - } - return v, nil -} - -func (a *AccountsAtreeLedger) SetValue(owner, key, value []byte) error { - err := a.Accounts.SetValue( - flow.NewRegisterID( - flow.BytesToAddress(owner), - string(key)), - value) - if err != nil { - return fmt.Errorf("setting value failed: %w", err) - } - return nil -} - -func (a *AccountsAtreeLedger) ValueExists(owner, key []byte) (exists bool, err error) { - v, err := a.GetValue(owner, key) - if err != nil { - return false, fmt.Errorf("checking value existence failed: %w", err) - } - - return len(v) > 0, nil -} - -// AllocateStorageIndex allocates new storage index under the owner accounts to store a new register -func (a *AccountsAtreeLedger) AllocateStorageIndex(owner []byte) (atree.StorageIndex, error) { - v, err := a.Accounts.AllocateStorageIndex(flow.BytesToAddress(owner)) - if err != nil { - return atree.StorageIndex{}, fmt.Errorf("storage address allocation failed: %w", err) - } - return v, nil -} - func checkStorageHealth( address common.Address, storage *runtime.Storage, From 17d27c0b2b76cd158bcc0fac475126ba640a3b7a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20M=C3=BCller?= Date: Wed, 10 Apr 2024 15:46:42 -0700 Subject: [PATCH 099/148] improve comment --- .../cmd/execution-state-extract/execution_state_extract.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/cmd/util/cmd/execution-state-extract/execution_state_extract.go b/cmd/util/cmd/execution-state-extract/execution_state_extract.go index 9f302ad0f5a..4b991936ae7 100644 --- a/cmd/util/cmd/execution-state-extract/execution_state_extract.go +++ b/cmd/util/cmd/execution-state-extract/execution_state_extract.go @@ -371,8 +371,7 @@ func newMigrations( &migrators.DeduplicateContractNamesMigration{}, - // This will fix storage used discrepancies caused by the - // DeduplicateContractNamesMigration. 
+ // This will fix storage used discrepancies caused by the previous migrations &migrators.AccountUsageMigrator{}, }), } From 8a0839b71b432d402bb0aade8f09944582a52cee Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20M=C3=BCller?= Date: Wed, 10 Apr 2024 15:53:02 -0700 Subject: [PATCH 100/148] generate set of all storage map domains from slice --- .../migrations/atree_register_migration.go | 6 +++--- .../migrations/cadence_value_validation.go | 2 +- cmd/util/ledger/migrations/utils.go | 18 ++++++++---------- 3 files changed, 12 insertions(+), 14 deletions(-) diff --git a/cmd/util/ledger/migrations/atree_register_migration.go b/cmd/util/ledger/migrations/atree_register_migration.go index 6722216e34a..32b7492f9a6 100644 --- a/cmd/util/ledger/migrations/atree_register_migration.go +++ b/cmd/util/ledger/migrations/atree_register_migration.go @@ -8,9 +8,9 @@ import ( runtime2 "runtime" "time" + "github.com/onflow/atree" "github.com/rs/zerolog" - "github.com/onflow/atree" "github.com/onflow/cadence/runtime" "github.com/onflow/cadence/runtime/common" "github.com/onflow/cadence/runtime/interpreter" @@ -185,7 +185,7 @@ func (m *AtreeRegisterMigrator) migrateAccountStorage( ) (map[flow.RegisterID]flow.RegisterValue, error) { // iterate through all domains and migrate them - for _, domain := range domains { + for _, domain := range allStorageMapDomains { err := m.convertStorageDomain(mr, storageMapIds, domain) if err != nil { return nil, fmt.Errorf("failed to convert storage domain %s : %w", domain, err) @@ -360,7 +360,7 @@ func (m *AtreeRegisterMigrator) validateChangesAndCreateNewRegisters( continue } - if _, isADomainKey := domainsLookupMap[id.Key]; isADomainKey { + if _, isADomainKey := allStorageMapDomainsSet[id.Key]; isADomainKey { // this is expected. 
Move it to the new payloads newPayloads = append(newPayloads, value) continue diff --git a/cmd/util/ledger/migrations/cadence_value_validation.go b/cmd/util/ledger/migrations/cadence_value_validation.go index 755b5ffd538..6850a1e6b13 100644 --- a/cmd/util/ledger/migrations/cadence_value_validation.go +++ b/cmd/util/ledger/migrations/cadence_value_validation.go @@ -33,7 +33,7 @@ func validateCadenceValues( } // Iterate through all domains and compare cadence values. - for _, domain := range domains { + for _, domain := range allStorageMapDomains { err := validateStorageDomain(address, oldRuntime, newRuntime, domain, log, verboseLogging) if err != nil { return err diff --git a/cmd/util/ledger/migrations/utils.go b/cmd/util/ledger/migrations/utils.go index 2f7ca8ab6c9..f9ce19b84e8 100644 --- a/cmd/util/ledger/migrations/utils.go +++ b/cmd/util/ledger/migrations/utils.go @@ -40,15 +40,14 @@ func checkStorageHealth( } } - for _, domain := range domains { + for _, domain := range allStorageMapDomains { _ = storage.GetStorageMap(address, domain, false) } return storage.CheckHealth() } -// convert all domains -var domains = []string{ +var allStorageMapDomains = []string{ common.PathDomainStorage.Identifier(), common.PathDomainPrivate.Identifier(), common.PathDomainPublic.Identifier(), @@ -57,11 +56,10 @@ var domains = []string{ stdlib.CapabilityControllerStorageDomain, } -var domainsLookupMap = map[string]struct{}{ - common.PathDomainStorage.Identifier(): {}, - common.PathDomainPrivate.Identifier(): {}, - common.PathDomainPublic.Identifier(): {}, - runtime.StorageDomainContract: {}, - stdlib.InboxStorageDomain: {}, - stdlib.CapabilityControllerStorageDomain: {}, +var allStorageMapDomainsSet = map[string]struct{}{} + +func init() { + for _, domain := range allStorageMapDomains { + allStorageMapDomainsSet[domain] = struct{}{} + } } From fa9f719a26a3f688a32b541d70cb9dc9ee2e2cb2 Mon Sep 17 00:00:00 2001 From: Janez Podhostnik Date: Fri, 12 Apr 2024 14:28:01 +0200 Subject: 
[PATCH 101/148] remove temporary code --- integration/benchmark/server/control.sh | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/integration/benchmark/server/control.sh b/integration/benchmark/server/control.sh index 02bebb100aa..26ecad5a289 100755 --- a/integration/benchmark/server/control.sh +++ b/integration/benchmark/server/control.sh @@ -21,8 +21,7 @@ commits_file="/opt/commits.recent" load_types=("token-transfer" "create-account" "ledger-heavy" "evm-transfer") # get the merge commits from the last week from master ordered by author date -# TEMPORARY: DO NOT MERGE!! -for commit in $(git log --first-parent --format="%S:%H" origin/janez/improve-tps-metering --since '1 week' --author-date-order | head -1) +for commit in $(git log --merges --first-parent --format="%S:%H" origin/master --since '1 week' --author-date-order ) do for load in "${load_types[@]}" do From f4ea637cc0b3465d2dc7a108679b12e4b813513c Mon Sep 17 00:00:00 2001 From: Gregor Gololicic Date: Fri, 12 Apr 2024 15:06:56 +0200 Subject: [PATCH 102/148] add timestamp to block type --- fvm/evm/types/block.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/fvm/evm/types/block.go b/fvm/evm/types/block.go index a7f10938b67..ba0c404d8e3 100644 --- a/fvm/evm/types/block.go +++ b/fvm/evm/types/block.go @@ -19,6 +19,10 @@ type Block struct { // Height returns the height of this block Height uint64 + // Timestamp represents the time at which the block was created + // Note that this value must be provided from the FVM Block + Timestamp int64 + // holds the total amount of the native token deposited in the evm side. 
(in attoflow) TotalSupply *big.Int @@ -66,6 +70,7 @@ func (b *Block) AppendTxHash(txHash gethCommon.Hash) { func NewBlock( parentBlockHash gethCommon.Hash, height uint64, + timestamp int64, totalSupply *big.Int, receiptRoot gethCommon.Hash, txHashes []gethCommon.Hash, @@ -73,6 +78,7 @@ func NewBlock( return &Block{ ParentBlockHash: parentBlockHash, Height: height, + Timestamp: timestamp, TotalSupply: totalSupply, ReceiptRoot: receiptRoot, TransactionHashes: txHashes, From b1842643290bf1716207c6fe6fed46be63c678f6 Mon Sep 17 00:00:00 2001 From: Gregor Gololicic Date: Fri, 12 Apr 2024 15:07:16 +0200 Subject: [PATCH 103/148] retrieve timestamp on new block --- fvm/evm/handler/blockstore.go | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/fvm/evm/handler/blockstore.go b/fvm/evm/handler/blockstore.go index a93fc9bec88..aadcb04e499 100644 --- a/fvm/evm/handler/blockstore.go +++ b/fvm/evm/handler/blockstore.go @@ -1,6 +1,7 @@ package handler import ( + "fmt" gethCommon "github.com/onflow/go-ethereum/common" "github.com/onflow/flow-go/fvm/evm/types" @@ -35,6 +36,19 @@ func (bs *BlockStore) BlockProposal() (*types.Block, error) { return bs.blockProposal, nil } + cadenceHeight, err := bs.backend.GetCurrentBlockHeight() + if err != nil { + return nil, err + } + + cadenceBlock, found, err := bs.backend.GetBlockAtHeight(cadenceHeight) + if err != nil { + return nil, err + } + if !found { + return nil, fmt.Errorf("cadence block not found") + } + lastExecutedBlock, err := bs.LatestBlock() if err != nil { return nil, err @@ -45,8 +59,10 @@ func (bs *BlockStore) BlockProposal() (*types.Block, error) { return nil, err } - bs.blockProposal = types.NewBlock(parentHash, + bs.blockProposal = types.NewBlock( + parentHash, lastExecutedBlock.Height+1, + cadenceBlock.Timestamp, lastExecutedBlock.TotalSupply, gethCommon.Hash{}, make([]gethCommon.Hash, 0), From 1f712ce06887bc571b3b106aaaffdcedfe02e167 Mon Sep 17 00:00:00 2001 From: Gregor Gololicic Date: 
Fri, 12 Apr 2024 15:12:18 +0200 Subject: [PATCH 104/148] emit timestamp on block event --- fvm/evm/types/events.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/fvm/evm/types/events.go b/fvm/evm/types/events.go index 920f6216a2c..f4dbd6ea121 100644 --- a/fvm/evm/types/events.go +++ b/fvm/evm/types/events.go @@ -96,7 +96,8 @@ func init() { ) } -// we might break this event into two (tx included /tx executed) if size becomes an issue +// todo we might have to break this event into two (tx included /tx executed) if size becomes an issue + type TransactionExecutedPayload struct { BlockHeight uint64 TxEncoded []byte @@ -175,6 +176,7 @@ var blockExecutedEventCadenceType = &cadence.EventType{ Fields: []cadence.Field{ cadence.NewField("height", cadence.UInt64Type{}), cadence.NewField("hash", cadence.StringType{}), + cadence.NewField("timestamp", cadence.Int64Type{}), cadence.NewField("totalSupply", cadence.IntType{}), cadence.NewField("parentHash", cadence.StringType{}), cadence.NewField("receiptRoot", cadence.StringType{}), @@ -203,6 +205,7 @@ func (p *BlockExecutedEventPayload) CadenceEvent() (cadence.Event, error) { fields := []cadence.Value{ cadence.NewUInt64(p.Block.Height), cadence.String(blockHash.String()), + cadence.NewInt64(p.Block.Timestamp), cadence.NewIntFromBig(p.Block.TotalSupply), cadence.String(p.Block.ParentBlockHash.String()), cadence.String(p.Block.ReceiptRoot.String()), From 307620a39e422b53ad8fc638be55ce1bf9f1d41b Mon Sep 17 00:00:00 2001 From: Gregor Gololicic Date: Fri, 12 Apr 2024 15:18:12 +0200 Subject: [PATCH 105/148] default test block info --- fvm/evm/testutils/backend.go | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/fvm/evm/testutils/backend.go b/fvm/evm/testutils/backend.go index a8c831ca7d4..1beb2dfbf3c 100644 --- a/fvm/evm/testutils/backend.go +++ b/fvm/evm/testutils/backend.go @@ -4,6 +4,7 @@ import ( "crypto/rand" "encoding/binary" "fmt" + 
"github.com/onflow/cadence/runtime/stdlib" "testing" "github.com/onflow/atree" @@ -35,7 +36,7 @@ func RunWithTestBackend(t testing.TB, f func(*TestBackend)) { TestValueStore: GetSimpleValueStore(), testEventEmitter: getSimpleEventEmitter(), testMeter: getSimpleMeter(), - TestBlockInfo: &TestBlockInfo{}, + TestBlockInfo: getSimpleBlockStore(), TestRandomGenerator: getSimpleRandomGenerator(), TestContractFunctionInvoker: &TestContractFunctionInvoker{}, } @@ -157,6 +158,24 @@ func getSimpleMeter() *testMeter { } } +func getSimpleBlockStore() *TestBlockInfo { + var index int64 = 1 + return &TestBlockInfo{ + GetCurrentBlockHeightFunc: func() (uint64, error) { + index++ + return uint64(index), nil + }, + GetBlockAtHeightFunc: func(height uint64) (runtime.Block, bool, error) { + return runtime.Block{ + Height: height, + View: 0, + Hash: stdlib.BlockHash{}, + Timestamp: int64(height), + }, true, nil + }, + } +} + type TestBackend struct { *TestValueStore *testMeter From 9fab4847404563e099b23984c45d2b76b5eedef1 Mon Sep 17 00:00:00 2001 From: Gregor Gololicic Date: Fri, 12 Apr 2024 15:22:56 +0200 Subject: [PATCH 106/148] change timestamp type due to rlp serialisation issue --- fvm/evm/handler/blockstore.go | 2 +- fvm/evm/types/block.go | 4 ++-- fvm/evm/types/events.go | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/fvm/evm/handler/blockstore.go b/fvm/evm/handler/blockstore.go index aadcb04e499..0fdd66b8806 100644 --- a/fvm/evm/handler/blockstore.go +++ b/fvm/evm/handler/blockstore.go @@ -62,7 +62,7 @@ func (bs *BlockStore) BlockProposal() (*types.Block, error) { bs.blockProposal = types.NewBlock( parentHash, lastExecutedBlock.Height+1, - cadenceBlock.Timestamp, + uint64(cadenceBlock.Timestamp), lastExecutedBlock.TotalSupply, gethCommon.Hash{}, make([]gethCommon.Hash, 0), diff --git a/fvm/evm/types/block.go b/fvm/evm/types/block.go index ba0c404d8e3..d5ea32979e9 100644 --- a/fvm/evm/types/block.go +++ b/fvm/evm/types/block.go @@ -21,7 +21,7 @@ type Block 
struct { // Timestamp represents the time at which the block was created // Note that this value must be provided from the FVM Block - Timestamp int64 + Timestamp uint64 // holds the total amount of the native token deposited in the evm side. (in attoflow) TotalSupply *big.Int @@ -70,7 +70,7 @@ func (b *Block) AppendTxHash(txHash gethCommon.Hash) { func NewBlock( parentBlockHash gethCommon.Hash, height uint64, - timestamp int64, + timestamp uint64, totalSupply *big.Int, receiptRoot gethCommon.Hash, txHashes []gethCommon.Hash, diff --git a/fvm/evm/types/events.go b/fvm/evm/types/events.go index f4dbd6ea121..319274041a7 100644 --- a/fvm/evm/types/events.go +++ b/fvm/evm/types/events.go @@ -205,7 +205,7 @@ func (p *BlockExecutedEventPayload) CadenceEvent() (cadence.Event, error) { fields := []cadence.Value{ cadence.NewUInt64(p.Block.Height), cadence.String(blockHash.String()), - cadence.NewInt64(p.Block.Timestamp), + cadence.NewUInt64(p.Block.Timestamp), cadence.NewIntFromBig(p.Block.TotalSupply), cadence.String(p.Block.ParentBlockHash.String()), cadence.String(p.Block.ReceiptRoot.String()), From c39adbf46c093bc5ced0c1ae873780b4753f3ca1 Mon Sep 17 00:00:00 2001 From: Gregor Gololicic Date: Fri, 12 Apr 2024 15:42:16 +0200 Subject: [PATCH 107/148] fix timestamp event type --- fvm/evm/types/events.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fvm/evm/types/events.go b/fvm/evm/types/events.go index 319274041a7..15a061bcea9 100644 --- a/fvm/evm/types/events.go +++ b/fvm/evm/types/events.go @@ -176,7 +176,7 @@ var blockExecutedEventCadenceType = &cadence.EventType{ Fields: []cadence.Field{ cadence.NewField("height", cadence.UInt64Type{}), cadence.NewField("hash", cadence.StringType{}), - cadence.NewField("timestamp", cadence.Int64Type{}), + cadence.NewField("timestamp", cadence.UInt64Type{}), cadence.NewField("totalSupply", cadence.IntType{}), cadence.NewField("parentHash", cadence.StringType{}), cadence.NewField("receiptRoot", cadence.StringType{}), 
From ab1ee1e89abbbc658992e0a1fd5005948bafa50b Mon Sep 17 00:00:00 2001 From: Gregor Gololicic Date: Fri, 12 Apr 2024 15:44:13 +0200 Subject: [PATCH 108/148] test timestamp on block --- fvm/evm/evm_test.go | 9 ++++++++- fvm/evm/types/events_test.go | 3 +++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/fvm/evm/evm_test.go b/fvm/evm/evm_test.go index 7d5b0bdc817..fc9f01bd620 100644 --- a/fvm/evm/evm_test.go +++ b/fvm/evm/evm_test.go @@ -1058,6 +1058,7 @@ func RunWithNewEnvironment( fvm.WithAuthorizationChecksEnabled(false), fvm.WithSequenceNumberCheckAndIncrementEnabled(false), fvm.WithEntropyProvider(testutil.EntropyProviderFixture(nil)), + fvm.WithBlocks(blocks), } ctx := fvm.NewContext(opts...) @@ -1077,7 +1078,13 @@ func RunWithNewEnvironment( snapshotTree = snapshotTree.Append(executionSnapshot) - f(fvm.NewContextFromParent(ctx, fvm.WithEVMEnabled(true)), vm, snapshotTree, testContract, testAccount) + f( + fvm.NewContextFromParent(ctx, fvm.WithEVMEnabled(true)), + vm, + snapshotTree, + testContract, + testAccount, + ) }) }) }) diff --git a/fvm/evm/types/events_test.go b/fvm/evm/types/events_test.go index 0904afc1fda..cdea389c0d9 100644 --- a/fvm/evm/types/events_test.go +++ b/fvm/evm/types/events_test.go @@ -22,6 +22,7 @@ import ( type blockEventPayload struct { Height uint64 `cadence:"height"` Hash string `cadence:"hash"` + Timestamp uint64 `cadence:"timestamp"` TotalSupply cadence.Int `cadence:"totalSupply"` ParentBlockHash string `cadence:"parentHash"` ReceiptRoot string `cadence:"receiptRoot"` @@ -47,6 +48,7 @@ func TestEVMBlockExecutedEventCCFEncodingDecoding(t *testing.T) { block := &types.Block{ Height: 2, + Timestamp: 100, TotalSupply: big.NewInt(1500), ParentBlockHash: gethCommon.HexToHash("0x2813452cff514c3054ac9f40cd7ce1b016cc78ab7f99f1c6d49708837f6e06d1"), ReceiptRoot: gethCommon.Hash{}, @@ -70,6 +72,7 @@ func TestEVMBlockExecutedEventCCFEncodingDecoding(t *testing.T) { assert.Equal(t, bep.Hash, blockHash.Hex()) assert.Equal(t, 
bep.TotalSupply.Value, block.TotalSupply) + assert.Equal(t, bep.Timestamp, block.Timestamp) assert.Equal(t, bep.ParentBlockHash, block.ParentBlockHash.Hex()) assert.Equal(t, bep.ReceiptRoot, block.ReceiptRoot.Hex()) From cc662a89b4b5ab9d14432326c8fa0db3592b38d9 Mon Sep 17 00:00:00 2001 From: Gregor Gololicic Date: Fri, 12 Apr 2024 17:24:08 +0200 Subject: [PATCH 109/148] fix fvm test with blocks info --- fvm/fvm_test.go | 33 +++++++++++++++++++++------------ 1 file changed, 21 insertions(+), 12 deletions(-) diff --git a/fvm/fvm_test.go b/fvm/fvm_test.go index 900a9f7a56f..47dbd19ee4b 100644 --- a/fvm/fvm_test.go +++ b/fvm/fvm_test.go @@ -4,6 +4,7 @@ import ( "crypto/rand" "encoding/hex" "fmt" + envMock "github.com/onflow/flow-go/fvm/environment/mock" "math" "strings" "testing" @@ -3061,12 +3062,24 @@ func TestTransientNetworkCoreContractAddresses(t *testing.T) { } func TestEVM(t *testing.T) { + blocks := new(envMock.Blocks) + block1 := unittest.BlockFixture() + blocks.On("ByHeightFrom", + block1.Header.Height, + block1.Header, + ).Return(block1.Header, nil) + + ctxOpts := []fvm.Option{ + fvm.WithChain(flow.Emulator.Chain()), + fvm.WithEVMEnabled(true), + fvm.WithBlocks(blocks), + fvm.WithBlockHeader(block1.Header), + fvm.WithCadenceLogging(true), + } + t.Run("successful transaction", newVMTest(). withBootstrapProcedureOptions(fvm.WithSetupEVMEnabled(true)). - withContextOptions( - fvm.WithEVMEnabled(true), - fvm.WithCadenceLogging(true), - ). + withContextOptions(ctxOpts...). run(func( t *testing.T, vm fvm.VM, @@ -3122,10 +3135,7 @@ func TestEVM(t *testing.T) { // this test makes sure the execution error is correctly handled and returned as a correct type t.Run("execution reverted", newVMTest(). withBootstrapProcedureOptions(fvm.WithSetupEVMEnabled(true)). - withContextOptions( - fvm.WithChain(flow.Emulator.Chain()), - fvm.WithEVMEnabled(true), - ). + withContextOptions(ctxOpts...). 
run(func( t *testing.T, vm fvm.VM, @@ -3162,10 +3172,7 @@ func TestEVM(t *testing.T) { // we have implemented a snapshot wrapper to return an error from the EVM t.Run("internal evm error handling", newVMTest(). withBootstrapProcedureOptions(fvm.WithSetupEVMEnabled(true)). - withContextOptions( - fvm.WithChain(flow.Emulator.Chain()), - fvm.WithEVMEnabled(true), - ). + withContextOptions(ctxOpts...). run(func( t *testing.T, vm fvm.VM, @@ -3226,6 +3233,8 @@ func TestEVM(t *testing.T) { // so we have to use emulator here so that the EVM storage contract is deployed // to the 5th address fvm.WithChain(flow.Emulator.Chain()), + fvm.WithBlocks(blocks), + fvm.WithBlockHeader(block1.Header), ). run(func( t *testing.T, From 013c10fc76fd597f597e6b5a756488444029b233 Mon Sep 17 00:00:00 2001 From: Gregor Gololicic Date: Fri, 12 Apr 2024 18:08:27 +0200 Subject: [PATCH 110/148] fix lint --- fvm/evm/handler/blockstore.go | 1 + fvm/evm/testutils/backend.go | 3 ++- fvm/fvm_test.go | 3 ++- 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/fvm/evm/handler/blockstore.go b/fvm/evm/handler/blockstore.go index 0fdd66b8806..b827ffe0088 100644 --- a/fvm/evm/handler/blockstore.go +++ b/fvm/evm/handler/blockstore.go @@ -2,6 +2,7 @@ package handler import ( "fmt" + gethCommon "github.com/onflow/go-ethereum/common" "github.com/onflow/flow-go/fvm/evm/types" diff --git a/fvm/evm/testutils/backend.go b/fvm/evm/testutils/backend.go index 1beb2dfbf3c..472d38201f4 100644 --- a/fvm/evm/testutils/backend.go +++ b/fvm/evm/testutils/backend.go @@ -4,9 +4,10 @@ import ( "crypto/rand" "encoding/binary" "fmt" - "github.com/onflow/cadence/runtime/stdlib" "testing" + "github.com/onflow/cadence/runtime/stdlib" + "github.com/onflow/atree" "github.com/onflow/cadence" jsoncdc "github.com/onflow/cadence/encoding/json" diff --git a/fvm/fvm_test.go b/fvm/fvm_test.go index 47dbd19ee4b..7fe79a3d033 100644 --- a/fvm/fvm_test.go +++ b/fvm/fvm_test.go @@ -4,11 +4,12 @@ import ( "crypto/rand" "encoding/hex" 
"fmt" - envMock "github.com/onflow/flow-go/fvm/environment/mock" "math" "strings" "testing" + envMock "github.com/onflow/flow-go/fvm/environment/mock" + "github.com/onflow/cadence" "github.com/onflow/cadence/encoding/ccf" jsoncdc "github.com/onflow/cadence/encoding/json" From 203a05d6a89c6a5545fbbb0db2f50df5b89f55d8 Mon Sep 17 00:00:00 2001 From: Gregor Gololicic Date: Fri, 12 Apr 2024 18:53:33 +0200 Subject: [PATCH 111/148] add block timestamp to context --- fvm/evm/emulator/emulator.go | 1 + fvm/evm/handler/handler.go | 1 + fvm/evm/types/emulator.go | 1 + 3 files changed, 3 insertions(+) diff --git a/fvm/evm/emulator/emulator.go b/fvm/evm/emulator/emulator.go index feff252deaa..9e165c05c0b 100644 --- a/fvm/evm/emulator/emulator.go +++ b/fvm/evm/emulator/emulator.go @@ -39,6 +39,7 @@ func newConfig(ctx types.BlockContext) *Config { return NewConfig( WithChainID(ctx.ChainID), WithBlockNumber(new(big.Int).SetUint64(ctx.BlockNumber)), + WithBlockTime(ctx.BlockTimestamp), WithCoinbase(ctx.GasFeeCollector.ToCommon()), WithDirectCallBaseGasUsage(ctx.DirectCallBaseGasUsage), WithExtraPrecompiles(ctx.ExtraPrecompiles), diff --git a/fvm/evm/handler/handler.go b/fvm/evm/handler/handler.go index 111376f223d..663c71c8c03 100644 --- a/fvm/evm/handler/handler.go +++ b/fvm/evm/handler/handler.go @@ -253,6 +253,7 @@ func (h *ContractHandler) getBlockContext() (types.BlockContext, error) { return types.BlockContext{ ChainID: types.EVMChainIDFromFlowChainID(h.flowChainID), BlockNumber: bp.Height, + BlockTimestamp: bp.Timestamp, DirectCallBaseGasUsage: types.DefaultDirectCallBaseGasUsage, GetHashFunc: func(n uint64) gethCommon.Hash { hash, err := h.blockStore.BlockHash(n) diff --git a/fvm/evm/types/emulator.go b/fvm/evm/types/emulator.go index 3d684d525b1..b0118df2719 100644 --- a/fvm/evm/types/emulator.go +++ b/fvm/evm/types/emulator.go @@ -26,6 +26,7 @@ type Precompile interface { type BlockContext struct { ChainID *big.Int BlockNumber uint64 + BlockTimestamp uint64 
DirectCallBaseGasUsage uint64 DirectCallGasPrice uint64 GasFeeCollector Address From b8f8c2a00de406a0f461a42888472928fb7ae0fd Mon Sep 17 00:00:00 2001 From: Gregor Gololicic Date: Fri, 12 Apr 2024 19:00:11 +0200 Subject: [PATCH 112/148] add timestamp to evm block contract --- fvm/evm/stdlib/contract.cdc | 6 +++++- fvm/evm/stdlib/contract.go | 8 ++++++++ fvm/evm/stdlib/contract_test.go | 3 +++ 3 files changed, 16 insertions(+), 1 deletion(-) diff --git a/fvm/evm/stdlib/contract.cdc b/fvm/evm/stdlib/contract.cdc index c3b7fb3a52f..ff87ab299a3 100644 --- a/fvm/evm/stdlib/contract.cdc +++ b/fvm/evm/stdlib/contract.cdc @@ -476,10 +476,14 @@ contract EVM { access(all) let totalSupply: Int - init(height: UInt64, hash: String, totalSupply: Int) { + access(all) + let timestamp: UInt64 + + init(height: UInt64, hash: String, totalSupply: Int, timestamp: UInt64) { self.height = height self.hash = hash self.totalSupply = totalSupply + self.timestamp = timestamp } } diff --git a/fvm/evm/stdlib/contract.go b/fvm/evm/stdlib/contract.go index c9e62317a8b..6af70b7f7c5 100644 --- a/fvm/evm/stdlib/contract.go +++ b/fvm/evm/stdlib/contract.go @@ -1773,6 +1773,10 @@ func NewEVMBlockValue( }, ), }, + { + Name: "timestamp", + Value: interpreter.UInt64Value(block.Timestamp), + }, }, common.ZeroAddress, ) @@ -2044,6 +2048,10 @@ func NewEVMBlockCadenceType(address common.Address) *cadence.StructType { Identifier: "totalSupply", Type: cadence.IntType{}, }, + { + Identifier: "timestamp", + Type: cadence.UInt64Type{}, + }, }, nil, ) diff --git a/fvm/evm/stdlib/contract_test.go b/fvm/evm/stdlib/contract_test.go index f92cb52ab68..9f9669e9d28 100644 --- a/fvm/evm/stdlib/contract_test.go +++ b/fvm/evm/stdlib/contract_test.go @@ -4131,6 +4131,7 @@ func TestEVMGetLatestBlock(t *testing.T) { latestBlock := &types.Block{ Height: uint64(2), TotalSupply: big.NewInt(1500000000000000000), + Timestamp: uint64(1337), } handler := &testContractHandler{ evmContractAddress: common.Address(contractsAddress), @@ 
-4158,6 +4159,7 @@ func TestEVMGetLatestBlock(t *testing.T) { blockHash, err := cadence.NewString(hash.Hex()) require.NoError(t, err) blockTotalSupply := cadence.NewIntFromBig(latestBlock.TotalSupply) + timestamp := cadence.NewUInt64(latestBlock.Timestamp) expectedEVMBlock := cadence.Struct{ StructType: evmBlockCadenceType, @@ -4165,6 +4167,7 @@ func TestEVMGetLatestBlock(t *testing.T) { blockHeight, blockHash, blockTotalSupply, + timestamp, }, } From 72d01a4ace2e4ab9d3c685b6930b9b4d396f521c Mon Sep 17 00:00:00 2001 From: Gregor Gololicic Date: Fri, 12 Apr 2024 19:11:27 +0200 Subject: [PATCH 113/148] check timestamp on block test --- fvm/evm/evm_test.go | 63 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 63 insertions(+) diff --git a/fvm/evm/evm_test.go b/fvm/evm/evm_test.go index fc9f01bd620..9f28582d63f 100644 --- a/fvm/evm/evm_test.go +++ b/fvm/evm/evm_test.go @@ -249,6 +249,69 @@ func TestEVMRun(t *testing.T) { }) } +func TestEVMBlockData(t *testing.T) { + t.Parallel() + chain := flow.Emulator.Chain() + sc := systemcontracts.SystemContractsForChain(chain.ChainID()) + RunWithNewEnvironment(t, + chain, func( + ctx fvm.Context, + vm fvm.VM, + snapshot snapshot.SnapshotTree, + testContract *TestContract, + testAccount *EOATestAccount, + ) { + + // query the block timestamp + code := []byte(fmt.Sprintf( + ` + import EVM from %s + access(all) + fun main(tx: [UInt8], coinbaseBytes: [UInt8; 20]): EVM.Result { + let coinbase = EVM.EVMAddress(bytes: coinbaseBytes) + return EVM.run(tx: tx, coinbase: coinbase) + } + `, + sc.EVMContract.Address.HexWithPrefix(), + )) + + innerTxBytes := testAccount.PrepareSignAndEncodeTx(t, + testContract.DeployedAt.ToCommon(), + testContract.MakeCallData(t, "blockTime"), + big.NewInt(0), + uint64(100_000), + big.NewInt(0), + ) + + coinbase := cadence.NewArray( + ConvertToCadence(testAccount.Address().Bytes()), + ).WithType(stdlib.EVMAddressBytesCadenceType) + + innerTx := cadence.NewArray( + ConvertToCadence(innerTxBytes), + 
).WithType(stdlib.EVMTransactionBytesCadenceType) + + script := fvm.Script(code).WithArguments( + json.MustEncode(innerTx), + json.MustEncode(coinbase), + ) + + _, output, err := vm.Run( + ctx, + script, + snapshot) + require.NoError(t, err) + require.NoError(t, output.Err) + + res, err := stdlib.ResultSummaryFromEVMResultValue(output.Value) + require.NoError(t, err) + require.Equal(t, types.StatusSuccessful, res.Status) + require.Equal(t, types.ErrCodeNoError, res.ErrorCode) + require.Equal(t, ctx.BlockHeader.Timestamp.UnixNano(), new(big.Int).SetBytes(res.ReturnedValue).Int64()) + + }) +} + func TestEVMAddressDeposit(t *testing.T) { t.Parallel() From 267c6c3952ad4b7e9bb495e00eaafd6b5ae51b8a Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Fri, 12 Apr 2024 10:17:37 -0700 Subject: [PATCH 114/148] addres review comments for throttle --- engine/execution/ingestion/throttle.go | 30 +++++++++++++-------- engine/execution/ingestion/throttle_test.go | 16 +++++++++++ 2 files changed, 35 insertions(+), 11 deletions(-) create mode 100644 engine/execution/ingestion/throttle_test.go diff --git a/engine/execution/ingestion/throttle.go b/engine/execution/ingestion/throttle.go index 480388acb88..a92566b6660 100644 --- a/engine/execution/ingestion/throttle.go +++ b/engine/execution/ingestion/throttle.go @@ -12,10 +12,10 @@ import ( "github.com/onflow/flow-go/storage" ) -// CatchUpThreshold is the number of blocks that if the execution is far behind +// DefaultCatchUpThreshold is the number of blocks that if the execution is far behind // the finalization then we will only lazy load the next unexecuted finalized // blocks until the execution has caught up -const CatchUpThreshold = 500 +const DefaultCatchUpThreshold = 500 // BlockThrottle is a helper struct that helps throttle the unexecuted blocks to be sent // to the block queue for execution. 
@@ -31,7 +31,6 @@ type BlockThrottle struct { mu sync.Mutex executed uint64 finalized uint64 - inited bool // notifier processables chan<- flow.Identifier @@ -74,15 +73,18 @@ func NewBlockThrottle( }, nil } +// inited returns true if the throttle has been inited +func (c *BlockThrottle) inited() bool { + return c.processables != nil +} + func (c *BlockThrottle) Init(processables chan<- flow.Identifier) error { c.mu.Lock() defer c.mu.Unlock() - c.log.Info().Msgf("initializing block throttle") - if c.inited { + if c.inited() { return fmt.Errorf("throttle already inited") } - c.inited = true c.processables = processables var unexecuted []flow.Identifier @@ -94,13 +96,18 @@ func (c *BlockThrottle) Init(processables chan<- flow.Identifier) error { } c.log.Info().Msgf("loaded %d unexecuted blocks", len(unexecuted)) } else { - unexecuted, err = findFinalized(c.state, c.headers, c.executed, c.executed+500) + unexecuted, err = findFinalized(c.state, c.headers, c.executed, c.executed+uint64(c.threshold)) if err != nil { return err } c.log.Info().Msgf("loaded %d unexecuted finalized blocks", len(unexecuted)) } + c.log.Info().Msgf("throttle initializing with %d unexecuted blocks", len(unexecuted)) + + // the ingestion core engine must have initialized the 'processables' with 10000 (default) buffer size, + // and the 'unexecuted' will only contain up to DefaultCatchUpThreshold (500) blocks, + // so pushing all the unexecuted to processables won't be blocked. 
for _, id := range unexecuted { c.processables <- id } @@ -114,7 +121,7 @@ func (c *BlockThrottle) OnBlockExecuted(_ flow.Identifier, executed uint64) erro c.mu.Lock() defer c.mu.Unlock() - if !c.inited { + if !c.inited() { return fmt.Errorf("throttle not inited") } @@ -155,7 +162,7 @@ func (c *BlockThrottle) OnBlock(blockID flow.Identifier) error { defer c.mu.Unlock() c.log.Debug().Msgf("recieved block (%v)", blockID) - if !c.inited { + if !c.inited() { return fmt.Errorf("throttle not inited") } @@ -174,7 +181,7 @@ func (c *BlockThrottle) OnBlock(blockID flow.Identifier) error { func (c *BlockThrottle) OnBlockFinalized(lastFinalized *flow.Header) { c.mu.Lock() defer c.mu.Unlock() - if !c.inited { + if !c.inited() { return } @@ -216,11 +223,12 @@ func findFinalized(state protocol.State, headers storage.Headers, lastExecuted, for height := lastExecuted + 1; height <= final.Height; height++ { finalizedID, err := headers.BlockIDByHeight(height) if err != nil { - return nil, fmt.Errorf("could not get header at height: %v, %w", height, err) + return nil, fmt.Errorf("could not get block ID by height %v: %w", height, err) } unexecutedFinalized = append(unexecutedFinalized, finalizedID) } + return unexecutedFinalized, nil } diff --git a/engine/execution/ingestion/throttle_test.go b/engine/execution/ingestion/throttle_test.go new file mode 100644 index 00000000000..a2d8911b109 --- /dev/null +++ b/engine/execution/ingestion/throttle_test.go @@ -0,0 +1,16 @@ +package ingestion + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestCaughtUp(t *testing.T) { + require.True(t, caughtUp(100, 200, 500)) + require.True(t, caughtUp(100, 100, 500)) + require.True(t, caughtUp(100, 600, 500)) + + require.False(t, caughtUp(100, 601, 500)) + require.False(t, caughtUp(100, 602, 500)) +} From 95b468da279d928836ebd90098cb6ff2f46fcf47 Mon Sep 17 00:00:00 2001 From: Gregor Gololicic Date: Fri, 12 Apr 2024 19:19:30 +0200 Subject: [PATCH 115/148] use unix seconds not 
nanoseconds --- fvm/evm/evm_test.go | 2 +- fvm/evm/handler/blockstore.go | 7 ++++++- fvm/evm/types/block.go | 2 +- 3 files changed, 8 insertions(+), 3 deletions(-) diff --git a/fvm/evm/evm_test.go b/fvm/evm/evm_test.go index 9f28582d63f..ecbd3b88133 100644 --- a/fvm/evm/evm_test.go +++ b/fvm/evm/evm_test.go @@ -307,7 +307,7 @@ func TestEVMBlockData(t *testing.T) { require.NoError(t, err) require.Equal(t, types.StatusSuccessful, res.Status) require.Equal(t, types.ErrCodeNoError, res.ErrorCode) - require.Equal(t, ctx.BlockHeader.Timestamp.UnixNano(), new(big.Int).SetBytes(res.ReturnedValue).Int64()) + require.Equal(t, ctx.BlockHeader.Timestamp.Unix(), new(big.Int).SetBytes(res.ReturnedValue).Int64()) }) } diff --git a/fvm/evm/handler/blockstore.go b/fvm/evm/handler/blockstore.go index b827ffe0088..32c282a0d84 100644 --- a/fvm/evm/handler/blockstore.go +++ b/fvm/evm/handler/blockstore.go @@ -2,6 +2,7 @@ package handler import ( "fmt" + "time" gethCommon "github.com/onflow/go-ethereum/common" @@ -60,10 +61,14 @@ func (bs *BlockStore) BlockProposal() (*types.Block, error) { return nil, err } + // cadence block timestamp is unix nanoseconds but evm blocks + // expect timestamps in unix seconds so we convert here + timestamp := uint64(cadenceBlock.Timestamp / int64(time.Second)) + bs.blockProposal = types.NewBlock( parentHash, lastExecutedBlock.Height+1, - uint64(cadenceBlock.Timestamp), + timestamp, lastExecutedBlock.TotalSupply, gethCommon.Hash{}, make([]gethCommon.Hash, 0), diff --git a/fvm/evm/types/block.go b/fvm/evm/types/block.go index d5ea32979e9..6c70903bbea 100644 --- a/fvm/evm/types/block.go +++ b/fvm/evm/types/block.go @@ -19,7 +19,7 @@ type Block struct { // Height returns the height of this block Height uint64 - // Timestamp represents the time at which the block was created + // Timestamp is a Unix timestamp in seconds at which the block was created // Note that this value must be provided from the FVM Block Timestamp uint64 From 
3d1a534794ccf8ce0dd04d3abce5c7e02e35cee4 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Fri, 12 Apr 2024 10:59:40 -0700 Subject: [PATCH 116/148] add test cases for flow.Deduplicate --- model/flow/entity.go | 5 +++++ model/flow/entity_test.go | 24 ++++++++++++++++++++++++ 2 files changed, 29 insertions(+) create mode 100644 model/flow/entity_test.go diff --git a/model/flow/entity.go b/model/flow/entity.go index 9708b48cd7d..f106e22eebb 100644 --- a/model/flow/entity.go +++ b/model/flow/entity.go @@ -29,7 +29,12 @@ func EntitiesToIDs[T Entity](entities []T) []Identifier { } // Deduplicate entities in a slice by the ID method +// The original order of the entities is preserved. func Deduplicate[T IDEntity](entities []T) []T { + if entities == nil { + return nil + } + seen := make(map[Identifier]struct{}, len(entities)) result := make([]T, 0, len(entities)) diff --git a/model/flow/entity_test.go b/model/flow/entity_test.go new file mode 100644 index 00000000000..bb926159675 --- /dev/null +++ b/model/flow/entity_test.go @@ -0,0 +1,24 @@ +package flow_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestDeduplicate(t *testing.T) { + require.Nil(t, flow.Deduplicate[*flow.Collection](nil)) + + cols := unittest.CollectionListFixture(5) + require.Equal(t, cols, flow.Deduplicate(cols)) + + // create duplicates, and validate + require.Equal(t, cols, flow.Deduplicate[*flow.Collection](append(cols, cols...))) + + // verify the original order should be preserved + require.Equal(t, cols, flow.Deduplicate[*flow.Collection]( + append(cols, cols[3], cols[1], cols[4], cols[2], cols[0]))) +} From b406985095482de966fac21abd64d5c8f77b45c6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20M=C3=BCller?= Date: Fri, 12 Apr 2024 17:00:38 -0700 Subject: [PATCH 117/148] clean up EVM environment setup --- fvm/evm/evm.go | 23 +++++++++++++++++------ 
fvm/evm/stdlib/contract.go | 8 ++++++-- fvm/evm/stdlib/contract_test.go | 8 ++++---- fvm/script.go | 1 - fvm/transactionInvoker.go | 2 -- 5 files changed, 27 insertions(+), 15 deletions(-) diff --git a/fvm/evm/evm.go b/fvm/evm/evm.go index 44983524fcb..a29ab9f67cf 100644 --- a/fvm/evm/evm.go +++ b/fvm/evm/evm.go @@ -27,7 +27,6 @@ func SetupEnvironment( chainID flow.ChainID, fvmEnv environment.Environment, runtimeEnv runtime.Environment, - service flow.Address, flowToken flow.Address, ) error { evmStorageAccountAddress, err := StorageAccountAddress(chainID) @@ -42,15 +41,27 @@ func SetupEnvironment( backend := backends.NewWrappedEnvironment(fvmEnv) - em := evm.NewEmulator(backend, evmStorageAccountAddress) + emulator := evm.NewEmulator(backend, evmStorageAccountAddress) - bs := handler.NewBlockStore(backend, evmStorageAccountAddress) + blockStore := handler.NewBlockStore(backend, evmStorageAccountAddress) - aa := handler.NewAddressAllocator() + addressAllocator := handler.NewAddressAllocator() - contractHandler := handler.NewContractHandler(chainID, evmContractAccountAddress, common.Address(flowToken), bs, aa, backend, em) + contractHandler := handler.NewContractHandler( + chainID, + evmContractAccountAddress, + common.Address(flowToken), + blockStore, + addressAllocator, + backend, + emulator, + ) - stdlib.SetupEnvironment(runtimeEnv, contractHandler, evmContractAccountAddress) + stdlib.SetupEnvironment( + runtimeEnv, + contractHandler, + evmContractAccountAddress, + ) return nil } diff --git a/fvm/evm/stdlib/contract.go b/fvm/evm/stdlib/contract.go index c9e62317a8b..eb78a2a1a5c 100644 --- a/fvm/evm/stdlib/contract.go +++ b/fvm/evm/stdlib/contract.go @@ -1938,8 +1938,12 @@ var internalEVMStandardLibraryType = stdlib.StandardLibraryType{ Kind: common.DeclarationKindContract, } -func SetupEnvironment(env runtime.Environment, handler types.ContractHandler, service flow.Address) { - location := common.NewAddressLocation(nil, common.Address(service), ContractName) 
+func SetupEnvironment( + env runtime.Environment, + handler types.ContractHandler, + contractAddress flow.Address, +) { + location := common.NewAddressLocation(nil, common.Address(contractAddress), ContractName) env.DeclareType( internalEVMStandardLibraryType, diff --git a/fvm/evm/stdlib/contract_test.go b/fvm/evm/stdlib/contract_test.go index f92cb52ab68..ffcb9109c14 100644 --- a/fvm/evm/stdlib/contract_test.go +++ b/fvm/evm/stdlib/contract_test.go @@ -253,25 +253,25 @@ func deployContracts( } -func newEVMTransactionEnvironment(handler types.ContractHandler, service flow.Address) runtime.Environment { +func newEVMTransactionEnvironment(handler types.ContractHandler, contractAddress flow.Address) runtime.Environment { transactionEnvironment := runtime.NewBaseInterpreterEnvironment(runtime.Config{}) stdlib.SetupEnvironment( transactionEnvironment, handler, - service, + contractAddress, ) return transactionEnvironment } -func newEVMScriptEnvironment(handler types.ContractHandler, service flow.Address) runtime.Environment { +func newEVMScriptEnvironment(handler types.ContractHandler, contractAddress flow.Address) runtime.Environment { scriptEnvironment := runtime.NewScriptInterpreterEnvironment(runtime.Config{}) stdlib.SetupEnvironment( scriptEnvironment, handler, - service, + contractAddress, ) return scriptEnvironment diff --git a/fvm/script.go b/fvm/script.go index c310c73ba00..28067cfc1bd 100644 --- a/fvm/script.go +++ b/fvm/script.go @@ -207,7 +207,6 @@ func (executor *scriptExecutor) executeScript() error { chain.ChainID(), executor.env, rt.ScriptRuntimeEnv, - chain.ServiceAddress(), sc.FlowToken.Address, ) if err != nil { diff --git a/fvm/transactionInvoker.go b/fvm/transactionInvoker.go index 85d7375a0d3..5e05b9016d3 100644 --- a/fvm/transactionInvoker.go +++ b/fvm/transactionInvoker.go @@ -190,7 +190,6 @@ func (executor *transactionExecutor) preprocessTransactionBody() error { chain.ChainID(), executor.env, executor.cadenceRuntime.TxRuntimeEnv, - 
chain.ServiceAddress(), sc.FlowToken.Address, ) if err != nil { @@ -250,7 +249,6 @@ func (executor *transactionExecutor) ExecuteTransactionBody() error { chain.ChainID(), executor.env, executor.cadenceRuntime.TxRuntimeEnv, - chain.ServiceAddress(), sc.FlowToken.Address, ) if err != nil { From c27754ff02db49bece36d4058692a20fd947950e Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Mon, 15 Apr 2024 18:13:37 -0400 Subject: [PATCH 118/148] Update integration/tests/epochs/base_suite.go Co-authored-by: Jordan Schalm --- integration/tests/epochs/base_suite.go | 7 ------- 1 file changed, 7 deletions(-) diff --git a/integration/tests/epochs/base_suite.go b/integration/tests/epochs/base_suite.go index d1e10ee1d20..2fb8200cc0a 100644 --- a/integration/tests/epochs/base_suite.go +++ b/integration/tests/epochs/base_suite.go @@ -132,13 +132,6 @@ func (s *BaseSuite) TimedLogf(msg string, args ...interface{}) { s.T().Logf("%s - "+msg, args...) } -//func (s *BaseSuite) TearDownTest() { -// s.log.Info().Msg("================> Start TearDownTest") -// s.net.Remove() -// s.cancel() -// s.log.Info().Msg("================> Finish TearDownTest") -//} - // AwaitEpochPhase waits for the given phase, in the given epoch. 
func (s *BaseSuite) AwaitEpochPhase(ctx context.Context, expectedEpoch uint64, expectedPhase flow.EpochPhase, waitFor, tick time.Duration) { var actualEpoch uint64 From 47024daf531de10d911c665c2d55f69e21e61fb8 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Mon, 15 Apr 2024 18:26:08 -0400 Subject: [PATCH 119/148] use 0 as a default value force the user to provide values --- cmd/util/cmd/epochs/cmd/recover.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/util/cmd/epochs/cmd/recover.go b/cmd/util/cmd/epochs/cmd/recover.go index d365b5daad9..a6a2d09a23b 100644 --- a/cmd/util/cmd/epochs/cmd/recover.go +++ b/cmd/util/cmd/epochs/cmd/recover.go @@ -57,15 +57,15 @@ func init() { } func addGenerateRecoverEpochTxArgsCmdFlags() error { - generateRecoverEpochTxArgsCmd.Flags().IntVar(&flagCollectionClusters, "collection-clusters", 3, + generateRecoverEpochTxArgsCmd.Flags().IntVar(&flagCollectionClusters, "collection-clusters", 0, "number of collection clusters") // required parameters for network configuration and generation of root node identities generateRecoverEpochTxArgsCmd.Flags().StringVar(&flagNodeConfigJson, "config", "", "path to a JSON file containing multiple node configurations (fields Role, Address, Weight)") generateRecoverEpochTxArgsCmd.Flags().StringVar(&flagInternalNodePrivInfoDir, "internal-priv-dir", "", "path to directory "+ "containing the output from the `keygen` command for internal nodes") - generateRecoverEpochTxArgsCmd.Flags().Uint64Var(&flagNumViewsInEpoch, "epoch-length", 4000, "length of each epoch measured in views") - generateRecoverEpochTxArgsCmd.Flags().Uint64Var(&flagNumViewsInStakingAuction, "epoch-staking-phase-length", 100, "length of the epoch staking phase measured in views") + generateRecoverEpochTxArgsCmd.Flags().Uint64Var(&flagNumViewsInEpoch, "epoch-length", 0, "length of each epoch measured in views") + generateRecoverEpochTxArgsCmd.Flags().Uint64Var(&flagNumViewsInStakingAuction, 
"epoch-staking-phase-length", 0, "length of the epoch staking phase measured in views") generateRecoverEpochTxArgsCmd.Flags().Uint64Var(&flagEpochCounter, "epoch-counter", 0, "the epoch counter used to generate the root cluster block") err := generateRecoverEpochTxArgsCmd.MarkFlagRequired("epoch-length") From 603d1da1ee5ce0e80d90052d89a54b8f0dc8708d Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Mon, 15 Apr 2024 18:33:50 -0400 Subject: [PATCH 120/148] Apply suggestions from code review Co-authored-by: Alexander Hentschel Co-authored-by: Jordan Schalm --- cmd/bootstrap/cmd/final_list.go | 3 +-- cmd/util/cmd/common/clusters.go | 2 +- cmd/util/cmd/epochs/cmd/recover.go | 5 +++-- integration/tests/epochs/recover_epoch/suite.go | 5 +---- 4 files changed, 6 insertions(+), 9 deletions(-) diff --git a/cmd/bootstrap/cmd/final_list.go b/cmd/bootstrap/cmd/final_list.go index d2e078cb4b4..ca34739de2a 100644 --- a/cmd/bootstrap/cmd/final_list.go +++ b/cmd/bootstrap/cmd/final_list.go @@ -240,7 +240,6 @@ func assembleInternalNodesWithoutWeight() []model.NodeInfo { if err != nil { log.Fatal().Err(err).Msg("failed to read internal node infos") } - log.Info().Msgf("read %v internal private node-info files", len(privInternals)) var nodes []model.NodeInfo @@ -306,7 +305,7 @@ func createPublicNodeInfo(nodes []model.NodeInfoPub) []model.NodeInfo { log.Fatal().Err(err).Msg(fmt.Sprintf("invalid staking public key: %s", n.StakingPubKey)) } - // all nodes should have equal weight + // all nodes should have equal weight (this might change in the future) node := model.NewPublicNodeInfo( n.NodeID, n.Role, diff --git a/cmd/util/cmd/common/clusters.go b/cmd/util/cmd/common/clusters.go index ffb822ba332..01ae58a4433 100644 --- a/cmd/util/cmd/common/clusters.go +++ b/cmd/util/cmd/common/clusters.go @@ -19,7 +19,7 @@ import ( "github.com/onflow/flow-go/module/signature" ) -// ConstructClusterAssignment random cluster assignment with internal and partner nodes. 
+// ConstructClusterAssignment generates a partially randomized collector cluster assignment with internal and partner nodes. // The number of nodes in each cluster is deterministic and only depends on the number of clusters // and the number of nodes. The repartition of internal and partner nodes is also deterministic // and only depends on the number of clusters and nodes. diff --git a/cmd/util/cmd/epochs/cmd/recover.go b/cmd/util/cmd/epochs/cmd/recover.go index d365b5daad9..f2fcf86c00a 100644 --- a/cmd/util/cmd/epochs/cmd/recover.go +++ b/cmd/util/cmd/epochs/cmd/recover.go @@ -16,11 +16,12 @@ import ( "github.com/onflow/flow-go/state/protocol/inmem" ) -// generateRecoverEpochTxArgsCmd represents a command to generate the data needed to submit an epoch recovery transaction the network is in EFM (epoch fallback mode). +// generateRecoverEpochTxArgsCmd represents a command to generate the data needed to submit an epoch-recovery transaction +// to the network when it is in EFM (epoch fallback mode). // EFM can be exited only by a special service event, EpochRecover, which initially originates from a manual service account transaction. // The full epoch data must be generated manually and submitted with this transaction in order for an // EpochRecover event to be emitted. This command retrieves the current protocol state identities, computes the cluster assignment using those -// identities, generates the cluster QC's and retrieves the DKG key vector of the last successful epoch. +// identities, generates the cluster QCs and retrieves the DKG key vector of the last successful epoch. // This recovery process has some constraints: // - The RecoveryEpoch must have exactly the same consensus committee as participated in the most recent successful DKG. 
// - The RecoveryEpoch must contain enough "internal" collection nodes so that all clusters contain a supermajority of "internal" collection nodes (same constraint as sporks) diff --git a/integration/tests/epochs/recover_epoch/suite.go b/integration/tests/epochs/recover_epoch/suite.go index bce14f8036c..49e5a3ace58 100644 --- a/integration/tests/epochs/recover_epoch/suite.go +++ b/integration/tests/epochs/recover_epoch/suite.go @@ -10,10 +10,7 @@ type Suite struct { } func (s *Suite) SetupTest() { - // use a longer staking auction length to accommodate staking operations for joining/leaving nodes - // NOTE: this value is set fairly aggressively to ensure shorter test times. - // If flakiness due to failure to complete staking operations in time is observed, - // try increasing (by 10-20 views). + // use a shorter staking auction because we don't have staking operations in this case s.StakingAuctionLen = 2 s.DKGPhaseLen = 50 s.EpochLen = 250 From b40197eeb55aa2b95d9708228ce7d66d7425571c Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Mon, 15 Apr 2024 18:34:09 -0400 Subject: [PATCH 121/148] Update cmd/util/cmd/common/node_info.go Co-authored-by: Alexander Hentschel --- cmd/util/cmd/common/node_info.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/util/cmd/common/node_info.go b/cmd/util/cmd/common/node_info.go index 2ee669fe0fa..255f9067b24 100644 --- a/cmd/util/cmd/common/node_info.go +++ b/cmd/util/cmd/common/node_info.go @@ -17,7 +17,7 @@ import ( // - partnerWeightsPath: path to partner weights configuration file. // - partnerNodeInfoDir: path to partner nodes configuration file. // Returns: -// - []bootstrap.NodeInfo: the generated node info list. +// - []bootstrap.NodeInfo: the generated node info list. (public information, private keys not set) // - error: if any error occurs. Any error returned from this function is irrecoverable. 
func ReadFullPartnerNodeInfos(log zerolog.Logger, partnerWeightsPath, partnerNodeInfoDir string) ([]bootstrap.NodeInfo, error) { partners, err := ReadPartnerNodeInfos(partnerNodeInfoDir) From 605385860ec771f939302ebcf645f1d600a27089 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Mon, 15 Apr 2024 18:34:22 -0400 Subject: [PATCH 122/148] Update cmd/util/cmd/common/node_info.go Co-authored-by: Alexander Hentschel --- cmd/util/cmd/common/node_info.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/util/cmd/common/node_info.go b/cmd/util/cmd/common/node_info.go index 255f9067b24..717f0cec579 100644 --- a/cmd/util/cmd/common/node_info.go +++ b/cmd/util/cmd/common/node_info.go @@ -173,7 +173,7 @@ func ReadFullInternalNodeInfos(log zerolog.Logger, internalNodePrivInfoDir, inte // Args: // - internalNodePrivInfoDir: path to internal nodes private info. // Returns: -// - []bootstrap.NodeInfo: the generated private node info list. +// - []bootstrap.NodeInfo: the generated private node info list. Caution: contains private keys! // - error: if any error occurs. Any error returned from this function is irrecoverable. 
func ReadInternalNodeInfos(internalNodePrivInfoDir string) ([]bootstrap.NodeInfoPriv, error) { var internalPrivInfos []bootstrap.NodeInfoPriv From a7e6e2c8e33449286bb050c9131301aa7979e028 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Mon, 15 Apr 2024 18:38:55 -0400 Subject: [PATCH 123/148] Apply suggestions from code review Co-authored-by: Alexander Hentschel --- cmd/util/cmd/common/clusters.go | 11 +++++++++++ cmd/util/cmd/common/node_info.go | 2 +- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/cmd/util/cmd/common/clusters.go b/cmd/util/cmd/common/clusters.go index 01ae58a4433..9b43be9f8b0 100644 --- a/cmd/util/cmd/common/clusters.go +++ b/cmd/util/cmd/common/clusters.go @@ -61,6 +61,17 @@ func ConstructClusterAssignment(log zerolog.Logger, partnerNodes, internalNodes log.Fatal().Err(err).Msg("could not shuffle internals") } + // The following is a heuristic for distributing the internal collector nodes (private staking key available + // to generate QC for cluster root block) and partner nodes (private staking unknown). We need internal nodes + // to control strictly more than 2/3 of the cluster's total weight. + // The following is a heuristic that distributes collectors round-robbin across the specified number of clusters. + // This heuristic only works when all collectors have equal weight! 
The following sanity check enforces this: + if !(0 < len(partnerNodes) && len(partnerNodes) < 2*len(internalNodes)) { + return nil, nil, fmt.Errorf("requiring at least x>0 number of partner nodes and y > 2x number of internal nodes, but got x,y=%d,%d", len(partnerNodes), len(internalNodes)) + } + // sanity check ^ enforces that there is at least one internal node, hence `internalNodes[0].InitialWeight` is always a valid reference weight + refWeight := internalNodes[0].InitialWeight + identifierLists := make([]flow.IdentifierList, numCollectionClusters) // array to track the 2/3 internal-nodes constraint (internal_nodes > 2 * partner_nodes) constraint := make([]int, numCollectionClusters) diff --git a/cmd/util/cmd/common/node_info.go b/cmd/util/cmd/common/node_info.go index 717f0cec579..c9bd8768a89 100644 --- a/cmd/util/cmd/common/node_info.go +++ b/cmd/util/cmd/common/node_info.go @@ -122,7 +122,7 @@ func ReadPartnerNodeInfos(partnerNodeInfoDir string) ([]bootstrap.NodeInfoPub, e // - internalNodePrivInfoDir: path to internal nodes private info. // - internalWeightsConfig: path to internal weights configuration file. // Returns: -// - []bootstrap.NodeInfo: the generated node info list. +// - []bootstrap.NodeInfo: the generated node info list. Caution: contains private keys! // - error: if any error occurs. Any error returned from this function is irrecoverable. 
func ReadFullInternalNodeInfos(log zerolog.Logger, internalNodePrivInfoDir, internalWeightsConfig string) ([]bootstrap.NodeInfo, error) { privInternals, err := ReadInternalNodeInfos(internalNodePrivInfoDir) From f823c450fe3696c40e163ac75a68136460e8d079 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Mon, 15 Apr 2024 18:39:17 -0400 Subject: [PATCH 124/148] Update cmd/util/cmd/common/node_info.go Co-authored-by: Alexander Hentschel --- cmd/util/cmd/common/node_info.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/util/cmd/common/node_info.go b/cmd/util/cmd/common/node_info.go index c9bd8768a89..11d4d64a251 100644 --- a/cmd/util/cmd/common/node_info.go +++ b/cmd/util/cmd/common/node_info.go @@ -75,7 +75,7 @@ func ReadFullPartnerNodeInfos(log zerolog.Logger, partnerWeightsPath, partnerNod // Args: // - partnerWeightsPath: path to partner weights configuration file. // Returns: -// - PartnerWeights: the generated partner weights list. +// - PartnerWeights: map from NodeID → node's weight // - error: if any error occurs. Any error returned from this function is irrecoverable. 
func ReadPartnerWeights(partnerWeightsPath string) (PartnerWeights, error) { var weights PartnerWeights From 5e3dffd2ac6bbc21ff8ae83d1ce997bba4ae062e Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Mon, 15 Apr 2024 18:47:50 -0400 Subject: [PATCH 125/148] add sanity check ensure all node weights are equal when generating cluster assignment --- cmd/util/cmd/common/clusters.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/cmd/util/cmd/common/clusters.go b/cmd/util/cmd/common/clusters.go index 9b43be9f8b0..ef04bb3c94d 100644 --- a/cmd/util/cmd/common/clusters.go +++ b/cmd/util/cmd/common/clusters.go @@ -71,13 +71,16 @@ func ConstructClusterAssignment(log zerolog.Logger, partnerNodes, internalNodes } // sanity check ^ enforces that there is at least one internal node, hence `internalNodes[0].InitialWeight` is always a valid reference weight refWeight := internalNodes[0].InitialWeight - + identifierLists := make([]flow.IdentifierList, numCollectionClusters) // array to track the 2/3 internal-nodes constraint (internal_nodes > 2 * partner_nodes) constraint := make([]int, numCollectionClusters) // first, round-robin internal nodes into each cluster for i, node := range internals { + if node.InitialWeight != refWeight { + return nil, nil, fmt.Errorf("current implementation requires all collectors (partner & interal nodes) to have equal weight") + } clusterIndex := i % numCollectionClusters identifierLists[clusterIndex] = append(identifierLists[clusterIndex], node.NodeID) constraint[clusterIndex] += 1 @@ -85,6 +88,9 @@ func ConstructClusterAssignment(log zerolog.Logger, partnerNodes, internalNodes // next, round-robin partner nodes into each cluster for i, node := range partners { + if node.InitialWeight != refWeight { + return nil, nil, fmt.Errorf("current implementation requires all collectors (partner & interal nodes) to have equal weight") + } clusterIndex := i % numCollectionClusters identifierLists[clusterIndex] = 
append(identifierLists[clusterIndex], node.NodeID) constraint[clusterIndex] -= 2 From 7a27621b833083b102a2cf88a18721d1cbfedf4e Mon Sep 17 00:00:00 2001 From: Giovanni Sanchez <108043524+sisyphusSmiling@users.noreply.github.com> Date: Fri, 12 Apr 2024 15:46:17 -0500 Subject: [PATCH 126/148] add bridging interface to COA --- fvm/evm/stdlib/contract.cdc | 110 ++++++++++++++++++++++++++++++++++++ 1 file changed, 110 insertions(+) diff --git a/fvm/evm/stdlib/contract.cdc b/fvm/evm/stdlib/contract.cdc index c3b7fb3a52f..2e453f89d24 100644 --- a/fvm/evm/stdlib/contract.cdc +++ b/fvm/evm/stdlib/contract.cdc @@ -1,4 +1,6 @@ import Crypto +import "NonFungibleToken" +import "FungibleToken" import "FlowToken" access(all) @@ -288,6 +290,59 @@ contract EVM { value: value.attoflow ) as! Result } + + /// Bridges the given NFT to the EVM environment, requiring a Provider from which to withdraw a fee to fulfill + /// the bridge request + access(all) + fun depositNFT( + nft: @{NonFungibleToken.NFT}, + feeProvider: auth(FungibleToken.Withdraw) &{FungibleToken.Provider} + ) { + EVM.borrowBridgeAccessor().depositNFT(nft: <-nft, to: self.address(), feeProvider: feeProvider) + } + + /// Bridges the given NFT from the EVM environment, requiring a Provider from which to withdraw a fee to fulfill + /// the bridge request. 
Note: the caller should own the requested NFT in EVM + access(Owner | Bridge) + fun withdrawNFT( + type: Type, + id: UInt256, + feeProvider: auth(FungibleToken.Withdraw) &{FungibleToken.Provider} + ): @{NonFungibleToken.NFT} { + return <- EVM.borrowBridgeAccessor().withdrawNFT( + caller: &self as auth(Call) &CadenceOwnedAccount, + type: type, + id: id, + feeProvider: feeProvider + ) + } + + /// Bridges the given Vault to the EVM environment, requiring a Provider from which to withdraw a fee to fulfill + /// the bridge request + access(all) + fun depositTokens( + vault: @{FungibleToken.Vault}, + feeProvider: auth(FungibleToken.Withdraw) &{FungibleToken.Provider} + ) { + EVM.borrowBridgeAccessor().depositTokens(vault: <-vault, to: self.address(), feeProvider: feeProvider) + } + + /// Bridges the given fungible tokens from the EVM environment, requiring a Provider from which to withdraw a + /// fee to fulfill the bridge request. Note: the caller should own the requested tokens & sufficient balance of + /// requested tokens in EVM + access(Owner | Bridge) + fun withdrawTokens( + type: Type, + amount: UInt256, + feeProvider: auth(FungibleToken.Withdraw) &{FungibleToken.Provider} + ): @{FungibleToken.Vault} { + return <- EVM.borrowBridgeAccessor().withdrawTokens( + caller: &self as auth(Call) &CadenceOwnedAccount, + type: type, + amount: amount, + feeProvider: feeProvider + ) + } } /// Creates a new cadence owned account @@ -488,4 +543,59 @@ contract EVM { fun getLatestBlock(): EVMBlock { return InternalEVM.getLatestBlock() as! 
EVMBlock } + + /// Interface for a resource which acts as an entrypoint to the VM bridge + access(all) + resource interface BridgeAccessor { + + /// Endpoint enabling the bridging of an NFT to EVM + access(Bridge) + fun depositNFT( + nft: @{NonFungibleToken.NFT}, + to: EVMAddress, + feeProvider: auth(FungibleToken.Withdraw) &{FungibleToken.Provider} + ) + + /// Endpoint enabling the bridging of an NFT from EVM + access(Bridge) + fun withdrawNFT( + caller: auth(Call) &CadenceOwnedAccount, + type: Type, + id: UInt256, + feeProvider: auth(FungibleToken.Withdraw) &{FungibleToken.Provider} + ): @{NonFungibleToken.NFT} + + /// Endpoint enabling the bridging of a fungible token vault to EVM + access(Bridge) + fun depositTokens( + vault: @{FungibleToken.Vault}, + to: EVMAddress, + feeProvider: auth(FungibleToken.Withdraw) &{FungibleToken.Provider} + ) + + /// Endpoint enabling the bridging of fungible tokens from EVM + access(Bridge) + fun withdrawTokens( + caller: auth(Call) &CadenceOwnedAccount, + type: Type, + amount: UInt256, + feeProvider: auth(FungibleToken.Withdraw) &{FungibleToken.Provider} + ): @{FungibleToken.Vault} + } + + /// Interface which captures a Capability to the bridge Accessor, saving it within the BridgeRouter resource + access(all) + resource interface BridgeRouter { + + /// Returns a reference to the BridgeAccessor designated for internal bridge requests + access(Bridge) view fun borrowBridgeAccessor(): auth(Bridge) &{BridgeAccessor} + } + + /// Returns a reference to the BridgeAccessor designated for internal bridge requests + access(self) + view fun borrowBridgeAccessor(): auth(Bridge) &{BridgeAccessor} { + return self.account.storage.borrow(from: /storage/evmBridgeRouter) + ?.borrowBridgeAccessor() + ?? 
panic("Could not borrow reference to the EVM bridge") + } } From b8d0d830c92fcb218db37df989a06bf464907375 Mon Sep 17 00:00:00 2001 From: Giovanni Sanchez <108043524+sisyphusSmiling@users.noreply.github.com> Date: Mon, 15 Apr 2024 16:59:33 -0500 Subject: [PATCH 127/148] fix updated EVM dependency aliasing --- fvm/bootstrap.go | 4 ++-- fvm/evm/stdlib/contract.go | 19 +++++++++++++++---- fvm/evm/stdlib/contract_test.go | 2 +- 3 files changed, 18 insertions(+), 7 deletions(-) diff --git a/fvm/bootstrap.go b/fvm/bootstrap.go index 7ce37e0828b..fbf875b8827 100644 --- a/fvm/bootstrap.go +++ b/fvm/bootstrap.go @@ -806,7 +806,7 @@ func (b *bootstrapExecutor) setStakingAllowlist( panicOnMetaInvokeErrf("failed to set staking allow-list: %s", txError, err) } -func (b *bootstrapExecutor) setupEVM(serviceAddress, fungibleTokenAddress, flowTokenAddress flow.Address) { +func (b *bootstrapExecutor) setupEVM(serviceAddress, nonFungibleTokenAddress, fungibleTokenAddress, flowTokenAddress flow.Address) { if b.setupEVMEnabled { // account for storage // we dont need to deploy anything to this account, but it needs to exist @@ -817,7 +817,7 @@ func (b *bootstrapExecutor) setupEVM(serviceAddress, fungibleTokenAddress, flowT // deploy the EVM contract to the service account tx := blueprints.DeployContractTransaction( serviceAddress, - stdlib.ContractCode(flowTokenAddress), + stdlib.ContractCode(nonFungibleTokenAddress, fungibleTokenAddress, flowTokenAddress), stdlib.ContractName, ) // WithEVMEnabled should only be used after we create an account for storage diff --git a/fvm/evm/stdlib/contract.go b/fvm/evm/stdlib/contract.go index c9e62317a8b..7178d3ecc2f 100644 --- a/fvm/evm/stdlib/contract.go +++ b/fvm/evm/stdlib/contract.go @@ -27,13 +27,24 @@ import ( //go:embed contract.cdc var contractCode string -var flowTokenImportPattern = regexp.MustCompile(`(?m)^import "FlowToken"\n`) +var nftImportPattern = regexp.MustCompile(`(?m)^import "NonFungibleToken"`) +var fungibleTokenImportPattern = 
regexp.MustCompile(`(?m)^import "FungibleToken"`) +var flowTokenImportPattern = regexp.MustCompile(`(?m)^import "FlowToken"`) -func ContractCode(flowTokenAddress flow.Address) []byte { - return []byte(flowTokenImportPattern.ReplaceAllString( +func ContractCode(nonFungibleTokenAddress, fungibleTokenAddress, flowTokenAddress flow.Address) []byte { + contractCode = nftImportPattern.ReplaceAllString( + contractCode, + fmt.Sprintf("import NonFungibleToken from %s", nonFungibleTokenAddress.HexWithPrefix()), + ) + contractCode = fungibleTokenImportPattern.ReplaceAllString( + contractCode, + fmt.Sprintf("import FungibleToken from %s", fungibleTokenAddress.HexWithPrefix()), + ) + contractCode = flowTokenImportPattern.ReplaceAllString( contractCode, fmt.Sprintf("import FlowToken from %s", flowTokenAddress.HexWithPrefix()), - )) + ) + return []byte(contractCode) } const ContractName = "EVM" diff --git a/fvm/evm/stdlib/contract_test.go b/fvm/evm/stdlib/contract_test.go index f92cb52ab68..2af947436f2 100644 --- a/fvm/evm/stdlib/contract_test.go +++ b/fvm/evm/stdlib/contract_test.go @@ -223,7 +223,7 @@ func deployContracts( }, { name: stdlib.ContractName, - code: stdlib.ContractCode(contractsAddress), + code: stdlib.ContractCode(contractsAddress, contractsAddress, contractsAddress), }, } From 45038251418c340baa9a4267ce7b4a646eedee60 Mon Sep 17 00:00:00 2001 From: Giovanni Sanchez <108043524+sisyphusSmiling@users.noreply.github.com> Date: Mon, 15 Apr 2024 18:18:56 -0500 Subject: [PATCH 128/148] update EVM bridging interfaces for Cadence 0.42 --- fvm/evm/stdlib/contract.cdc | 38 ++++++++++++++++++------------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/fvm/evm/stdlib/contract.cdc b/fvm/evm/stdlib/contract.cdc index 2e453f89d24..c264fe1b85d 100644 --- a/fvm/evm/stdlib/contract.cdc +++ b/fvm/evm/stdlib/contract.cdc @@ -296,7 +296,7 @@ contract EVM { access(all) fun depositNFT( nft: @{NonFungibleToken.NFT}, - feeProvider: auth(FungibleToken.Withdraw) 
&{FungibleToken.Provider} + feeProvider: &{FungibleToken.Provider} ) { EVM.borrowBridgeAccessor().depositNFT(nft: <-nft, to: self.address(), feeProvider: feeProvider) } @@ -307,10 +307,10 @@ contract EVM { fun withdrawNFT( type: Type, id: UInt256, - feeProvider: auth(FungibleToken.Withdraw) &{FungibleToken.Provider} + feeProvider: &{FungibleToken.Provider} ): @{NonFungibleToken.NFT} { return <- EVM.borrowBridgeAccessor().withdrawNFT( - caller: &self as auth(Call) &CadenceOwnedAccount, + caller: &self, type: type, id: id, feeProvider: feeProvider @@ -322,7 +322,7 @@ contract EVM { access(all) fun depositTokens( vault: @{FungibleToken.Vault}, - feeProvider: auth(FungibleToken.Withdraw) &{FungibleToken.Provider} + feeProvider: &{FungibleToken.Provider} ) { EVM.borrowBridgeAccessor().depositTokens(vault: <-vault, to: self.address(), feeProvider: feeProvider) } @@ -334,10 +334,10 @@ contract EVM { fun withdrawTokens( type: Type, amount: UInt256, - feeProvider: auth(FungibleToken.Withdraw) &{FungibleToken.Provider} + feeProvider: &{FungibleToken.Provider} ): @{FungibleToken.Vault} { return <- EVM.borrowBridgeAccessor().withdrawTokens( - caller: &self as auth(Call) &CadenceOwnedAccount, + caller: &self, type: type, amount: amount, feeProvider: feeProvider @@ -549,37 +549,37 @@ contract EVM { resource interface BridgeAccessor { /// Endpoint enabling the bridging of an NFT to EVM - access(Bridge) + access(all) fun depositNFT( nft: @{NonFungibleToken.NFT}, to: EVMAddress, - feeProvider: auth(FungibleToken.Withdraw) &{FungibleToken.Provider} + feeProvider: &{FungibleToken.Provider} ) /// Endpoint enabling the bridging of an NFT from EVM - access(Bridge) + access(all) fun withdrawNFT( - caller: auth(Call) &CadenceOwnedAccount, + caller: &CadenceOwnedAccount, type: Type, id: UInt256, - feeProvider: auth(FungibleToken.Withdraw) &{FungibleToken.Provider} + feeProvider: &{FungibleToken.Provider} ): @{NonFungibleToken.NFT} /// Endpoint enabling the bridging of a fungible token 
vault to EVM - access(Bridge) + access(all) fun depositTokens( vault: @{FungibleToken.Vault}, to: EVMAddress, - feeProvider: auth(FungibleToken.Withdraw) &{FungibleToken.Provider} + feeProvider: &{FungibleToken.Provider} ) /// Endpoint enabling the bridging of fungible tokens from EVM - access(Bridge) + access(all) fun withdrawTokens( - caller: auth(Call) &CadenceOwnedAccount, + caller: &CadenceOwnedAccount, type: Type, amount: UInt256, - feeProvider: auth(FungibleToken.Withdraw) &{FungibleToken.Provider} + feeProvider: &{FungibleToken.Provider} ): @{FungibleToken.Vault} } @@ -588,13 +588,13 @@ contract EVM { resource interface BridgeRouter { /// Returns a reference to the BridgeAccessor designated for internal bridge requests - access(Bridge) view fun borrowBridgeAccessor(): auth(Bridge) &{BridgeAccessor} + access(all) view fun borrowBridgeAccessor(): &{BridgeAccessor} } /// Returns a reference to the BridgeAccessor designated for internal bridge requests access(self) - view fun borrowBridgeAccessor(): auth(Bridge) &{BridgeAccessor} { - return self.account.storage.borrow(from: /storage/evmBridgeRouter) + view fun borrowBridgeAccessor(): &{BridgeAccessor} { + return self.account.borrow<&{BridgeRouter}>(from: /storage/evmBridgeRouter) ?.borrowBridgeAccessor() ?? 
panic("Could not borrow reference to the EVM bridge") } From 0a99512b1ecf3299f000b33e963877134fa63ac2 Mon Sep 17 00:00:00 2001 From: Giovanni Sanchez <108043524+sisyphusSmiling@users.noreply.github.com> Date: Mon, 15 Apr 2024 19:01:26 -0500 Subject: [PATCH 129/148] remove entitlement from EVM & fix bootstrap --- fvm/bootstrap.go | 2 +- fvm/evm/stdlib/contract.cdc | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/fvm/bootstrap.go b/fvm/bootstrap.go index fbf875b8827..858fea9d9b6 100644 --- a/fvm/bootstrap.go +++ b/fvm/bootstrap.go @@ -392,7 +392,7 @@ func (b *bootstrapExecutor) Execute() error { b.setStakingAllowlist(service, b.identities.NodeIDs()) // sets up the EVM environment - b.setupEVM(service, fungibleToken, flowToken) + b.setupEVM(service, nonFungibleToken, fungibleToken, flowToken) return nil } diff --git a/fvm/evm/stdlib/contract.cdc b/fvm/evm/stdlib/contract.cdc index c264fe1b85d..ce15d5a417e 100644 --- a/fvm/evm/stdlib/contract.cdc +++ b/fvm/evm/stdlib/contract.cdc @@ -303,7 +303,7 @@ contract EVM { /// Bridges the given NFT from the EVM environment, requiring a Provider from which to withdraw a fee to fulfill /// the bridge request. Note: the caller should own the requested NFT in EVM - access(Owner | Bridge) + access(all) fun withdrawNFT( type: Type, id: UInt256, @@ -330,7 +330,7 @@ contract EVM { /// Bridges the given fungible tokens from the EVM environment, requiring a Provider from which to withdraw a /// fee to fulfill the bridge request. 
Note: the caller should own the requested tokens & sufficient balance of /// requested tokens in EVM - access(Owner | Bridge) + access(all) fun withdrawTokens( type: Type, amount: UInt256, From a55fda107103fde330279553a36809ce9198ae0c Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Mon, 15 Apr 2024 21:35:42 -0400 Subject: [PATCH 130/148] Update node_info.go --- cmd/util/cmd/common/node_info.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/util/cmd/common/node_info.go b/cmd/util/cmd/common/node_info.go index 11d4d64a251..061741d0955 100644 --- a/cmd/util/cmd/common/node_info.go +++ b/cmd/util/cmd/common/node_info.go @@ -75,7 +75,7 @@ func ReadFullPartnerNodeInfos(log zerolog.Logger, partnerWeightsPath, partnerNod // Args: // - partnerWeightsPath: path to partner weights configuration file. // Returns: -// - PartnerWeights: map from NodeID → node's weight +// - PartnerWeights: map from NodeID → node's weight // - error: if any error occurs. Any error returned from this function is irrecoverable. 
func ReadPartnerWeights(partnerWeightsPath string) (PartnerWeights, error) { var weights PartnerWeights From 9ce40e2fb7510d8e42a9939f7025d7d44bffebde Mon Sep 17 00:00:00 2001 From: Giovanni Sanchez <108043524+sisyphusSmiling@users.noreply.github.com> Date: Tue, 16 Apr 2024 11:18:43 -0500 Subject: [PATCH 131/148] fix casting statement in EVM contract --- fvm/evm/stdlib/contract.cdc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fvm/evm/stdlib/contract.cdc b/fvm/evm/stdlib/contract.cdc index ce15d5a417e..45a9dca6042 100644 --- a/fvm/evm/stdlib/contract.cdc +++ b/fvm/evm/stdlib/contract.cdc @@ -310,7 +310,7 @@ contract EVM { feeProvider: &{FungibleToken.Provider} ): @{NonFungibleToken.NFT} { return <- EVM.borrowBridgeAccessor().withdrawNFT( - caller: &self, + caller: &self as &CadenceOwnedAccount, type: type, id: id, feeProvider: feeProvider @@ -337,7 +337,7 @@ contract EVM { feeProvider: &{FungibleToken.Provider} ): @{FungibleToken.Vault} { return <- EVM.borrowBridgeAccessor().withdrawTokens( - caller: &self, + caller: &self as &CadenceOwnedAccount, type: type, amount: amount, feeProvider: feeProvider From ccc99ec43c5b17dc0d8018aa8b0006077d569126 Mon Sep 17 00:00:00 2001 From: Giovanni Sanchez <108043524+sisyphusSmiling@users.noreply.github.com> Date: Tue, 16 Apr 2024 11:28:44 -0500 Subject: [PATCH 132/148] fix accepted NFT & Vault conformance types in EVM contract --- fvm/evm/stdlib/contract.cdc | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/fvm/evm/stdlib/contract.cdc b/fvm/evm/stdlib/contract.cdc index 45a9dca6042..b70f5f90d3a 100644 --- a/fvm/evm/stdlib/contract.cdc +++ b/fvm/evm/stdlib/contract.cdc @@ -295,7 +295,7 @@ contract EVM { /// the bridge request access(all) fun depositNFT( - nft: @{NonFungibleToken.NFT}, + nft: @NonFungibleToken.NFT, feeProvider: &{FungibleToken.Provider} ) { EVM.borrowBridgeAccessor().depositNFT(nft: <-nft, to: self.address(), feeProvider: feeProvider) @@ -308,7 +308,7 @@ 
contract EVM { type: Type, id: UInt256, feeProvider: &{FungibleToken.Provider} - ): @{NonFungibleToken.NFT} { + ): @NonFungibleToken.NFT { return <- EVM.borrowBridgeAccessor().withdrawNFT( caller: &self as &CadenceOwnedAccount, type: type, @@ -321,7 +321,7 @@ contract EVM { /// the bridge request access(all) fun depositTokens( - vault: @{FungibleToken.Vault}, + vault: @FungibleToken.Vault, feeProvider: &{FungibleToken.Provider} ) { EVM.borrowBridgeAccessor().depositTokens(vault: <-vault, to: self.address(), feeProvider: feeProvider) @@ -335,7 +335,7 @@ contract EVM { type: Type, amount: UInt256, feeProvider: &{FungibleToken.Provider} - ): @{FungibleToken.Vault} { + ): @FungibleToken.Vault { return <- EVM.borrowBridgeAccessor().withdrawTokens( caller: &self as &CadenceOwnedAccount, type: type, @@ -551,7 +551,7 @@ contract EVM { /// Endpoint enabling the bridging of an NFT to EVM access(all) fun depositNFT( - nft: @{NonFungibleToken.NFT}, + nft: @NonFungibleToken.NFT, to: EVMAddress, feeProvider: &{FungibleToken.Provider} ) @@ -563,12 +563,12 @@ contract EVM { type: Type, id: UInt256, feeProvider: &{FungibleToken.Provider} - ): @{NonFungibleToken.NFT} + ): @NonFungibleToken.NFT /// Endpoint enabling the bridging of a fungible token vault to EVM access(all) fun depositTokens( - vault: @{FungibleToken.Vault}, + vault: @FungibleToken.Vault, to: EVMAddress, feeProvider: &{FungibleToken.Provider} ) @@ -580,7 +580,7 @@ contract EVM { type: Type, amount: UInt256, feeProvider: &{FungibleToken.Provider} - ): @{FungibleToken.Vault} + ): @FungibleToken.Vault } /// Interface which captures a Capability to the bridge Accessor, saving it within the BridgeRouter resource From 809c541f85c3f97a7eef0395caedc79f9f2b7e6c Mon Sep 17 00:00:00 2001 From: Giovanni Sanchez <108043524+sisyphusSmiling@users.noreply.github.com> Date: Tue, 16 Apr 2024 11:53:54 -0500 Subject: [PATCH 133/148] fix change_contract_code_migration --- .../ledger/migrations/change_contract_code_migration.go | 6 +++++- 
1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/cmd/util/ledger/migrations/change_contract_code_migration.go b/cmd/util/ledger/migrations/change_contract_code_migration.go index c2715bdc8d0..f21f3b43c4f 100644 --- a/cmd/util/ledger/migrations/change_contract_code_migration.go +++ b/cmd/util/ledger/migrations/change_contract_code_migration.go @@ -326,7 +326,11 @@ func SystemContractChanges(chainID flow.ChainID) []SystemContractChange { // EVM related contracts NewSystemContractChange( systemContracts.EVMContract, - evm.ContractCode(systemContracts.FlowToken.Address), + evm.ContractCode( + systemContracts.NonFungibleToken.Address, + systemContracts.FungibleToken.Address, + systemContracts.FlowToken.Address, + ), ), } } From cb42dc5656f9e28ea71945c21d80841f30218cb0 Mon Sep 17 00:00:00 2001 From: Giovanni Sanchez <108043524+sisyphusSmiling@users.noreply.github.com> Date: Tue, 16 Apr 2024 13:32:34 -0500 Subject: [PATCH 134/148] commit fvm_test.go patch thanks to @m-peter --- fvm/fvm_test.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/fvm/fvm_test.go b/fvm/fvm_test.go index 900a9f7a56f..fa8c52306c8 100644 --- a/fvm/fvm_test.go +++ b/fvm/fvm_test.go @@ -3065,6 +3065,7 @@ func TestEVM(t *testing.T) { withBootstrapProcedureOptions(fvm.WithSetupEVMEnabled(true)). withContextOptions( fvm.WithEVMEnabled(true), + fvm.WithChain(flow.Emulator.Chain()), fvm.WithCadenceLogging(true), ). run(func( @@ -3226,6 +3227,7 @@ func TestEVM(t *testing.T) { // so we have to use emulator here so that the EVM storage contract is deployed // to the 5th address fvm.WithChain(flow.Emulator.Chain()), + fvm.WithEVMEnabled(true), ). 
run(func( t *testing.T, From ab8847256026484f82146d49097d721df9e1d44e Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Tue, 16 Apr 2024 14:42:30 -0400 Subject: [PATCH 135/148] Update clusters.go --- cmd/util/cmd/common/clusters.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/util/cmd/common/clusters.go b/cmd/util/cmd/common/clusters.go index ef04bb3c94d..eb05f6217d5 100644 --- a/cmd/util/cmd/common/clusters.go +++ b/cmd/util/cmd/common/clusters.go @@ -66,7 +66,7 @@ func ConstructClusterAssignment(log zerolog.Logger, partnerNodes, internalNodes // to control strictly more than 2/3 of the cluster's total weight. // The following is a heuristic that distributes collectors round-robbin across the specified number of clusters. // This heuristic only works when all collectors have equal weight! The following sanity check enforces this: - if !(0 < len(partnerNodes) && len(partnerNodes) < 2*len(internalNodes)) { + if !(len(partnerNodes) > 0 && len(partnerNodes) < 2*len(internalNodes)) { return nil, nil, fmt.Errorf("requiring at least x>0 number of partner nodes and y > 2x number of internal nodes, but got x,y=%d,%d", len(partnerNodes), len(internalNodes)) } // sanity check ^ enforces that there is at least one internal node, hence `internalNodes[0].InitialWeight` is always a valid reference weight From 12e35346de7ffebdf5806abd7652fd744b637deb Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Tue, 16 Apr 2024 15:07:24 -0400 Subject: [PATCH 136/148] Update clusters.go --- cmd/util/cmd/common/clusters.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/util/cmd/common/clusters.go b/cmd/util/cmd/common/clusters.go index eb05f6217d5..dece8d7ad8f 100644 --- a/cmd/util/cmd/common/clusters.go +++ b/cmd/util/cmd/common/clusters.go @@ -66,7 +66,7 @@ func ConstructClusterAssignment(log zerolog.Logger, partnerNodes, internalNodes // to control strictly more than 2/3 of the cluster's total weight. 
// The following is a heuristic that distributes collectors round-robbin across the specified number of clusters. // This heuristic only works when all collectors have equal weight! The following sanity check enforces this: - if !(len(partnerNodes) > 0 && len(partnerNodes) < 2*len(internalNodes)) { + if !(len(partnerNodes) > 0 && len(partnerNodes) > 2*len(internalNodes)) { return nil, nil, fmt.Errorf("requiring at least x>0 number of partner nodes and y > 2x number of internal nodes, but got x,y=%d,%d", len(partnerNodes), len(internalNodes)) } // sanity check ^ enforces that there is at least one internal node, hence `internalNodes[0].InitialWeight` is always a valid reference weight From e64ad8a3fb6510d8b19d5a681d1fa6de72a0fa08 Mon Sep 17 00:00:00 2001 From: Khalil Claybon Date: Tue, 16 Apr 2024 15:08:13 -0400 Subject: [PATCH 137/148] Update clusters.go --- cmd/util/cmd/common/clusters.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/util/cmd/common/clusters.go b/cmd/util/cmd/common/clusters.go index dece8d7ad8f..3e912b6d224 100644 --- a/cmd/util/cmd/common/clusters.go +++ b/cmd/util/cmd/common/clusters.go @@ -66,7 +66,7 @@ func ConstructClusterAssignment(log zerolog.Logger, partnerNodes, internalNodes // to control strictly more than 2/3 of the cluster's total weight. // The following is a heuristic that distributes collectors round-robbin across the specified number of clusters. // This heuristic only works when all collectors have equal weight! 
The following sanity check enforces this: - if !(len(partnerNodes) > 0 && len(partnerNodes) > 2*len(internalNodes)) { + if len(partnerNodes) > 0 && len(partnerNodes) > 2*len(internalNodes) { return nil, nil, fmt.Errorf("requiring at least x>0 number of partner nodes and y > 2x number of internal nodes, but got x,y=%d,%d", len(partnerNodes), len(internalNodes)) } // sanity check ^ enforces that there is at least one internal node, hence `internalNodes[0].InitialWeight` is always a valid reference weight From ade69c99594aba3e258542f42a7b01ec1f949856 Mon Sep 17 00:00:00 2001 From: Giovanni Sanchez <108043524+sisyphusSmiling@users.noreply.github.com> Date: Tue, 16 Apr 2024 15:31:40 -0500 Subject: [PATCH 138/148] fix stdlib.ContractCode var mutation scope pointed out by @m-peter --- fvm/evm/stdlib/contract.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/fvm/evm/stdlib/contract.go b/fvm/evm/stdlib/contract.go index 7178d3ecc2f..95478c2f8bd 100644 --- a/fvm/evm/stdlib/contract.go +++ b/fvm/evm/stdlib/contract.go @@ -32,19 +32,19 @@ var fungibleTokenImportPattern = regexp.MustCompile(`(?m)^import "FungibleToken" var flowTokenImportPattern = regexp.MustCompile(`(?m)^import "FlowToken"`) func ContractCode(nonFungibleTokenAddress, fungibleTokenAddress, flowTokenAddress flow.Address) []byte { - contractCode = nftImportPattern.ReplaceAllString( + evmContract := nftImportPattern.ReplaceAllString( contractCode, fmt.Sprintf("import NonFungibleToken from %s", nonFungibleTokenAddress.HexWithPrefix()), ) - contractCode = fungibleTokenImportPattern.ReplaceAllString( - contractCode, + evmContract = fungibleTokenImportPattern.ReplaceAllString( + evmContract, fmt.Sprintf("import FungibleToken from %s", fungibleTokenAddress.HexWithPrefix()), ) - contractCode = flowTokenImportPattern.ReplaceAllString( - contractCode, + evmContract = flowTokenImportPattern.ReplaceAllString( + evmContract, fmt.Sprintf("import FlowToken from %s", 
flowTokenAddress.HexWithPrefix()), ) - return []byte(contractCode) + return []byte(evmContract) } const ContractName = "EVM" From 291ea4a2713935d4549f27754e3669369d02e280 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Tue, 16 Apr 2024 14:44:11 -0700 Subject: [PATCH 139/148] log the checkpoint file when using generating protocol snapshot from latest checkpoint file --- admin/commands/storage/read_protocol_snapshot.go | 13 +++++++++++-- cmd/util/cmd/read-protocol-state/cmd/snapshot.go | 7 ++++--- cmd/util/common/checkpoint.go | 14 +++++++------- 3 files changed, 22 insertions(+), 12 deletions(-) diff --git a/admin/commands/storage/read_protocol_snapshot.go b/admin/commands/storage/read_protocol_snapshot.go index 8b87164f230..738e6409936 100644 --- a/admin/commands/storage/read_protocol_snapshot.go +++ b/admin/commands/storage/read_protocol_snapshot.go @@ -57,7 +57,7 @@ func (s *ProtocolSnapshotCommand) Handler(_ context.Context, req *admin.CommandR s.logger.Info().Uint("blocksToSkip", blocksToSkip).Msgf("admintool: generating protocol snapshot") - snapshot, sealedHeight, commit, err := common.GenerateProtocolSnapshotForCheckpoint( + snapshot, sealedHeight, commit, checkpointFile, err := common.GenerateProtocolSnapshotForCheckpoint( s.logger, s.state, s.headers, s.seals, s.checkpointDir, blocksToSkip) if err != nil { return nil, fmt.Errorf("could not generate protocol snapshot for checkpoint, checkpointDir %v: %w", @@ -79,10 +79,19 @@ func (s *ProtocolSnapshotCommand) Handler(_ context.Context, req *admin.CommandR Hex("finalized_block_id", logging.Entity(header)). Uint64("sealed_height", sealedHeight). Hex("sealed_commit", commit[:]). // not the commit for the finalized height, but for the sealed height + Str("checkpoint_file", checkpointFile). Uint("blocks_to_skip", blocksToSkip). 
Msgf("admintool: protocol snapshot generated successfully") - return commands.ConvertToMap(serializable.Encodable()) + return commands.ConvertToMap(protocolSnapshotResponse{ + Snapshot: serializable.Encodable(), + Checkpoint: checkpointFile, + }) +} + +type protocolSnapshotResponse struct { + Snapshot inmem.EncodableSnapshot `json:"snapshot"` + Checkpoint string `json:"checkpoint"` } func (s *ProtocolSnapshotCommand) Validator(req *admin.CommandRequest) error { diff --git a/cmd/util/cmd/read-protocol-state/cmd/snapshot.go b/cmd/util/cmd/read-protocol-state/cmd/snapshot.go index 77a9d77777f..765a55fd02f 100644 --- a/cmd/util/cmd/read-protocol-state/cmd/snapshot.go +++ b/cmd/util/cmd/read-protocol-state/cmd/snapshot.go @@ -70,13 +70,14 @@ func runSnapshot(*cobra.Command, []string) { var protocolSnapshot protocol.Snapshot var sealedHeight uint64 var sealedCommit flow.StateCommitment + var checkpointFile string if flagCheckpointScanEndHeight < 0 { // using default end height which is the last sealed height - protocolSnapshot, sealedHeight, sealedCommit, err = commonFuncs.GenerateProtocolSnapshotForCheckpoint( + protocolSnapshot, sealedHeight, sealedCommit, checkpointFile, err = commonFuncs.GenerateProtocolSnapshotForCheckpoint( log.Logger, state, storages.Headers, storages.Seals, flagCheckpointDir, flagCheckpointScanStep) } else { // using customized end height - protocolSnapshot, sealedHeight, sealedCommit, err = commonFuncs.GenerateProtocolSnapshotForCheckpointWithHeights( + protocolSnapshot, sealedHeight, sealedCommit, checkpointFile, err = commonFuncs.GenerateProtocolSnapshotForCheckpointWithHeights( log.Logger, state, storages.Headers, storages.Seals, flagCheckpointDir, flagCheckpointScanStep, uint64(flagCheckpointScanEndHeight)) } @@ -85,7 +86,7 @@ func runSnapshot(*cobra.Command, []string) { } snapshot = protocolSnapshot - log.Info().Msgf("snapshot found, sealed height %v, commit %x", sealedHeight, sealedCommit) + log.Info().Msgf("snapshot found for checkpoint 
file %v, sealed height %v, commit %x", checkpointFile, sealedHeight, sealedCommit) } head, err := snapshot.Head() diff --git a/cmd/util/common/checkpoint.go b/cmd/util/common/checkpoint.go index bddcead9e49..098db2cc096 100644 --- a/cmd/util/common/checkpoint.go +++ b/cmd/util/common/checkpoint.go @@ -113,12 +113,12 @@ func GenerateProtocolSnapshotForCheckpoint( seals storage.Seals, checkpointDir string, blocksToSkip uint, -) (protocol.Snapshot, uint64, flow.StateCommitment, error) { +) (protocol.Snapshot, uint64, flow.StateCommitment, string, error) { // skip X blocks (i.e. 10) each time to find the block that produces the state commitment in the checkpoint file // since a checkpoint file contains 500 tries, this allows us to find the block more efficiently sealed, err := state.Sealed().Head() if err != nil { - return nil, 0, flow.DummyStateCommitment, err + return nil, 0, flow.DummyStateCommitment, "", err } endHeight := sealed.Height @@ -156,7 +156,7 @@ func GenerateProtocolSnapshotForCheckpointWithHeights( checkpointDir string, blocksToSkip uint, endHeight uint64, -) (protocol.Snapshot, uint64, flow.StateCommitment, error) { +) (protocol.Snapshot, uint64, flow.StateCommitment, string, error) { // Stop searching after 10,000 iterations or upon reaching the minimum height, whichever comes first. startHeight := uint64(0) // preventing startHeight from being negative @@ -167,7 +167,7 @@ func GenerateProtocolSnapshotForCheckpointWithHeights( checkpointFilePath, err := findLatestCheckpointFilePath(checkpointDir) if err != nil { - return nil, 0, flow.DummyStateCommitment, fmt.Errorf("could not find latest checkpoint file in directory %v: %w", checkpointDir, err) + return nil, 0, flow.DummyStateCommitment, "", fmt.Errorf("could not find latest checkpoint file in directory %v: %w", checkpointDir, err) } log.Info(). 
@@ -178,7 +178,7 @@ func GenerateProtocolSnapshotForCheckpointWithHeights( // find the height of the finalized block that produces the state commitment contained in the checkpoint file sealedHeight, commit, finalizedHeight, err := FindHeightsByCheckpoints(logger, headers, seals, checkpointFilePath, blocksToSkip, startHeight, endHeight) if err != nil { - return nil, 0, flow.DummyStateCommitment, fmt.Errorf("could not find sealed height in range [%v:%v] (blocksToSkip: %v) by checkpoints: %w", + return nil, 0, flow.DummyStateCommitment, "", fmt.Errorf("could not find sealed height in range [%v:%v] (blocksToSkip: %v) by checkpoints: %w", startHeight, endHeight, blocksToSkip, err) } @@ -186,10 +186,10 @@ func GenerateProtocolSnapshotForCheckpointWithHeights( snapshot := state.AtHeight(finalizedHeight) validSnapshot, err := snapshots.GetDynamicBootstrapSnapshot(state, snapshot) if err != nil { - return nil, 0, flow.DummyStateCommitment, fmt.Errorf("could not get dynamic bootstrap snapshot: %w", err) + return nil, 0, flow.DummyStateCommitment, "", fmt.Errorf("could not get dynamic bootstrap snapshot: %w", err) } - return validSnapshot, sealedHeight, commit, nil + return validSnapshot, sealedHeight, commit, checkpointFilePath, nil } // hashesToCommits converts a list of ledger.RootHash to a list of flow.StateCommitment From 78efce223123bca30ae3b1d8ccaa37f64898dbba Mon Sep 17 00:00:00 2001 From: Giovanni Sanchez <108043524+sisyphusSmiling@users.noreply.github.com> Date: Wed, 17 Apr 2024 12:39:43 -0500 Subject: [PATCH 140/148] add BridgeAccessorUpdated event & BridgeAccessor setter to EVM contract --- fvm/evm/stdlib/contract.cdc | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/fvm/evm/stdlib/contract.cdc b/fvm/evm/stdlib/contract.cdc index b70f5f90d3a..e8f4b63c291 100644 --- a/fvm/evm/stdlib/contract.cdc +++ b/fvm/evm/stdlib/contract.cdc @@ -21,6 +21,19 @@ contract EVM { access(all) event FLOWTokensWithdrawn(addressBytes: [UInt8; 20], amount: UFix64) + 
/// BridgeAccessorUpdated is emitted when the BridgeAccessor Capability + /// is updated in the stored BridgeRouter along with identifying + /// information about both. + access(all) + event BridgeAccessorUpdated( + routerType: Type, + routerUUID: UInt64, + routerAddress: Address, + accessorType: Type, + accessorUUID: UInt64, + accessorAddress: Address + ) + /// EVMAddress is an EVM-compatible address access(all) struct EVMAddress { @@ -589,6 +602,13 @@ contract EVM { /// Returns a reference to the BridgeAccessor designated for internal bridge requests access(all) view fun borrowBridgeAccessor(): &{BridgeAccessor} + + /// Sets the BridgeAccessor Capability in the BridgeRouter + access(all) fun setBridgeAccessor(_ accessor: Capability<&{BridgeAccessor}>) { + pre { + accessor.check(): "Invalid BridgeAccessor Capability provided" + } + } } /// Returns a reference to the BridgeAccessor designated for internal bridge requests From 5aad29ee391c0b4b519f8196aac8b077bd87d166 Mon Sep 17 00:00:00 2001 From: Giovanni Sanchez <108043524+sisyphusSmiling@users.noreply.github.com> Date: Wed, 17 Apr 2024 16:39:01 -0500 Subject: [PATCH 141/148] fix failing fvm tests --- fvm/fvm_test.go | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/fvm/fvm_test.go b/fvm/fvm_test.go index 6812f0c9bfe..4cc9059cbdf 100644 --- a/fvm/fvm_test.go +++ b/fvm/fvm_test.go @@ -3071,6 +3071,9 @@ func TestEVM(t *testing.T) { ).Return(block1.Header, nil) ctxOpts := []fvm.Option{ + // default is testnet, but testnet has a special EVM storage contract location + // so we have to use emulator here so that the EVM storage contract is deployed + // to the 5th address fvm.WithChain(flow.Emulator.Chain()), fvm.WithEVMEnabled(true), fvm.WithBlocks(blocks), @@ -3226,15 +3229,8 @@ func TestEVM(t *testing.T) { ) t.Run("deploy contract code", newVMTest(). - withBootstrapProcedureOptions( - fvm.WithSetupEVMEnabled(true), - ). 
- withContextOptions( - // default is testnet, but testnet has a special EVM storage contract location - // so we have to use emulator here so that the EVM storage contract is deployed - // to the 5th address - fvm.WithChain(flow.Emulator.Chain()), - ). + withBootstrapProcedureOptions(fvm.WithSetupEVMEnabled(true)). + withContextOptions(ctxOpts...). run(func( t *testing.T, vm fvm.VM, From 2a6c870d6e32f9f8ab9f2d73a6edf251d68361d4 Mon Sep 17 00:00:00 2001 From: Andrii Slisarchuk Date: Thu, 18 Apr 2024 00:52:48 +0300 Subject: [PATCH 142/148] fixed remarks --- engine/access/rpc/backend/backend.go | 1 + engine/access/rpc/backend/backend_stream_transactions.go | 4 ++++ go.mod | 4 +--- go.sum | 4 ++-- insecure/go.mod | 4 +--- insecure/go.sum | 4 ++-- integration/go.mod | 4 +--- integration/go.sum | 4 ++-- 8 files changed, 14 insertions(+), 15 deletions(-) diff --git a/engine/access/rpc/backend/backend.go b/engine/access/rpc/backend/backend.go index cc9da264c49..1b7ef03ddba 100644 --- a/engine/access/rpc/backend/backend.go +++ b/engine/access/rpc/backend/backend.go @@ -262,6 +262,7 @@ func New(params Params) (*Backend, error) { systemTxID: systemTxID, } + // TODO: The TransactionErrorMessage interface should be reorganized in future, as it is implemented in backendTransactions but used in TransactionsLocalDataProvider, and its initialization is somewhat quirky. b.backendTransactions.txErrorMessages = b b.backendSubscribeTransactions = backendSubscribeTransactions{ diff --git a/engine/access/rpc/backend/backend_stream_transactions.go b/engine/access/rpc/backend/backend_stream_transactions.go index 846ceb38cb1..b8908365f32 100644 --- a/engine/access/rpc/backend/backend_stream_transactions.go +++ b/engine/access/rpc/backend/backend_stream_transactions.go @@ -151,6 +151,7 @@ func (b *backendSubscribeTransactions) getTransactionStatusResponse(txInfo *Tran // Possible orders of transaction statuses: // 1. pending(1) -> finalized(2) -> executed(3) -> sealed(4) // 2. 
pending(1) -> expired(5) +// No errors expected during normal operations. func (b *backendSubscribeTransactions) generateResultsWithMissingStatuses( txInfo *TransactionSubscriptionMetadata, prevTxStatus flow.TransactionStatus, @@ -200,6 +201,9 @@ func (b *backendSubscribeTransactions) generateResultsWithMissingStatuses( return results, nil } +// checkBlockReady checks if the given block height is valid and available based on the expected block status. +// Expected errors during normal operation: +// - subscription.ErrBlockNotReady: block for the given block height is not available. func (b *backendSubscribeTransactions) checkBlockReady(height uint64) error { // Get the highest available finalized block height highestHeight, err := b.blockTracker.GetHighestHeight(flow.BlockStatusFinalized) diff --git a/go.mod b/go.mod index fb57d79255a..636f9cda7d6 100644 --- a/go.mod +++ b/go.mod @@ -57,7 +57,7 @@ require ( github.com/onflow/flow-core-contracts/lib/go/contracts v0.15.1 github.com/onflow/flow-core-contracts/lib/go/templates v0.15.1 github.com/onflow/flow-go-sdk v0.44.0 - github.com/onflow/flow/protobuf/go/flow v0.3.7-0.20240404170900-c321c1475f1e + github.com/onflow/flow/protobuf/go/flow v0.4.1-0.20240412170550-911321113030 github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 github.com/pierrec/lz4 v2.6.1+incompatible github.com/pkg/errors v0.9.1 @@ -320,5 +320,3 @@ require ( // Using custom fork until https://github.com/onflow/flow-go/issues/5338 is resolved replace github.com/ipfs/boxo => github.com/onflow/boxo v0.0.0-20240201202436-f2477b92f483 - -replace github.com/onflow/flow/protobuf/go/flow v0.3.7-0.20240404170900-c321c1475f1e => github.com/The-K-R-O-K/flow/protobuf/go/flow v0.0.0-20240404225450-1c90bb644fe1 diff --git a/go.sum b/go.sum index f5b63549c42..17f4ba300d5 100644 --- a/go.sum +++ b/go.sum @@ -111,8 +111,6 @@ github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMx github.com/StackExchange/wmi 
v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= -github.com/The-K-R-O-K/flow/protobuf/go/flow v0.0.0-20240404225450-1c90bb644fe1 h1:T89Ybbj5UYJWBOfXA/c5NyrKmnlccP6gP6CaCu1xE6k= -github.com/The-K-R-O-K/flow/protobuf/go/flow v0.0.0-20240404225450-1c90bb644fe1/go.mod h1:NA2pX2nw8zuaxfKphhKsk00kWLwfd+tv8mS23YXO4Sk= github.com/VictoriaMetrics/fastcache v1.5.3/go.mod h1:+jv9Ckb+za/P1ZRg/sulP5Ni1v49daAVERr0H3CuscE= github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40= github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o= @@ -1372,6 +1370,8 @@ github.com/onflow/flow-go/crypto v0.21.3/go.mod h1:vI6V4CY3R6c4JKBxdcRiR/AnjBfL8 github.com/onflow/flow-nft/lib/go/contracts v1.1.0 h1:rhUDeD27jhLwOqQKI/23008CYfnqXErrJvc4EFRP2a0= github.com/onflow/flow-nft/lib/go/contracts v1.1.0/go.mod h1:YsvzYng4htDgRB9sa9jxdwoTuuhjK8WYWXTyLkIigZY= github.com/onflow/flow/protobuf/go/flow v0.2.2/go.mod h1:gQxYqCfkI8lpnKsmIjwtN2mV/N2PIwc1I+RUK4HPIc8= +github.com/onflow/flow/protobuf/go/flow v0.4.1-0.20240412170550-911321113030 h1:I+aosSiJny88O4p3nPbCiUcp/UqN6AepvO6uj82bjH0= +github.com/onflow/flow/protobuf/go/flow v0.4.1-0.20240412170550-911321113030/go.mod h1:NA2pX2nw8zuaxfKphhKsk00kWLwfd+tv8mS23YXO4Sk= github.com/onflow/go-ethereum v1.13.4 h1:iNO86fm8RbBbhZ87ZulblInqCdHnAQVY8okBrNsTevc= github.com/onflow/go-ethereum v1.13.4/go.mod h1:cE/gEUkAffhwbVmMJYz+t1dAfVNHNwZCgc3BWtZxBGY= github.com/onflow/sdks v0.5.0 h1:2HCRibwqDaQ1c9oUApnkZtEAhWiNY2GTpRD5+ftdkN8= diff --git a/insecure/go.mod b/insecure/go.mod index 5e18c7a09e2..5d2b0cba848 100644 --- a/insecure/go.mod +++ b/insecure/go.mod @@ -210,7 +210,7 @@ require ( github.com/onflow/flow-ft/lib/go/contracts v0.7.1-0.20230711213910-baad011d2b13 // indirect 
github.com/onflow/flow-go-sdk v0.46.0 // indirect github.com/onflow/flow-nft/lib/go/contracts v1.1.0 // indirect - github.com/onflow/flow/protobuf/go/flow v0.3.7-0.20240404170900-c321c1475f1e // indirect + github.com/onflow/flow/protobuf/go/flow v0.4.1-0.20240412170550-911321113030 // indirect github.com/onflow/go-ethereum v1.13.4 // indirect github.com/onflow/sdks v0.5.0 // indirect github.com/onflow/wal v0.0.0-20240208022732-d756cd497d3b // indirect @@ -304,5 +304,3 @@ require ( ) replace github.com/onflow/flow-go => ../ - -replace github.com/onflow/flow/protobuf/go/flow v0.3.7-0.20240404170900-c321c1475f1e => github.com/The-K-R-O-K/flow/protobuf/go/flow v0.0.0-20240404225450-1c90bb644fe1 diff --git a/insecure/go.sum b/insecure/go.sum index 4c37e7d0b4e..7c356c47ac0 100644 --- a/insecure/go.sum +++ b/insecure/go.sum @@ -101,8 +101,6 @@ github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMx github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= -github.com/The-K-R-O-K/flow/protobuf/go/flow v0.0.0-20240404225450-1c90bb644fe1 h1:T89Ybbj5UYJWBOfXA/c5NyrKmnlccP6gP6CaCu1xE6k= -github.com/The-K-R-O-K/flow/protobuf/go/flow v0.0.0-20240404225450-1c90bb644fe1/go.mod h1:NA2pX2nw8zuaxfKphhKsk00kWLwfd+tv8mS23YXO4Sk= github.com/VictoriaMetrics/fastcache v1.5.3/go.mod h1:+jv9Ckb+za/P1ZRg/sulP5Ni1v49daAVERr0H3CuscE= github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40= github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o= @@ -1335,6 +1333,8 @@ github.com/onflow/flow-go/crypto v0.21.3/go.mod h1:vI6V4CY3R6c4JKBxdcRiR/AnjBfL8 github.com/onflow/flow-nft/lib/go/contracts v1.1.0 h1:rhUDeD27jhLwOqQKI/23008CYfnqXErrJvc4EFRP2a0= 
github.com/onflow/flow-nft/lib/go/contracts v1.1.0/go.mod h1:YsvzYng4htDgRB9sa9jxdwoTuuhjK8WYWXTyLkIigZY= github.com/onflow/flow/protobuf/go/flow v0.2.2/go.mod h1:gQxYqCfkI8lpnKsmIjwtN2mV/N2PIwc1I+RUK4HPIc8= +github.com/onflow/flow/protobuf/go/flow v0.4.1-0.20240412170550-911321113030 h1:I+aosSiJny88O4p3nPbCiUcp/UqN6AepvO6uj82bjH0= +github.com/onflow/flow/protobuf/go/flow v0.4.1-0.20240412170550-911321113030/go.mod h1:NA2pX2nw8zuaxfKphhKsk00kWLwfd+tv8mS23YXO4Sk= github.com/onflow/go-ethereum v1.13.4 h1:iNO86fm8RbBbhZ87ZulblInqCdHnAQVY8okBrNsTevc= github.com/onflow/go-ethereum v1.13.4/go.mod h1:cE/gEUkAffhwbVmMJYz+t1dAfVNHNwZCgc3BWtZxBGY= github.com/onflow/sdks v0.5.0 h1:2HCRibwqDaQ1c9oUApnkZtEAhWiNY2GTpRD5+ftdkN8= diff --git a/integration/go.mod b/integration/go.mod index e376afc4ef1..0bef55b212f 100644 --- a/integration/go.mod +++ b/integration/go.mod @@ -28,7 +28,7 @@ require ( github.com/onflow/flow-go v0.33.2-0.20240404171354-0b0592cc5bba github.com/onflow/flow-go-sdk v0.46.0 github.com/onflow/flow-go/insecure v0.0.0-00010101000000-000000000000 - github.com/onflow/flow/protobuf/go/flow v0.3.7-0.20240404170900-c321c1475f1e + github.com/onflow/flow/protobuf/go/flow v0.4.1-0.20240412170550-911321113030 github.com/onflow/go-ethereum v1.13.4 github.com/plus3it/gorecurcopy v0.0.1 github.com/prometheus/client_golang v1.18.0 @@ -362,6 +362,4 @@ replace github.com/onflow/flow-go => ../ replace github.com/onflow/flow-go/insecure => ../insecure -replace github.com/onflow/flow/protobuf/go/flow v0.3.7-0.20240404170900-c321c1475f1e => github.com/The-K-R-O-K/flow/protobuf/go/flow v0.0.0-20240404225450-1c90bb644fe1 - replace github.com/onflow/flow-emulator v0.61.2-0.20240404201132-f53137a8e4cb => github.com/The-K-R-O-K/flow-emulator v0.61.2-0.20240405094817-617a62ed021c diff --git a/integration/go.sum b/integration/go.sum index b3686fb75c2..cea6f1fa584 100644 --- a/integration/go.sum +++ b/integration/go.sum @@ -114,8 +114,6 @@ github.com/StackExchange/wmi v1.2.1 
h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDO github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= github.com/The-K-R-O-K/flow-emulator v0.61.2-0.20240405094817-617a62ed021c h1:hEzq06oyMejfBy8uwFnmTXpopYJyx8AiJ2UdiTdcy/o= github.com/The-K-R-O-K/flow-emulator v0.61.2-0.20240405094817-617a62ed021c/go.mod h1:zQ57tVAxMwwRnPh/veQXS5M9JPAn2uVKkcBH0wGjQc4= -github.com/The-K-R-O-K/flow/protobuf/go/flow v0.0.0-20240404225450-1c90bb644fe1 h1:T89Ybbj5UYJWBOfXA/c5NyrKmnlccP6gP6CaCu1xE6k= -github.com/The-K-R-O-K/flow/protobuf/go/flow v0.0.0-20240404225450-1c90bb644fe1/go.mod h1:NA2pX2nw8zuaxfKphhKsk00kWLwfd+tv8mS23YXO4Sk= github.com/VictoriaMetrics/fastcache v1.5.3/go.mod h1:+jv9Ckb+za/P1ZRg/sulP5Ni1v49daAVERr0H3CuscE= github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40= github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o= @@ -1427,6 +1425,8 @@ github.com/onflow/flow-go/crypto v0.21.3/go.mod h1:vI6V4CY3R6c4JKBxdcRiR/AnjBfL8 github.com/onflow/flow-nft/lib/go/contracts v1.1.0 h1:rhUDeD27jhLwOqQKI/23008CYfnqXErrJvc4EFRP2a0= github.com/onflow/flow-nft/lib/go/contracts v1.1.0/go.mod h1:YsvzYng4htDgRB9sa9jxdwoTuuhjK8WYWXTyLkIigZY= github.com/onflow/flow/protobuf/go/flow v0.2.2/go.mod h1:gQxYqCfkI8lpnKsmIjwtN2mV/N2PIwc1I+RUK4HPIc8= +github.com/onflow/flow/protobuf/go/flow v0.4.1-0.20240412170550-911321113030 h1:I+aosSiJny88O4p3nPbCiUcp/UqN6AepvO6uj82bjH0= +github.com/onflow/flow/protobuf/go/flow v0.4.1-0.20240412170550-911321113030/go.mod h1:NA2pX2nw8zuaxfKphhKsk00kWLwfd+tv8mS23YXO4Sk= github.com/onflow/go-ethereum v1.13.4 h1:iNO86fm8RbBbhZ87ZulblInqCdHnAQVY8okBrNsTevc= github.com/onflow/go-ethereum v1.13.4/go.mod h1:cE/gEUkAffhwbVmMJYz+t1dAfVNHNwZCgc3BWtZxBGY= github.com/onflow/nft-storefront/lib/go/contracts v0.0.0-20221222181731-14b90207cead h1:2j1Unqs76Z1b95Gu4C3Y28hzNUHBix7wL490e61SMSw= From 470b4266a257aa02442b03b6e3ebd3f95b0edcc0 Mon Sep 17 00:00:00 2001 
From: Andrii Slisarchuk Date: Thu, 18 Apr 2024 17:18:56 +0300 Subject: [PATCH 143/148] Changed emulator version --- integration/go.mod | 6 +++--- integration/go.sum | 8 ++++---- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/integration/go.mod b/integration/go.mod index 0bef55b212f..0016f4554bf 100644 --- a/integration/go.mod +++ b/integration/go.mod @@ -24,9 +24,9 @@ require ( github.com/onflow/crypto v0.25.1 github.com/onflow/flow-core-contracts/lib/go/contracts v0.15.1 github.com/onflow/flow-core-contracts/lib/go/templates v0.15.1 - github.com/onflow/flow-emulator v0.61.2-0.20240404201132-f53137a8e4cb - github.com/onflow/flow-go v0.33.2-0.20240404171354-0b0592cc5bba - github.com/onflow/flow-go-sdk v0.46.0 + github.com/onflow/flow-emulator v0.62.2-0.20240418140508-d969ff66d9cd + github.com/onflow/flow-go v0.33.2-0.20240412174857-015156b297b5 + github.com/onflow/flow-go-sdk v0.46.2 github.com/onflow/flow-go/insecure v0.0.0-00010101000000-000000000000 github.com/onflow/flow/protobuf/go/flow v0.4.1-0.20240412170550-911321113030 github.com/onflow/go-ethereum v1.13.4 diff --git a/integration/go.sum b/integration/go.sum index cea6f1fa584..f2f704a4c86 100644 --- a/integration/go.sum +++ b/integration/go.sum @@ -112,8 +112,6 @@ github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMx github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= -github.com/The-K-R-O-K/flow-emulator v0.61.2-0.20240405094817-617a62ed021c h1:hEzq06oyMejfBy8uwFnmTXpopYJyx8AiJ2UdiTdcy/o= -github.com/The-K-R-O-K/flow-emulator v0.61.2-0.20240405094817-617a62ed021c/go.mod h1:zQ57tVAxMwwRnPh/veQXS5M9JPAn2uVKkcBH0wGjQc4= github.com/VictoriaMetrics/fastcache v1.5.3/go.mod h1:+jv9Ckb+za/P1ZRg/sulP5Ni1v49daAVERr0H3CuscE= 
github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40= github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o= @@ -1416,11 +1414,13 @@ github.com/onflow/flow-core-contracts/lib/go/contracts v0.15.1 h1:xF5wHug6H8vKfz github.com/onflow/flow-core-contracts/lib/go/contracts v0.15.1/go.mod h1:WHp24VkUQfcfZi0XjI1uRVRt5alM5SHVkwOil1U2Tpc= github.com/onflow/flow-core-contracts/lib/go/templates v0.15.1 h1:EjWjbyVEA+bMxXbM44dE6MsYeqOu5a9q/EwSWa4ma2M= github.com/onflow/flow-core-contracts/lib/go/templates v0.15.1/go.mod h1:c09d6sNyF/j5/pAynK7sNPb1XKqJqk1rxZPEqEL+dUo= +github.com/onflow/flow-emulator v0.62.2-0.20240418140508-d969ff66d9cd h1:bR5IxvTK4HApiJt+OP+mLNKkVkr75piaLu8wDT6uKDA= +github.com/onflow/flow-emulator v0.62.2-0.20240418140508-d969ff66d9cd/go.mod h1:ONxdb0U5kE7XK8B1ZAAo6JAzYRAtC6oh9I8WAfi9I+E= github.com/onflow/flow-ft/lib/go/contracts v0.7.1-0.20230711213910-baad011d2b13 h1:B4ll7e3j+MqTJv2122Enq3RtDNzmIGRu9xjV7fo7un0= github.com/onflow/flow-ft/lib/go/contracts v0.7.1-0.20230711213910-baad011d2b13/go.mod h1:kTMFIySzEJJeupk+7EmXs0EJ6CBWY/MV9fv9iYQk+RU= github.com/onflow/flow-go-sdk v0.24.0/go.mod h1:IoptMLPyFXWvyd9yYA6/4EmSeeozl6nJoIv4FaEMg74= -github.com/onflow/flow-go-sdk v0.46.0 h1:mrIQziCDe6Oi5HH/aPFvYluh1XUwO6lYpoXLWrBZc2s= -github.com/onflow/flow-go-sdk v0.46.0/go.mod h1:azVWF0yHI8wT1erF0vuYGqQZybl6Frbc+0Zu3rIPeHc= +github.com/onflow/flow-go-sdk v0.46.2 h1:ypVGBeH9m5XpBOTU/CYVC0y/+z42e8mhUlq5aLiD24A= +github.com/onflow/flow-go-sdk v0.46.2/go.mod h1:tfLjB9FZmwqtT5gaacjvpIhz7KCd67YPm6v+iqYAjEA= github.com/onflow/flow-go/crypto v0.21.3/go.mod h1:vI6V4CY3R6c4JKBxdcRiR/AnjBfL8OSD97bJc60cLuQ= github.com/onflow/flow-nft/lib/go/contracts v1.1.0 h1:rhUDeD27jhLwOqQKI/23008CYfnqXErrJvc4EFRP2a0= github.com/onflow/flow-nft/lib/go/contracts v1.1.0/go.mod h1:YsvzYng4htDgRB9sa9jxdwoTuuhjK8WYWXTyLkIigZY= From e2d1b51753cc1e01e05ee529cfaf11f03f2cffea Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Bastian=20M=C3=BCller?= Date: Thu, 18 Apr 2024 15:32:40 -0700 Subject: [PATCH 144/148] fix EVM load test: provide blocks and block header --- integration/benchmark/load/load_type_test.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/integration/benchmark/load/load_type_test.go b/integration/benchmark/load/load_type_test.go index 1517924a7e5..fee4c2b118f 100644 --- a/integration/benchmark/load/load_type_test.go +++ b/integration/benchmark/load/load_type_test.go @@ -19,6 +19,7 @@ import ( "github.com/onflow/flow-go/engine/execution/testutil" "github.com/onflow/flow-go/fvm" "github.com/onflow/flow-go/fvm/environment" + envMock "github.com/onflow/flow-go/fvm/environment/mock" "github.com/onflow/flow-go/fvm/storage/snapshot" "github.com/onflow/flow-go/integration/benchmark/account" "github.com/onflow/flow-go/integration/benchmark/common" @@ -119,12 +120,21 @@ func testLoad(log zerolog.Logger, l load.Load) func(t *testing.T) { func bootstrapVM(t *testing.T, chain flow.Chain) (*fvm.VirtualMachine, fvm.Context, snapshot.SnapshotTree) { source := testutil.EntropyProviderFixture(nil) + blocks := new(envMock.Blocks) + block1 := unittest.BlockFixture() + blocks.On("ByHeightFrom", + block1.Header.Height, + block1.Header, + ).Return(block1.Header, nil) + opts := computation.DefaultFVMOptions(chain.ChainID(), false, false) opts = append(opts, fvm.WithTransactionFeesEnabled(true), fvm.WithAccountStorageLimit(true), fvm.WithContractDeploymentRestricted(false), fvm.WithEntropyProvider(source), + fvm.WithBlocks(blocks), + fvm.WithBlockHeader(block1.Header), ) ctx := fvm.NewContext(opts...) 
From 0fca805da1cc7876b0a8325f1cff2715c2475d44 Mon Sep 17 00:00:00 2001 From: Andrii Slisarchuk Date: Sat, 20 Apr 2024 01:16:44 +0300 Subject: [PATCH 145/148] removed replace --- integration/go.mod | 2 -- 1 file changed, 2 deletions(-) diff --git a/integration/go.mod b/integration/go.mod index 0016f4554bf..5e1a1997f5e 100644 --- a/integration/go.mod +++ b/integration/go.mod @@ -361,5 +361,3 @@ require ( replace github.com/onflow/flow-go => ../ replace github.com/onflow/flow-go/insecure => ../insecure - -replace github.com/onflow/flow-emulator v0.61.2-0.20240404201132-f53137a8e4cb => github.com/The-K-R-O-K/flow-emulator v0.61.2-0.20240405094817-617a62ed021c From e53ae2c2597bc9177d14e156c016929f838660c0 Mon Sep 17 00:00:00 2001 From: Jan Bernatik Date: Mon, 22 Apr 2024 19:16:06 +0200 Subject: [PATCH 146/148] port https://github.com/onflow/flow-go/pull/5750 --- cmd/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/Dockerfile b/cmd/Dockerfile index ba858d12b9c..5a746b3bd70 100644 --- a/cmd/Dockerfile +++ b/cmd/Dockerfile @@ -37,7 +37,7 @@ WORKDIR /app ARG GOARCH=amd64 # TAGS can be overriden to modify the go build tags (e.g. 
build without netgo) -ARG TAGS="netgo" +ARG TAGS="netgo,osusergo" # CC flag can be overwritten to specify a C compiler ARG CC="" # CGO_FLAG uses ADX instructions by default, flag can be overwritten to build without ADX From 9b0c220aaf72da7267a440a7b01d517eed7b625b Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Fri, 12 Apr 2024 11:39:56 -0700 Subject: [PATCH 147/148] fix traversal error handling --- state/fork/traversal.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/state/fork/traversal.go b/state/fork/traversal.go index 18fdcdcbc36..40ce200770b 100644 --- a/state/fork/traversal.go +++ b/state/fork/traversal.go @@ -107,9 +107,11 @@ func unsafeTraverse(headers storage.Headers, block *flow.Header, visitor onVisit return block, nil } - block, err = headers.ByBlockID(block.ParentID) + parent, err := headers.ByBlockID(block.ParentID) if err != nil { - return nil, fmt.Errorf("failed to revtrieve block header %x: %w", block.ParentID, err) + return nil, fmt.Errorf("failed to revtrieve block header %v (%x): %w", block.Height, block.ParentID, err) } + + block = parent } } From 4860e671fbc0281f0e9be43b7d91e8a147588d4a Mon Sep 17 00:00:00 2001 From: Leo Zhang Date: Mon, 15 Apr 2024 08:57:14 -0700 Subject: [PATCH 148/148] Update state/fork/traversal.go Co-authored-by: Jordan Schalm --- state/fork/traversal.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/state/fork/traversal.go b/state/fork/traversal.go index 40ce200770b..f66633884ca 100644 --- a/state/fork/traversal.go +++ b/state/fork/traversal.go @@ -109,7 +109,7 @@ func unsafeTraverse(headers storage.Headers, block *flow.Header, visitor onVisit parent, err := headers.ByBlockID(block.ParentID) if err != nil { - return nil, fmt.Errorf("failed to revtrieve block header %v (%x): %w", block.Height, block.ParentID, err) + return nil, fmt.Errorf("failed to retrieve block header (id=%x height=%d): %w", block.ParentID, block.Height-1, err) } block = parent