diff --git a/.github/workflows/workflow.yml b/.github/workflows/workflow.yml index 2a1f53c9cca..93bc5dc2280 100644 --- a/.github/workflows/workflow.yml +++ b/.github/workflows/workflow.yml @@ -189,6 +189,8 @@ jobs: run: | ./ci-bin/capture-log "DB=etl bundle exec rake db:create db:schema:load db:migrate" ./ci-bin/capture-log "bundle exec rake db:create db:schema:load db:migrate" + ./ci-bin/capture-log "make -f Makefile.example external-db-create" + # added line to create external table(s) that are needed for tests # We don't want to seed DBs here because DatabaseCleaner just truncates it anyway. The setup_vacols # rake task needs to be run because it adds data to two tables that are ignored by DBCleaner diff --git a/Gemfile b/Gemfile index d334b2634f8..71579c27889 100644 --- a/Gemfile +++ b/Gemfile @@ -47,8 +47,8 @@ gem "paranoia", "~> 2.2" gem "pdf-forms" # Used in Caseflow Dispatch gem "pdfjs_viewer-rails", git: "https://github.com/senny/pdfjs_viewer-rails.git", ref: "a4249eacbf70175db63b57e9f364d0a9a79e2b43" -#Used to build out PDF files on the backend -#https://github.com/pdfkit/pdfkit +# Used to build out PDF files on the backend +# https://github.com/pdfkit/pdfkit gem "pdfkit" gem "pg", platforms: :ruby # Application server: Puma @@ -61,6 +61,7 @@ gem "rails", "5.2.4.6" gem "rainbow" # React gem "react_on_rails", "11.3.0" +gem "redis-mutex" gem "redis-namespace" gem "redis-rails", "~> 5.0.2" gem "request_store" diff --git a/Gemfile.lock b/Gemfile.lock index 710c4378d8d..b30d239e337 100644 --- a/Gemfile.lock +++ b/Gemfile.lock @@ -523,6 +523,10 @@ GEM redis-activesupport (5.0.4) activesupport (>= 3, < 6) redis-store (>= 1.3, < 2) + redis-classy (2.4.1) + redis-namespace (~> 1.0) + redis-mutex (4.0.2) + redis-classy (~> 2.0) redis-namespace (1.6.0) redis (>= 3.0.4) redis-rack (2.0.4) @@ -783,6 +787,7 @@ DEPENDENCIES rainbow rb-readline react_on_rails (= 11.3.0) + redis-mutex redis-namespace redis-rails (~> 5.0.2) request_store diff --git a/Makefile.example 
b/Makefile.example index 6b6ac721986..3d187a5428f 100644 --- a/Makefile.example +++ b/Makefile.example @@ -156,20 +156,44 @@ audit: ## Create caseflow_audit schema, tables, and triggers in postgres bundle exec rails r db/scripts/audit/tables/create_vbms_distributions_audit.rb bundle exec rails r db/scripts/audit/tables/create_vbms_distribution_destinations_audit.rb bundle exec rails r db/scripts/audit/tables/create_vbms_uploaded_documents_audit.rb + bundle exec rails r db/scripts/audit/tables/create_priority_end_product_sync_queue_audit.rb bundle exec rails r db/scripts/audit/functions/add_row_to_appeal_states_audit_table_function.rb bundle exec rails r db/scripts/audit/functions/add_row_to_vbms_communication_packages_audit_table_function.rb bundle exec rails r db/scripts/audit/functions/add_row_to_vbms_distributions_audit_table_function.rb bundle exec rails r db/scripts/audit/functions/add_row_to_vbms_distribution_destinations_audit_table_function.rb bundle exec rails r db/scripts/audit/functions/add_row_to_vbms_uploaded_documents_audit_table_function.rb + bundle exec rails r db/scripts/audit/functions/add_row_to_priority_end_product_sync_queue_audit_table_function.rb bundle exec rails r db/scripts/audit/triggers/create_appeal_states_audit_trigger.rb bundle exec rails r db/scripts/audit/triggers/create_vbms_communication_packages_audit_trigger.rb bundle exec rails r db/scripts/audit/triggers/create_vbms_distributions_audit_trigger.rb bundle exec rails r db/scripts/audit/triggers/create_vbms_distribution_destinations_audit_trigger.rb bundle exec rails r db/scripts/audit/triggers/create_vbms_uploaded_documents_audit_trigger.rb + bundle exec rails r db/scripts/audit/triggers/create_priority_end_product_sync_queue_audit_trigger.rb audit-remove: ## Remove caseflow_audit schema, tables and triggers in postgres bundle exec rails r db/scripts/audit/remove_caseflow_audit_schema.rb +# The external-db make commands create/remove replicas (for local environment only) of 
external db tables that exist in Prod +# These tables should not be included as part of migrations +external-db-create: ## Creates external_vbms_ext_claim table + bundle exec rails r db/scripts/external/create_vbms_ext_claim_table.rb + +external-db-remove: ## Remove external_vbms_ext_claim table + bundle exec rails r db/scripts/external/remove_vbms_ext_claim_table.rb + +# This needs to be manually run after make reset/migrate in order for local tests involving external tables to pass. +# Otherwise the caseflow_certification_test schema will not create these tables and will error out. +external-db-create-test: ## Creates table in caseflow_certification_test DB for local RSPEC tests + bundle exec rails r -e test db/scripts/external/create_vbms_ext_claim_table.rb + +remove-vbms-ext-claim-seeds: ## Drops audit tables, removes all PriorityEndProductSyncQueue, BatchProcess, and seed-vbms-ext-claim records, then rebuilds audit tables + make audit-remove + make external-db-create + bundle exec rails r db/scripts/external/remove_vbms_ext_claim_seeds.rb + make audit + +reseed-vbms-ext-claim: remove-vbms-ext-claim-seeds seed-vbms-ext-claim ## Re-seeds database with records created from seed-vbms-ext-claim + c: ## Start rails console bundle exec rails console @@ -188,7 +212,7 @@ db-migrate: ## Migrate main Caseflow db db-rollback: ## Rollback main Caseflow db bundle exec rake db:rollback -migrate: etl-migrate etl-test-prepare db-migrate ## Migrate all Rails databases +migrate: external-db-remove etl-migrate etl-test-prepare db-migrate ## Migrate all non-external Rails databases rollback: etl-rollback db-rollback ## Rollback all Rails databases @@ -199,9 +223,14 @@ reset: reset-dbs seed-dbs enable-feature-flags ## Resets databases and enable fe reset-dbs: ## Resets Caseflow and ETL database schemas make audit-remove + make external-db-remove DB=etl bundle exec rake db:drop db:create db:schema:load bundle exec rake db:drop db:create db:schema:load make audit + make 
external-db-create + +seed-vbms-ext-claim: ## Seed only vbms_ext_claim + bundle exec rake db:seed:vbms_ext_claim seed-dbs: ## Seed all databases bundle exec rake local:vacols:seed diff --git a/app/jobs/batch_processes/batch_process_rescue_job.rb b/app/jobs/batch_processes/batch_process_rescue_job.rb new file mode 100644 index 00000000000..890c6820c28 --- /dev/null +++ b/app/jobs/batch_processes/batch_process_rescue_job.rb @@ -0,0 +1,28 @@ +# frozen_string_literal: true + +# This job will search for and reprocess unfinished Batch Processes nightly. +# Search Criteria is for Batch Processes that are in an unfinished state ('PRE_PROCESSING', 'PROCESSING') & +# have a created_at date/time that is greater than the ERROR_DELAY defined within batch_process.rb +class BatchProcessRescueJob < CaseflowJob + queue_with_priority :low_priority + + # :reek:FeatureEnvy + def perform + batches = BatchProcess.needs_reprocessing + if batches.any? + batches.each do |batch| + begin + batch.process_batch! + rescue StandardError => error + log_error(error, extra: { active_job_id: job_id.to_s, job_time: Time.zone.now.to_s }) + slack_msg = "Error running #{self.class.name}. Error: #{error.message}. Active Job ID: #{job_id}." + slack_msg += " See Sentry event #{Raven.last_event_id}." if Raven.last_event_id.present? + slack_service.send_notification("[ERROR] #{slack_msg}", self.class.to_s) + next + end + end + else + Rails.logger.info("No Unfinished Batches Could Be Identified. Time: #{Time.zone.now}.") + end + end +end diff --git a/app/jobs/batch_processes/priority_ep_sync_batch_process_job.rb b/app/jobs/batch_processes/priority_ep_sync_batch_process_job.rb new file mode 100644 index 00000000000..539ed5a050f --- /dev/null +++ b/app/jobs/batch_processes/priority_ep_sync_batch_process_job.rb @@ -0,0 +1,82 @@ +# frozen_string_literal: true + +class PriorityEpSyncBatchProcessJob < CaseflowJob + queue_with_priority :low_priority + + # Using macro-style definition. 
The locking scope will be TheClass#method and only one method can run at any + # given time. + include RedisMutex::Macro + + # Default options for RedisMutex#with_lock + # :block => 1 # Specify in seconds how long you want to wait for the lock to be released. + # # Specify 0 if you need non-blocking sematics and return false immediately. (default: 1) + # :sleep => 0.1 # Specify in seconds how long the polling interval should be when :block is given. + # # It is NOT recommended to go below 0.01. (default: 0.1) + # :expire => 10 # Specify in seconds when the lock should be considered stale when something went wrong + # # with the one who held the lock and failed to unlock. (default: 10) + # + # RedisMutex.with_lock("PriorityEpSyncBatchProcessJob", block: 60, expire: 100) + # Key => "PriorityEpSyncBatchProcessJob" + + JOB_DURATION ||= ENV["BATCH_PROCESS_JOB_DURATION"].to_i.minutes + SLEEP_DURATION ||= ENV["BATCH_PROCESS_SLEEP_DURATION"].to_i + + # Attempts to create & process batches for 50 minutes + # There will be a 5 second rest between each iteration + # Job will end if there are no records are left to batch + + # rubocop:disable Metrics/MethodLength, Metrics/AbcSize, Metrics/CyclomaticComplexity + def perform + setup_job + loop do + break if job_running_past_expected_end_time? || should_stop_job + + begin + batch = nil + RedisMutex.with_lock("PriorityEpSyncBatchProcessJob", block: 60, expire: 100) do + batch = ActiveRecord::Base.transaction do + records_to_batch = PriorityEpSyncBatchProcess.find_records_to_batch + next if records_to_batch.empty? + + PriorityEpSyncBatchProcess.create_batch!(records_to_batch) + end + end + + batch ? batch.process_batch! : stop_job(log_no_records_found: true) + + sleep(SLEEP_DURATION) + rescue StandardError => error + log_error(error, extra: { job_id: job_id.to_s, job_time: Time.zone.now.to_s }) + slack_msg = "Error running #{self.class.name}. Error: #{error.message}. Active Job ID: #{job_id}." 
+ slack_msg += " See Sentry event #{Raven.last_event_id}." if Raven.last_event_id.present? + slack_service.send_notification("[ERROR] #{slack_msg}", self.class.to_s) + stop_job + end + end + end + # rubocop:enable Metrics/MethodLength, Metrics/AbcSize, Metrics/CyclomaticComplexity + + private + + attr_accessor :job_expected_end_time, :should_stop_job + + def setup_job + RequestStore.store[:current_user] = User.system_user + @should_stop_job = false + @job_expected_end_time = Time.zone.now + JOB_DURATION + end + + def job_running_past_expected_end_time? + Time.zone.now > job_expected_end_time + end + + # :reek:BooleanParameter + # :reek:ControlParameter + def stop_job(log_no_records_found: false) + self.should_stop_job = true + if log_no_records_found + Rails.logger.info("#{self.class} Cannot Find Any Records to Batch."\ + " Job will be enqueued again at the top of the hour. Active Job ID: #{job_id}. Time: #{Time.zone.now}") + end + end +end diff --git a/app/jobs/populate_end_product_sync_queue_job.rb b/app/jobs/populate_end_product_sync_queue_job.rb new file mode 100644 index 00000000000..079c1410ec2 --- /dev/null +++ b/app/jobs/populate_end_product_sync_queue_job.rb @@ -0,0 +1,89 @@ +# frozen_string_literal: true + +# This job will find deltas between the end product establishment table and the VBMS ext claim table +# where VBMS ext claim level status code is CLR or CAN. If EP is already in the queue it will be skipped. +# Job will populate queue ENV["END_PRODUCT_QUEUE_BATCH_LIMIT"] records at a time. +# This job will run on a 50 minute loop, sleeping for 5 seconds between iterations. 
+class PopulateEndProductSyncQueueJob < CaseflowJob + queue_with_priority :low_priority + + JOB_DURATION ||= ENV["END_PRODUCT_QUEUE_JOB_DURATION"].to_i.minutes + SLEEP_DURATION ||= ENV["END_PRODUCT_QUEUE_SLEEP_DURATION"].to_i + BATCH_LIMIT ||= ENV["END_PRODUCT_QUEUE_BATCH_LIMIT"].to_i + + # rubocop:disable Metrics/CyclomaticComplexity + def perform + setup_job + loop do + break if job_running_past_expected_end_time? || should_stop_job + + begin + batch = ActiveRecord::Base.transaction do + priority_epes = find_priority_end_product_establishments_to_sync + next if priority_epes.empty? + + priority_epes + end + + batch ? insert_into_priority_sync_queue(batch) : stop_job(log_no_records_found: true) + + sleep(SLEEP_DURATION) + rescue StandardError => error + log_error(error, extra: { active_job_id: job_id.to_s, job_time: Time.zone.now.to_s }) + slack_msg = "Error running #{self.class.name}. Error: #{error.message}. Active Job ID: #{job_id}." + slack_msg += " See Sentry event #{Raven.last_event_id}." if Raven.last_event_id.present? 
+ slack_service.send_notification("[ERROR] #{slack_msg}", self.class.to_s) + stop_job + end + end + end + # rubocop:enable Metrics/CyclomaticComplexity + + private + + attr_accessor :job_expected_end_time, :should_stop_job + + def find_priority_end_product_establishments_to_sync + get_batch = <<-SQL + select id + from end_product_establishments + inner join vbms_ext_claim + on end_product_establishments.reference_id = vbms_ext_claim."CLAIM_ID"::varchar + where (end_product_establishments.synced_status <> vbms_ext_claim."LEVEL_STATUS_CODE" or end_product_establishments.synced_status is null) + and vbms_ext_claim."LEVEL_STATUS_CODE" in ('CLR','CAN') + and end_product_establishments.id not in (select end_product_establishment_id from priority_end_product_sync_queue) + limit #{BATCH_LIMIT}; + SQL + + ActiveRecord::Base.connection.exec_query(ActiveRecord::Base.sanitize_sql(get_batch)).rows.flatten + end + + def insert_into_priority_sync_queue(batch) + batch.each do |ep_id| + PriorityEndProductSyncQueue.create!( + end_product_establishment_id: ep_id + ) + end + Rails.logger.info("PopulateEndProductSyncQueueJob EPEs processed: #{batch} - Time: #{Time.zone.now}") + end + + def setup_job + RequestStore.store[:current_user] = User.system_user + @should_stop_job = false + @job_expected_end_time = Time.zone.now + JOB_DURATION + end + + def job_running_past_expected_end_time? + Time.zone.now > job_expected_end_time + end + + # :reek:BooleanParameter + # :reek:ControlParameter + def stop_job(log_no_records_found: false) + self.should_stop_job = true + if log_no_records_found + Rails.logger.info("PopulateEndProductSyncQueueJob is not able to find any batchable EPE records."\ + " Active Job ID: #{job_id}. 
Time: #{Time.zone.now}") + end + end +end diff --git a/app/models/batch_processes/batch_process.rb b/app/models/batch_processes/batch_process.rb new file mode 100644 index 00000000000..8423dad60c6 --- /dev/null +++ b/app/models/batch_processes/batch_process.rb @@ -0,0 +1,107 @@ +# frozen_string_literal: true + +class BatchProcess < CaseflowRecord + self.inheritance_column = :batch_type + has_many :priority_end_product_sync_queue, foreign_key: "batch_id", primary_key: "batch_id" + has_many :end_product_establishments, through: :priority_end_product_sync_queue + after_initialize :init_counters + + ERROR_LIMIT = ENV["BATCH_PROCESS_MAX_ERRORS_BEFORE_STUCK"].to_i + ERROR_DELAY = ENV["BATCH_PROCESS_ERROR_DELAY"].to_i + BATCH_LIMIT = ENV["BATCH_PROCESS_BATCH_LIMIT"].to_i + + scope :completed_batch_process_ids, -> { where(state: Constants.BATCH_PROCESS.completed).select(:batch_id) } + scope :needs_reprocessing, lambda { + where("created_at <= ? AND state <> ?", BatchProcess::ERROR_DELAY.hours.ago, Constants.BATCH_PROCESS.completed) + } + + enum state: { + Constants.BATCH_PROCESS.pre_processing.to_sym => Constants.BATCH_PROCESS.pre_processing, + Constants.BATCH_PROCESS.processing.to_sym => Constants.BATCH_PROCESS.processing, + Constants.BATCH_PROCESS.completed.to_sym => Constants.BATCH_PROCESS.completed + } + + class << self + # Purpose: A no-op method for overriding, intended to find records to batch from a Queue table + # + # Params: None + # + # Response: Records to Batch + def find_records_to_batch + # no-op, can be overwritten + end + + # Purpose: A no-op method for overriding, intended to create a Batch Process record and assign its batch_id + # to the records gathered by the find_records_to_batch method. 
+ # + # Params: Records retrieved from a Queue table that need to be assigned to a Batch Process + # + # Response: Newly Created Batch Process + # :reek:UnusedParameters + def create_batch!(_records) + # no-op, can be overwritten + end + end + + # Purpose: A no-op method for overriding, intended to process all records assinged to a Batch Process + # + # Params: None + # + # Response: Returns True if batch is processed successfully + def process_batch! + # no-op, can be overwritten + end + + private + + attr_accessor :completed_count, :failed_count + + # Initialize Counters + def init_counters + @completed_count = 0 + @failed_count = 0 + end + + def increment_completed + self.completed_count += 1 + end + + def increment_failed + self.failed_count += 1 + end + + # State update Methods + def batch_processing! + update!(state: Constants.BATCH_PROCESS.processing, started_at: Time.zone.now) + end + + def batch_complete! + update!(state: Constants.BATCH_PROCESS.completed, + records_failed: failed_count, + records_completed: completed_count, + ended_at: Time.zone.now) + end + + # When a record and error is sent to this method, it updates the record and checks to see + # if the record should be declared stuck. If the records should be stuck, it calls the + # declare_record_stuck method (Found in priority_end_product_sync_queue.rb). + # Otherwise, the record is updated with status: error and the error message is added to + # error_messages. + # + # As a general method, it's assumed the record has a batch_id and error_messages + # column within the associated table. + # :reek:FeatureEnvy + def error_out_record!(record, error) + increment_failed + error_array = record.error_messages || [] + error_array.push("Error: #{error.inspect} - Batch ID: #{record.batch_id} - Time: #{Time.zone.now}.") + + if error_array.length >= ERROR_LIMIT + record.declare_record_stuck! 
+ else + record.status_error!(error_array) + end + + Rails.logger.error(error.inspect) + end +end diff --git a/app/models/batch_processes/priority_ep_sync_batch_process.rb b/app/models/batch_processes/priority_ep_sync_batch_process.rb new file mode 100644 index 00000000000..70fff6a681f --- /dev/null +++ b/app/models/batch_processes/priority_ep_sync_batch_process.rb @@ -0,0 +1,85 @@ +# frozen_string_literal: true + +class PriorityEpSyncBatchProcess < BatchProcess + class << self + # Purpose: Finds records to batch from the Priority End Product Sync Queue (PEPSQ) table that + # have NO batch_id OR have a batch_id tied to a COMPLETED Batch Process (BATCHABLE), + # do NOT have a status of SYNCED OR STUCK (SYNCABLE), + # and have a last_batched_at date/time that is NULL OR greater than the ERROR_DELAY (READY_TO_BATCH). + # + # Params: None + # + # Response: PEPSQ records + def find_records_to_batch + PriorityEndProductSyncQueue.batchable.syncable.ready_to_batch.batch_limit + end + + # Purpose: Creates a Batch Process record and assigns its batch_id + # to the PEPSQ records gathered by the find_records_to_batch method. + # + # Params: Records retrieved from the Priority End Product Sync Queue (PEPSQ) table + # + # Response: Newly Created Batch Process + def create_batch!(records) + new_batch = PriorityEpSyncBatchProcess.create!(batch_type: name, + state: Constants.BATCH_PROCESS.pre_processing, + records_attempted: records.count) + + new_batch.assign_batch_to_queued_records!(records) + new_batch + end + end + + # Purpose: Updates the Batch Process status to processing then loops through each record within + # the batch. Each record's status is updated to processing, then the #sync! method is attempted. + # If the record fails, the error_out_record! method is called. + # + # Params: None + # + # Response: Returns True if batch is processed successfully + # rubocop:disable Metrics/MethodLength + # :reek:FeatureEnvy + def process_batch! + batch_processing! 
+ + priority_end_product_sync_queue.each do |record| + record.status_processing! + epe = record.end_product_establishment + + begin + epe.sync! + epe.reload + + if epe.vbms_ext_claim.nil? + fail Caseflow::Error::PriorityEndProductSyncError, "Claim ID: #{epe.reference_id} not In VBMS_EXT_CLAIM." + elsif epe.synced_status != epe.vbms_ext_claim&.level_status_code + fail Caseflow::Error::PriorityEndProductSyncError, "EPE ID: #{epe&.id}. EPE synced_status of"\ + " #{epe.synced_status} does not match the VBMS_EXT_CLAIM level_status_code of"\ + " #{epe.vbms_ext_claim&.level_status_code}." + end + rescue StandardError => error + error_out_record!(record, error) + next + end + + record.status_sync! + increment_completed + end + + batch_complete! + end + # rubocop:enable Metrics/MethodLength + + # Purpose: Assigns the Batch Process batch_id to Priority End Product Sync Queue (PEPSQ) records. + # + # Params: Records retrieved from the Priority End Product Sync Queue (PEPSQ) table + # + # Response: Newly batched PEPSQ records + def assign_batch_to_queued_records!(records) + records.each do |pepsq_record| + pepsq_record.update!(batch_id: batch_id, + status: Constants.PRIORITY_EP_SYNC.pre_processing, + last_batched_at: Time.zone.now) + end + end +end diff --git a/app/models/caseflow_stuck_record.rb b/app/models/caseflow_stuck_record.rb new file mode 100644 index 00000000000..f17111c7a50 --- /dev/null +++ b/app/models/caseflow_stuck_record.rb @@ -0,0 +1,18 @@ +# frozen_string_literal: true + +# This table consists of records that have repeatedly attempted +# to sync or be processed in some way but have continuously errored out. +# This table is polymorphic, records on this table could belong to more than one table. +# Records on this table are intended to be checked and fixed manually. 
+ +class CaseflowStuckRecord < CaseflowRecord + belongs_to :stuck_record, polymorphic: true + + # Custom model association that will return the end_product_establishment for + # stuck records that are from the PriorityEndProductSyncQueue + def end_product_establishment + if stuck_record.is_a?(PriorityEndProductSyncQueue) + stuck_record.end_product_establishment + end + end +end diff --git a/app/models/end_product_establishment.rb b/app/models/end_product_establishment.rb index 3c044f2ff69..33cac326d2f 100644 --- a/app/models/end_product_establishment.rb +++ b/app/models/end_product_establishment.rb @@ -9,12 +9,27 @@ # the current status of the EP when the EndProductEstablishment is synced. class EndProductEstablishment < CaseflowRecord + # Using macro-style definition. The locking scope will be TheClass + # method and only one method can run at any given time. + include RedisMutex::Macro + belongs_to :source, polymorphic: true belongs_to :user has_many :request_issues has_many :end_product_code_updates has_many :effectuations, class_name: "BoardGrantEffectuation" has_many :end_product_updates + has_one :priority_end_product_sync_queue + belongs_to :vbms_ext_claim, foreign_key: "reference_id", primary_key: "claim_id", optional: true + + # :block => 1 # Specify in seconds how long you want to wait for the lock to be released. + # # Specify 0 if you need non-blocking sematics and return false immediately. (default: 1) + # :sleep => 0.1 # Specify in seconds how long the polling interval should be when :block is given. + # # It is NOT recommended to go below 0.01. (default: 0.1) + # :expire => 10 # Specify in seconds when the lock should be considered stale when something went wrong + # # with the one who held the lock and failed to unlock. (default: 10) + # auto_mutex :sync!, block: 60, expire: 100, after_failure: lambda { Rails.logger.error('failed to acquire lock! + # EPE sync is being called by another process. 
Please try again later.') } # allow @veteran to be assigned to save upstream calls attr_writer :veteran @@ -46,7 +61,7 @@ def active # We only know the set of inactive EP statuses # We also only know the EP status after fetching it from BGS # Therefore, our definition of active is when the EP is either - # not known or not known to be inactive + # not known or not known to be inactive established.where("synced_status NOT IN (?) OR synced_status IS NULL", EndProduct::INACTIVE_STATUSES) end end @@ -197,25 +212,31 @@ def cancel_unused_end_product! end end + # rubocop:disable Metrics/MethodLength def sync! - # There is no need to sync end_product_status if the status - # is already inactive since an EP can never leave that state - return true unless status_active? - - fail EstablishedEndProductNotFound, id unless result - - # load contentions now, in case "source" needs them. - # this VBMS call is slow and will cause the transaction below - # to timeout in some cases. - contentions unless result.status_type_code == EndProduct::STATUSES.key("Canceled") + RedisMutex.with_lock("EndProductEstablishment:#{id}", block: 60, expire: 100) do + # key => "EndProductEstablishment:id" + # There is no need to sync end_product_status if the status + # is already inactive since an EP can never leave that state + return true unless status_active? + + fail EstablishedEndProductNotFound, id unless result + + # load contentions now, in case "source" needs them. + # this VBMS call is slow and will cause the transaction below to timeout in some cases. + contentions unless result.status_type_code == EndProduct::STATUSES.key("Canceled") + + transaction do + update!(synced_status: result.status_type_code) + status_cancelled? ? handle_cancelled_ep! : sync_source! + close_request_issues_with_no_decision! + end - transaction do - update!(synced_status: result.status_type_code) - status_cancelled? ? handle_cancelled_ep! : sync_source! - close_request_issues_with_no_decision! 
+ save_updated_end_product_code! end - - save_updated_end_product_code! + rescue RedisMutex::LockError + Rails.logger.error("Failed to acquire lock for EPE ID: #{id}! #sync! is being"\ + " called by another process. Please try again later.") rescue EstablishedEndProductNotFound, AppealRepository::AppealNotValidToReopen => error raise error rescue StandardError => error @@ -228,6 +249,8 @@ def sync! update!(last_synced_at: Time.zone.now) end + # rubocop:enable Metrics/MethodLength + def fetch_dispositions_from_vbms VBMSService.get_dispositions!(claim_id: reference_id) end @@ -292,6 +315,15 @@ def associated_rating @associated_rating ||= fetch_associated_rating end + # Purpose: Check if End Product Establishment is enqueued in the Priority End Product Sync Queue. + # + # Params: NONE + # + # Response: True if End Product Establishment is queued to sync. False if not. + def priority_queued? + priority_end_product_sync_queue ? true : false + end + def sync_decision_issues! contention_records.each do |record| if record.respond_to?(:nonrating?) && record.nonrating? 
diff --git a/app/models/external_models/vbms_ext_claim.rb b/app/models/external_models/vbms_ext_claim.rb new file mode 100644 index 00000000000..5f430e96fe5 --- /dev/null +++ b/app/models/external_models/vbms_ext_claim.rb @@ -0,0 +1,48 @@ +# frozen_string_literal: true + +# This model represents entries in the vbms_ext_claim table +# VbmsExtClaims can have an associated EndProductEstablishment + +class VbmsExtClaim < CaseflowRecord + self.table_name = "vbms_ext_claim" + self.primary_key = "CLAIM_ID" + + has_one :end_product_establishment, foreign_key: "reference_id", primary_key: "claim_id" + + alias_attribute :claim_id, :CLAIM_ID + alias_attribute :claim_date, :CLAIM_DATE + alias_attribute :ep_code, :EP_CODE + alias_attribute :suspense_date, :SUSPENSE_DATE + alias_attribute :suspense_reason_code, :SUSPENSE_REASON_CODE + alias_attribute :suspense_reason_comments, :SUSPENSE_REASON_COMMENTS + alias_attribute :claimant_person_id, :CLAIMANT_PERSON_ID + alias_attribute :contention_count, :CONTENTION_COUNT + alias_attribute :claim_soj, :CLAIM_SOJ + alias_attribute :temporary_claim_soj, :TEMPORARY_CLAIM_SOJ + alias_attribute :priority, :PRIORITY + alias_attribute :type_code, :TYPE_CODE + alias_attribute :lifecycle_status_name, :LIFECYCLE_STATUS_NAME + alias_attribute :level_status_code, :LEVEL_STATUS_CODE + alias_attribute :submitter_application_code, :SUBMITTER_APPLICATION_CODE + alias_attribute :submitter_role_code, :SUBMITTER_ROLE_CODE + alias_attribute :veteran_person_id, :VETERAN_PERSON_ID + alias_attribute :establishment_date, :ESTABLISHMENT_DATE + alias_attribute :intake_site, :INTAKE_SITE + alias_attribute :payee_code, :PAYEE_CODE + alias_attribute :sync_id, :SYNC_ID + alias_attribute :createddt, :CREATEDDT + alias_attribute :lastupdatedt, :LASTUPDATEDT + alias_attribute :expirationdt, :EXPIRATIONDT + alias_attribute :version, :VERSION + alias_attribute :lifecycle_status_change_date, :LIFECYCLE_STATUS_CHANGE_DATE + alias_attribute :rating_soj, :RATING_SOJ + 
alias_attribute :program_type_code, :PROGRAM_TYPE_CODE + alias_attribute :service_type_code, :SERVICE_TYPE_CODE + alias_attribute :prevent_audit_trig, :PREVENT_AUDIT_TRIG + alias_attribute :pre_discharge_type_code, :PRE_DISCHARGE_TYPE_CODE + alias_attribute :pre_discharge_ind, :PRE_DISCHARGE_IND + alias_attribute :organization_name, :ORGANIZATION_NAME + alias_attribute :organization_soj, :ORGANIZATION_SOJ + alias_attribute :allow_poa_access, :ALLOW_POA_ACCESS + alias_attribute :poa_code, :POA_CODE +end diff --git a/app/models/priority_queues/priority_end_product_sync_queue.rb b/app/models/priority_queues/priority_end_product_sync_queue.rb new file mode 100644 index 00000000000..d8cd0f73ba0 --- /dev/null +++ b/app/models/priority_queues/priority_end_product_sync_queue.rb @@ -0,0 +1,61 @@ +# frozen_string_literal: true + +# Model for Priority End Product Sync Queue table. +# This table consists of records of End Product Establishment IDs that need to be synced with VBMS. +class PriorityEndProductSyncQueue < CaseflowRecord + self.table_name = "priority_end_product_sync_queue" + + belongs_to :end_product_establishment + belongs_to :batch_process, foreign_key: "batch_id", primary_key: "batch_id" + has_many :caseflow_stuck_records, as: :stuck_record + + scope :batchable, -> { where(batch_id: [nil, BatchProcess.completed_batch_process_ids]) } + scope :ready_to_batch, lambda { + where("last_batched_at IS NULL OR last_batched_at <= ?", BatchProcess::ERROR_DELAY.hours.ago) + } + scope :batch_limit, -> { limit(BatchProcess::BATCH_LIMIT) } + scope :syncable, lambda { + where.not(status: [Constants.PRIORITY_EP_SYNC.synced, Constants.PRIORITY_EP_SYNC.stuck]) + } + + enum status: { + Constants.PRIORITY_EP_SYNC.not_processed.to_sym => Constants.PRIORITY_EP_SYNC.not_processed, + Constants.PRIORITY_EP_SYNC.pre_processing.to_sym => Constants.PRIORITY_EP_SYNC.pre_processing, + Constants.PRIORITY_EP_SYNC.processing.to_sym => Constants.PRIORITY_EP_SYNC.processing, + 
Constants.PRIORITY_EP_SYNC.synced.to_sym => Constants.PRIORITY_EP_SYNC.synced, + Constants.PRIORITY_EP_SYNC.error.to_sym => Constants.PRIORITY_EP_SYNC.error, + Constants.PRIORITY_EP_SYNC.stuck.to_sym => Constants.PRIORITY_EP_SYNC.stuck + } + + # Status Update methods + def status_processing! + update!(status: Constants.PRIORITY_EP_SYNC.processing) + end + + def status_sync! + update!(status: Constants.PRIORITY_EP_SYNC.synced) + end + + def status_error!(errors) + update!(status: Constants.PRIORITY_EP_SYNC.error, + error_messages: errors) + end + + # Method will update the status of the record to STUCK + # While also create a record within the caseflow_stuck_records table + # for later manual review. + def declare_record_stuck! + update!(status: Constants.PRIORITY_EP_SYNC.stuck) + stuck_record = CaseflowStuckRecord.create!(stuck_record: self, + error_messages: error_messages, + determined_stuck_at: Time.zone.now) + msg = "StuckRecordAlert::SyncFailed End Product Establishment ID: #{end_product_establishment_id}." 
+ Raven.capture_message(msg, level: "error", extra: { caseflow_stuck_record_id: stuck_record.id, + batch_process_type: batch_process.class.name, + batch_id: batch_id, + queue_type: self.class.name, + queue_id: id, + end_product_establishment_id: end_product_establishment_id, + determined_stuck_at: stuck_record.determined_stuck_at }) + end +end diff --git a/client/constants/BATCH_PROCESS.json b/client/constants/BATCH_PROCESS.json new file mode 100644 index 00000000000..097ca76b4b2 --- /dev/null +++ b/client/constants/BATCH_PROCESS.json @@ -0,0 +1,5 @@ +{ + "pre_processing": "PRE_PROCESSING", + "processing": "PROCESSING", + "completed": "COMPLETED" +} diff --git a/client/constants/PRIORITY_EP_SYNC.json b/client/constants/PRIORITY_EP_SYNC.json new file mode 100644 index 00000000000..28d8c7a02c8 --- /dev/null +++ b/client/constants/PRIORITY_EP_SYNC.json @@ -0,0 +1,8 @@ +{ + "not_processed": "NOT_PROCESSED", + "pre_processing": "PRE_PROCESSING", + "processing": "PROCESSING", + "synced": "SYNCED", + "error": "ERROR", + "stuck": "STUCK" +} diff --git a/config/environments/demo.rb b/config/environments/demo.rb index f6d7574b65f..eb2df052944 100644 --- a/config/environments/demo.rb +++ b/config/environments/demo.rb @@ -82,6 +82,19 @@ ENV["DATABASE_CLEANER_ALLOW_REMOTE_DATABASE_URL"] ||= "true" + # BatchProcess ENVs + # priority_ep_sync + ENV["BATCH_PROCESS_JOB_DURATION"] ||= "1" # Number of hours the job will run for + ENV["BATCH_PROCESS_SLEEP_DURATION"] ||= "5" # Number of seconds between loop iterations + ENV["BATCH_PROCESS_BATCH_LIMIT"]||= "100" # Max number of records in a batch + ENV["BATCH_PROCESS_ERROR_DELAY"] ||= "12" # In number of hours + ENV["BATCH_PROCESS_MAX_ERRORS_BEFORE_STUCK"] ||= "3" # When record errors for X time, it's declared stuck + + # Populate End Product Sync Queue ENVs + ENV["END_PRODUCT_QUEUE_JOB_DURATION"] ||= "1" # Number of hours the job will run for + ENV["END_PRODUCT_QUEUE_SLEEP_DURATION"] ||= "5" # Number of seconds between loop iterations + 
ENV["END_PRODUCT_QUEUE_BATCH_LIMIT"] ||= "500" # Max number of records in a batch + # Setup S3 config.s3_enabled = ENV["AWS_BUCKET_NAME"].present? config.s3_bucket_name = ENV["AWS_BUCKET_NAME"] diff --git a/config/environments/development.rb b/config/environments/development.rb index bbd19dae9c5..9822cd7b692 100644 --- a/config/environments/development.rb +++ b/config/environments/development.rb @@ -80,6 +80,14 @@ ENV["AWS_ACCESS_KEY_ID"] ||= "dummykeyid" ENV["AWS_SECRET_ACCESS_KEY"] ||= "dummysecretkey" + # BatchProcess ENVs + # priority_ep_sync + ENV["BATCH_PROCESS_JOB_DURATION"] ||= "50" # Number of minutes the job will run for + ENV["BATCH_PROCESS_SLEEP_DURATION"] ||= "5" # Number of seconds between loop iterations + ENV["BATCH_PROCESS_BATCH_LIMIT"]||= "100" # Max number of records in a batch + ENV["BATCH_PROCESS_ERROR_DELAY"] ||= "3" # In number of hours + ENV["BATCH_PROCESS_MAX_ERRORS_BEFORE_STUCK"] ||= "3" # When record errors for X time, it's declared stuck + # Necessary vars needed to create virtual hearing links # Used by VirtualHearings::LinkService ENV["VIRTUAL_HEARING_PIN_KEY"] ||= "mysecretkey" @@ -92,6 +100,11 @@ # Quarterly Notifications Batch Sizes ENV["QUARTERLY_NOTIFICATIONS_JOB_BATCH_SIZE"] ||= "1000" + # Populate End Product Sync Queue ENVs + ENV["END_PRODUCT_QUEUE_JOB_DURATION"] ||= "50" # Number of minutes the job will run for + ENV["END_PRODUCT_QUEUE_SLEEP_DURATION"] ||= "5" # Number of seconds between loop iterations + ENV["END_PRODUCT_QUEUE_BATCH_LIMIT"] ||= "500" # Max number of records in a batch + # Travel Board Sync Batch Size ENV["TRAVEL_BOARD_HEARING_SYNC_BATCH_LIMIT"] ||= "250" diff --git a/config/environments/test.rb b/config/environments/test.rb index 1ea1f050d7b..fff81c6c7a2 100644 --- a/config/environments/test.rb +++ b/config/environments/test.rb @@ -89,6 +89,14 @@ ENV["AWS_ACCESS_KEY_ID"] ||= "dummykeyid" ENV["AWS_SECRET_ACCESS_KEY"] ||= "dummysecretkey" + # BatchProcess ENVs + # priority_ep_sync + 
ENV["BATCH_PROCESS_JOB_DURATION"] ||= "50" # Number of minutes the job will run for + ENV["BATCH_PROCESS_SLEEP_DURATION"] ||= "0" # Number of seconds between loop iterations + ENV["BATCH_PROCESS_BATCH_LIMIT"]||= "100" # Max number of records in a batch + ENV["BATCH_PROCESS_ERROR_DELAY"] ||= "3" # In number of hours + ENV["BATCH_PROCESS_MAX_ERRORS_BEFORE_STUCK"] ||= "3" # When record errors for X time, it's declared stuck + config.active_job.queue_adapter = :test # Disable SqlTracker from creating tmp/sql_tracker-*.json files -- https://github.com/steventen/sql_tracker/pull/10 @@ -107,6 +115,11 @@ # Quarterly Notifications Batch Sizes ENV["QUARTERLY_NOTIFICATIONS_JOB_BATCH_SIZE"] ||= "1000" + # Populate End Product Sync Queue ENVs + ENV["END_PRODUCT_QUEUE_JOB_DURATION"] ||= "50" # Number of minutes the job will run for + ENV["END_PRODUCT_QUEUE_SLEEP_DURATION"] ||= "0" # Number of seconds between loop iterations + ENV["END_PRODUCT_QUEUE_BATCH_LIMIT"] ||= "250" # Max number of records in a batch + # Travel Board Sync Batch Size ENV["TRAVEL_BOARD_HEARING_SYNC_BATCH_LIMIT"] ||= "250" diff --git a/config/initializers/redis_mutex.rb b/config/initializers/redis_mutex.rb new file mode 100644 index 00000000000..2bc65f75825 --- /dev/null +++ b/config/initializers/redis_mutex.rb @@ -0,0 +1 @@ +RedisClassy.redis = Redis.new(url: Rails.application.secrets.redis_url_cache) diff --git a/config/initializers/scheduled_jobs.rb b/config/initializers/scheduled_jobs.rb index 6f097bf1114..6d675840226 100644 --- a/config/initializers/scheduled_jobs.rb +++ b/config/initializers/scheduled_jobs.rb @@ -1,6 +1,11 @@ +require "./app/jobs/batch_processes/priority_ep_sync_batch_process_job.rb" +require "./app/jobs/batch_processes/batch_process_rescue_job.rb" + SCHEDULED_JOBS = { "amo_metrics_report" => AMOMetricsReportJob, "annual_metrics" => AnnualMetricsReportJob, + "priority_ep_sync_batch_process_job" => PriorityEpSyncBatchProcessJob, + "batch_process_rescue_job" => BatchProcessRescueJob, 
"calculate_dispatch_stats" => CalculateDispatchStatsJob, "create_establish_claim" => CreateEstablishClaimTasksJob, "data_integrity_checks" => DataIntegrityChecksJob, @@ -19,6 +24,7 @@ "monthly_metrics" => MonthlyMetricsReportJob, "nightly_syncs" => NightlySyncsJob, "out_of_service_reminder" => OutOfServiceReminderJob, + "populate_end_product_sync_queue" => PopulateEndProductSyncQueueJob, "prepare_establish_claim" => PrepareEstablishClaimTasksJob, "push_priority_appeals_to_judges" => PushPriorityAppealsToJudgesJob, "quarterly_metrics" => QuarterlyMetricsReportJob, diff --git a/db/migrate/20230531132301_create_priority_end_product_sync_queue.rb b/db/migrate/20230531132301_create_priority_end_product_sync_queue.rb new file mode 100644 index 00000000000..ae9fa3fd7b5 --- /dev/null +++ b/db/migrate/20230531132301_create_priority_end_product_sync_queue.rb @@ -0,0 +1,12 @@ +class CreatePriorityEndProductSyncQueue < Caseflow::Migration + def change + create_table :priority_end_product_sync_queue, comment: "Queue of End Product Establishments that need to sync with VBMS" do |t| + t.integer :end_product_establishment_id, unique: true, null: false, comment: "ID of end_product_establishment record to be synced" + t.uuid :batch_id, null: true, comment: "A unique UUID for the batch the record is executed with" + t.string :status, null: false, default: "NOT_PROCESSED", comment: "A status to indicate what state the record is in such as PROCESSING and PROCESSED" + t.timestamp :created_at, null: false, comment: "Date and Time the record was inserted into the queue" + t.timestamp :last_batched_at, null: true, comment: "Date and Time the record was last batched" + t.string :error_messages, array: true, default: [], comment: "Array of Error Message(s) containing Batch ID and specific error if a failure occurs" + end + end +end diff --git a/db/migrate/20230531142439_add_foreign_key_to_priority_end_product_sync_queue.rb 
b/db/migrate/20230531142439_add_foreign_key_to_priority_end_product_sync_queue.rb new file mode 100644 index 00000000000..0081532138e --- /dev/null +++ b/db/migrate/20230531142439_add_foreign_key_to_priority_end_product_sync_queue.rb @@ -0,0 +1,5 @@ +class AddForeignKeyToPriorityEndProductSyncQueue < Caseflow::Migration + def change + add_foreign_key :priority_end_product_sync_queue, :end_product_establishments, name: "priority_end_product_sync_queue_end_product_establishment_id_fk", validate: false + end +end diff --git a/db/migrate/20230531144855_add_indexes_to_priority_end_product_sync_queue.rb b/db/migrate/20230531144855_add_indexes_to_priority_end_product_sync_queue.rb new file mode 100644 index 00000000000..91616b778ec --- /dev/null +++ b/db/migrate/20230531144855_add_indexes_to_priority_end_product_sync_queue.rb @@ -0,0 +1,6 @@ +class AddIndexesToPriorityEndProductSyncQueue < Caseflow::Migration + def change + add_safe_index :priority_end_product_sync_queue, [:end_product_establishment_id], name: "index_priority_end_product_sync_queue_on_epe_id", unique: true + add_safe_index :priority_end_product_sync_queue, [:batch_id], name: "index_priority_end_product_sync_queue_on_batch_id", unique: false + end +end diff --git a/db/migrate/20230602143751_create_batch_processes.rb b/db/migrate/20230602143751_create_batch_processes.rb new file mode 100644 index 00000000000..8290d290b5a --- /dev/null +++ b/db/migrate/20230602143751_create_batch_processes.rb @@ -0,0 +1,14 @@ +class CreateBatchProcesses < Caseflow::Migration + def change + create_table :batch_processes, id: false, comment: "A generalized table for batching and processing records within caseflow" do |t| + t.uuid :batch_id, primary_key: true, unique: true, null: false, comment: "The unique id of the created batch" + t.string :state, default: "PRE_PROCESSING", null: false, comment: "The state that the batch is currently in. 
PRE_PROCESSING, PROCESSING, PROCESSED" + t.string :batch_type, null: false, comment: "Indicates what type of record is being batched" + t.timestamp :started_at, comment: "The date/time that the batch began processing" + t.timestamp :ended_at, comment: "The date/time that the batch finished processing" + t.integer :records_attempted, default: 0, comment: "The number of records in the batch attempting to be processed" + t.integer :records_completed, default: 0, comment: "The number of records in the batch that completed processing successfully" + t.integer :records_failed, default: 0, comment: "The number of records in the batch that failed processing" + end + end +end diff --git a/db/migrate/20230602175207_add_indexes_to_batch_processes.rb b/db/migrate/20230602175207_add_indexes_to_batch_processes.rb new file mode 100644 index 00000000000..0ab0fbc278a --- /dev/null +++ b/db/migrate/20230602175207_add_indexes_to_batch_processes.rb @@ -0,0 +1,7 @@ +class AddIndexesToBatchProcesses < Caseflow::Migration + def change + add_safe_index :batch_processes, [:state], name: "index_batch_processes_on_state", unique: false + add_safe_index :batch_processes, [:batch_type], name: "index_batch_processes_on_batch_type", unique: false + add_safe_index :batch_processes, [:records_failed], name: "index_batch_processes_on_records_failed", unique: false + end +end diff --git a/db/migrate/20230602201048_create_caseflow_stuck_records.rb b/db/migrate/20230602201048_create_caseflow_stuck_records.rb new file mode 100644 index 00000000000..f082dc57d45 --- /dev/null +++ b/db/migrate/20230602201048_create_caseflow_stuck_records.rb @@ -0,0 +1,9 @@ +class CreateCaseflowStuckRecords < Caseflow::Migration + def change + create_table :caseflow_stuck_records do |t| + t.references :stuck_record, polymorphic: true, index: { name: 'index_caseflow_stuck_records_on_stuck_record_id_and_type' }, null: false, comment: "The id / primary key of the stuck record and the type / where the record came from" + 
t.string :error_messages, array: true, default: [], comment: "Array of Error Message(s) containing Batch ID and specific error if a failure occurs" + t.timestamp :determined_stuck_at, null: false, comment: "The date/time at which the record in question was determined to be stuck." + end + end +end diff --git a/db/migrate/20230608192149_add_comment_to_caseflow_stuck_records.rb b/db/migrate/20230608192149_add_comment_to_caseflow_stuck_records.rb new file mode 100644 index 00000000000..3a4a05d1f59 --- /dev/null +++ b/db/migrate/20230608192149_add_comment_to_caseflow_stuck_records.rb @@ -0,0 +1,5 @@ +class AddCommentToCaseflowStuckRecords < Caseflow::Migration + def change + change_table_comment :caseflow_stuck_records, "This is a polymorphic table consisting of records that have repeatedly errored out of the syncing process. Currently, the only records on this table come from the PriorityEndProductSyncQueue table." + end +end diff --git a/db/migrate/20230626212036_add_default_uuid_for_batch_processes.rb b/db/migrate/20230626212036_add_default_uuid_for_batch_processes.rb new file mode 100644 index 00000000000..3738df1a588 --- /dev/null +++ b/db/migrate/20230626212036_add_default_uuid_for_batch_processes.rb @@ -0,0 +1,5 @@ +class AddDefaultUuidForBatchProcesses < Caseflow::Migration + def change + change_column_default :batch_processes, :batch_id, from: nil, to: "uuid_generate_v4()" + end +end diff --git a/db/migrate/20230626213334_add_batch_foreign_key_to_priority_end_product_sync_queue.rb b/db/migrate/20230626213334_add_batch_foreign_key_to_priority_end_product_sync_queue.rb new file mode 100644 index 00000000000..ed9fafb731d --- /dev/null +++ b/db/migrate/20230626213334_add_batch_foreign_key_to_priority_end_product_sync_queue.rb @@ -0,0 +1,5 @@ +class AddBatchForeignKeyToPriorityEndProductSyncQueue < Caseflow::Migration + def change + add_foreign_key :priority_end_product_sync_queue, :batch_processes, column: "batch_id", primary_key: "batch_id", name: 
"priority_end_product_sync_queue_batch_processes_id_fk", validate: false + end +end diff --git a/db/migrate/20230630134611_add_index_on_end_product_establishment_reference_id.rb b/db/migrate/20230630134611_add_index_on_end_product_establishment_reference_id.rb new file mode 100644 index 00000000000..06dd44dc910 --- /dev/null +++ b/db/migrate/20230630134611_add_index_on_end_product_establishment_reference_id.rb @@ -0,0 +1,5 @@ +class AddIndexOnEndProductEstablishmentReferenceId < Caseflow::Migration + def change + add_safe_index :end_product_establishments, :reference_id + end +end diff --git a/db/migrate/20230711153345_add_created_at_and_updated_at_columns_to_batch_processes.rb b/db/migrate/20230711153345_add_created_at_and_updated_at_columns_to_batch_processes.rb new file mode 100644 index 00000000000..64cd52f534c --- /dev/null +++ b/db/migrate/20230711153345_add_created_at_and_updated_at_columns_to_batch_processes.rb @@ -0,0 +1,6 @@ +class AddCreatedAtAndUpdatedAtColumnsToBatchProcesses < Caseflow::Migration + def change + add_column :batch_processes, :created_at, :datetime, null: false, comment: "Date and Time that batch was created." + add_column :batch_processes, :updated_at, :datetime, null: false, comment: "Date and Time that batch was last updated." + end +end diff --git a/db/migrate/20230711153536_add_updated_at_column_to_priority_end_product_sync_queue.rb b/db/migrate/20230711153536_add_updated_at_column_to_priority_end_product_sync_queue.rb new file mode 100644 index 00000000000..8adff954fb7 --- /dev/null +++ b/db/migrate/20230711153536_add_updated_at_column_to_priority_end_product_sync_queue.rb @@ -0,0 +1,5 @@ +class AddUpdatedAtColumnToPriorityEndProductSyncQueue < Caseflow::Migration + def change + add_column :priority_end_product_sync_queue, :updated_at, :datetime, null: false, comment: "Date and Time the record was last updated." 
+ end +end diff --git a/db/migrate/20230711153654_add_index_on_last_batched_at_and_status_to_priority_end_product_sync_queue.rb b/db/migrate/20230711153654_add_index_on_last_batched_at_and_status_to_priority_end_product_sync_queue.rb new file mode 100644 index 00000000000..3a9bbe7a12f --- /dev/null +++ b/db/migrate/20230711153654_add_index_on_last_batched_at_and_status_to_priority_end_product_sync_queue.rb @@ -0,0 +1,6 @@ +class AddIndexOnLastBatchedAtAndStatusToPriorityEndProductSyncQueue < Caseflow::Migration + def change + add_safe_index :priority_end_product_sync_queue, [:last_batched_at], name: "index_priority_ep_sync_queue_on_last_batched_at", unique: false + add_safe_index :priority_end_product_sync_queue, [:status], name: "index_priority_ep_sync_queue_on_status", unique: false + end +end diff --git a/db/migrate/20230801195310_add_columns_to_caseflow_stuck_records.rb b/db/migrate/20230801195310_add_columns_to_caseflow_stuck_records.rb new file mode 100644 index 00000000000..e7706b2db81 --- /dev/null +++ b/db/migrate/20230801195310_add_columns_to_caseflow_stuck_records.rb @@ -0,0 +1,7 @@ +class AddColumnsToCaseflowStuckRecords < Caseflow::Migration + def change + add_column :caseflow_stuck_records, :remediated, :boolean, default: false, null: false, comment: "Reflects if the stuck record has been reviewed and fixed" + add_column :caseflow_stuck_records, :remediation_notes, :text, comment: "Brief description of the encountered issue and remediation strategy" + add_column :caseflow_stuck_records, :updated_at, :datetime, comment: "The time an update occurred on the record" + end +end diff --git a/db/schema.rb b/db/schema.rb index 2f02f9c82e3..71f378159e5 100644 --- a/db/schema.rb +++ b/db/schema.rb @@ -10,7 +10,7 @@ # # It's strongly recommended that you check this file into your version control system. 
-ActiveRecord::Schema.define(version: 2023_07_31_194341) do +ActiveRecord::Schema.define(version: 2023_08_01_195310) do # These are extensions that must be enabled in order to support this database enable_extension "plpgsql" @@ -220,6 +220,21 @@ t.index ["veteran_file_number"], name: "index_available_hearing_locations_on_veteran_file_number" end + create_table "batch_processes", primary_key: "batch_id", id: :uuid, default: -> { "uuid_generate_v4()" }, comment: "A generalized table for batching and processing records within caseflow", force: :cascade do |t| + t.string "batch_type", null: false, comment: "Indicates what type of record is being batched" + t.datetime "created_at", null: false, comment: "Date and Time that batch was created." + t.datetime "ended_at", comment: "The date/time that the batch finished processing" + t.integer "records_attempted", default: 0, comment: "The number of records in the batch attempting to be processed" + t.integer "records_completed", default: 0, comment: "The number of records in the batch that completed processing successfully" + t.integer "records_failed", default: 0, comment: "The number of records in the batch that failed processing" + t.datetime "started_at", comment: "The date/time that the batch began processing" + t.string "state", default: "PRE_PROCESSING", null: false, comment: "The state that the batch is currently in. PRE_PROCESSING, PROCESSING, PROCESSED" + t.datetime "updated_at", null: false, comment: "Date and Time that batch was last updated." 
+ t.index ["batch_type"], name: "index_batch_processes_on_batch_type" + t.index ["records_failed"], name: "index_batch_processes_on_records_failed" + t.index ["state"], name: "index_batch_processes_on_state" + end + create_table "bgs_attorneys", comment: "Cache of unique BGS attorney data — used for adding claimants to cases pulled from POA data", force: :cascade do |t| t.datetime "created_at", null: false, comment: "Standard created_at/updated_at timestamps" t.datetime "last_synced_at", comment: "The last time BGS was checked" @@ -330,6 +345,17 @@ t.index ["updated_at"], name: "index_cached_user_attributes_on_updated_at" end + create_table "caseflow_stuck_records", comment: "This is a polymorphic table consisting of records that have repeatedly errored out of the syncing process. Currently, the only records on this table come from the PriorityEndProductSyncQueue table.", force: :cascade do |t| + t.datetime "determined_stuck_at", null: false, comment: "The date/time at which the record in question was determined to be stuck." 
+ t.string "error_messages", default: [], comment: "Array of Error Message(s) containing Batch ID and specific error if a failure occurs", array: true + t.boolean "remediated", default: false, null: false, comment: "Reflects if the stuck record has been reviewed and fixed" + t.text "remediation_notes", comment: "Brief description of the encountered issue and remediation strategy" + t.bigint "stuck_record_id", null: false, comment: "The id / primary key of the stuck record and the type / where the record came from" + t.string "stuck_record_type", null: false + t.datetime "updated_at", comment: "The time an update occurred on the record" + t.index ["stuck_record_type", "stuck_record_id"], name: "index_caseflow_stuck_records_on_stuck_record_id_and_type" + end + create_table "cavc_dashboard_dispositions", force: :cascade do |t| t.bigint "cavc_dashboard_id", comment: "ID of the associated CAVC Dashboard" t.bigint "cavc_dashboard_issue_id" @@ -765,6 +791,7 @@ t.datetime "updated_at" t.integer "user_id", comment: "The ID of the user who performed the decision review intake." t.string "veteran_file_number", null: false, comment: "PII. The file number of the Veteran submitted when establishing the end product." 
+ t.index ["reference_id"], name: "index_end_product_establishments_on_reference_id" t.index ["source_type", "source_id"], name: "index_end_product_establishments_on_source_type_and_source_id" t.index ["updated_at"], name: "index_end_product_establishments_on_updated_at" t.index ["user_id"], name: "index_end_product_establishments_on_user_id" @@ -1344,6 +1371,20 @@ t.index ["updated_at"], name: "index_post_decision_motions_on_updated_at" end + create_table "priority_end_product_sync_queue", comment: "Queue of End Product Establishments that need to sync with VBMS", force: :cascade do |t| + t.uuid "batch_id", comment: "A unique UUID for the batch the record is executed with" + t.datetime "created_at", null: false, comment: "Date and Time the record was inserted into the queue" + t.integer "end_product_establishment_id", null: false, comment: "ID of end_product_establishment record to be synced" + t.string "error_messages", default: [], comment: "Array of Error Message(s) containing Batch ID and specific error if a failure occurs", array: true + t.datetime "last_batched_at", comment: "Date and Time the record was last batched" + t.string "status", default: "NOT_PROCESSED", null: false, comment: "A status to indicate what state the record is in such as PROCESSING and PROCESSED" + t.datetime "updated_at", null: false, comment: "Date and Time the record was last updated." + t.index ["batch_id"], name: "index_priority_end_product_sync_queue_on_batch_id" + t.index ["end_product_establishment_id"], name: "index_priority_end_product_sync_queue_on_epe_id", unique: true + t.index ["last_batched_at"], name: "index_priority_ep_sync_queue_on_last_batched_at" + t.index ["status"], name: "index_priority_ep_sync_queue_on_status" + end + create_table "ramp_closed_appeals", id: :serial, comment: "Keeps track of legacy appeals that are closed or partially closed in VACOLS due to being transitioned to a RAMP election. 
This data can be used to rollback the RAMP Election if needed.", force: :cascade do |t| t.datetime "closed_on", comment: "The datetime that the legacy appeal was closed in VACOLS and opted into RAMP." t.datetime "created_at" @@ -2104,6 +2145,8 @@ add_foreign_key "organizations_users", "users" add_foreign_key "post_decision_motions", "appeals" add_foreign_key "post_decision_motions", "tasks" + add_foreign_key "priority_end_product_sync_queue", "batch_processes", column: "batch_id", primary_key: "batch_id", name: "priority_end_product_sync_queue_batch_processes_id_fk" + add_foreign_key "priority_end_product_sync_queue", "end_product_establishments", name: "priority_end_product_sync_queue_end_product_establishment_id_fk" add_foreign_key "ramp_closed_appeals", "ramp_elections" add_foreign_key "ramp_election_rollbacks", "ramp_elections" add_foreign_key "ramp_election_rollbacks", "users" diff --git a/db/scripts/audit/functions/add_row_to_priority_end_product_sync_queue_audit_table_function.rb b/db/scripts/audit/functions/add_row_to_priority_end_product_sync_queue_audit_table_function.rb new file mode 100644 index 00000000000..510e5047b91 --- /dev/null +++ b/db/scripts/audit/functions/add_row_to_priority_end_product_sync_queue_audit_table_function.rb @@ -0,0 +1,55 @@ +# frozen_string_literal: true + +require "pg" + +conn = CaseflowRecord.connection +conn.execute("CREATE OR REPLACE FUNCTION caseflow_audit.add_row_to_priority_end_product_sync_queue_audit() +RETURNS trigger +LANGUAGE plpgsql +AS $function$ +begin + if (TG_OP = 'DELETE') then + insert into caseflow_audit.priority_end_product_sync_queue_audit + select + nextval('caseflow_audit.priority_end_product_sync_queue_audit_id_seq'::regclass), + 'D', + OLD.id, + OLD.end_product_establishment_id, + OLD.batch_id, + OLD.status, + OLD.created_at, + OLD.last_batched_at, + CURRENT_TIMESTAMP, + OLD.error_messages; + elsif (TG_OP = 'UPDATE') then + insert into caseflow_audit.priority_end_product_sync_queue_audit + select + 
nextval('caseflow_audit.priority_end_product_sync_queue_audit_id_seq'::regclass), + 'U', + NEW.id, + NEW.end_product_establishment_id, + NEW.batch_id, + NEW.status, + NEW.created_at, + NEW.last_batched_at, + CURRENT_TIMESTAMP, + NEW.error_messages; + elsif (TG_OP = 'INSERT') then + insert into caseflow_audit.priority_end_product_sync_queue_audit + select + nextval('caseflow_audit.priority_end_product_sync_queue_audit_id_seq'::regclass), + 'I', + NEW.id, + NEW.end_product_establishment_id, + NEW.batch_id, + NEW.status, + NEW.created_at, + NEW.last_batched_at, + CURRENT_TIMESTAMP, + NEW.error_messages; + end if; + return null; +end; +$function$ +;") +conn.close diff --git a/db/scripts/audit/functions/add_row_to_priority_end_product_sync_queue_audit_table_function.sql b/db/scripts/audit/functions/add_row_to_priority_end_product_sync_queue_audit_table_function.sql new file mode 100644 index 00000000000..c80700feabf --- /dev/null +++ b/db/scripts/audit/functions/add_row_to_priority_end_product_sync_queue_audit_table_function.sql @@ -0,0 +1,49 @@ +CREATE OR REPLACE FUNCTION caseflow_audit.add_row_to_priority_end_product_sync_queue_audit() +RETURNS trigger +LANGUAGE plpgsql +AS $function$ +begin + if (TG_OP = 'DELETE') then + insert into caseflow_audit.priority_end_product_sync_queue_audit + select + nextval('caseflow_audit.priority_end_product_sync_queue_audit_id_seq'::regclass), + 'D', + OLD.id, + OLD.end_product_establishment_id, + OLD.batch_id, + OLD.status, + OLD.created_at, + OLD.last_batched_at, + CURRENT_TIMESTAMP, + OLD.error_messages; + elsif (TG_OP = 'UPDATE') then + insert into caseflow_audit.priority_end_product_sync_queue_audit + select + nextval('caseflow_audit.priority_end_product_sync_queue_audit_id_seq'::regclass), + 'U', + NEW.id, + NEW.end_product_establishment_id, + NEW.batch_id, + NEW.status, + NEW.created_at, + NEW.last_batched_at, + CURRENT_TIMESTAMP, + NEW.error_messages; + elsif (TG_OP = 'INSERT') then + insert into 
caseflow_audit.priority_end_product_sync_queue_audit + select + nextval('caseflow_audit.priority_end_product_sync_queue_audit_id_seq'::regclass), + 'I', + NEW.id, + NEW.end_product_establishment_id, + NEW.batch_id, + NEW.status, + NEW.created_at, + NEW.last_batched_at, + CURRENT_TIMESTAMP, + NEW.error_messages; + end if; + return null; +end; +$function$ +; diff --git a/db/scripts/audit/functions/drop_add_row_to_priority_end_product_sync_queue_audit_table_function.rb b/db/scripts/audit/functions/drop_add_row_to_priority_end_product_sync_queue_audit_table_function.rb new file mode 100644 index 00000000000..e4a4b41001e --- /dev/null +++ b/db/scripts/audit/functions/drop_add_row_to_priority_end_product_sync_queue_audit_table_function.rb @@ -0,0 +1,7 @@ +# frozen_string_literal: true + +require "pg" + +conn = CaseflowRecord.connection +conn.execute("DROP FUNCTION IF EXISTS caseflow_audit.add_row_to_priority_end_product_sync_queue_audit();") +conn.close diff --git a/db/scripts/audit/functions/drop_add_row_to_priority_end_product_sync_queue_audit_table_function.rb.sql b/db/scripts/audit/functions/drop_add_row_to_priority_end_product_sync_queue_audit_table_function.rb.sql new file mode 100644 index 00000000000..8e15ddcb336 --- /dev/null +++ b/db/scripts/audit/functions/drop_add_row_to_priority_end_product_sync_queue_audit_table_function.rb.sql @@ -0,0 +1 @@ +DROP FUNCTION IF EXISTS caseflow_audit.add_row_to_priority_end_product_sync_queue_audit(); diff --git a/db/scripts/audit/tables/create_priority_end_product_sync_queue_audit.rb b/db/scripts/audit/tables/create_priority_end_product_sync_queue_audit.rb new file mode 100644 index 00000000000..65d87f32d29 --- /dev/null +++ b/db/scripts/audit/tables/create_priority_end_product_sync_queue_audit.rb @@ -0,0 +1,18 @@ +# frozen_string_literal: true + +require "pg" + +conn = CaseflowRecord.connection +conn.execute("CREATE TABLE CASEFLOW_AUDIT.PRIORITY_END_PRODUCT_SYNC_QUEUE_AUDIT ( + ID BIGSERIAL PRIMARY KEY UNIQUE NOT NULL, + 
TYPE_OF_CHANGE CHAR(1) NOT NULL, + PRIORITY_END_PRODUCT_SYNC_QUEUE_ID BIGINT NOT NULL, + END_PRODUCT_ESTABLISHMENT_ID BIGINT NOT NULL REFERENCES END_PRODUCT_ESTABLISHMENTS(ID), + BATCH_ID UUID REFERENCES BATCH_PROCESSES(BATCH_ID), + STATUS VARCHAR(50) NOT NULL, + CREATED_AT TIMESTAMP WITHOUT TIME ZONE, + LAST_BATCHED_AT TIMESTAMP WITHOUT TIME ZONE, + AUDIT_CREATED_AT TIMESTAMP WITHOUT TIME ZONE DEFAULT NOW(), + ERROR_MESSAGES TEXT[] + );") +conn.close diff --git a/db/scripts/audit/tables/create_priority_end_product_sync_queue_audit.sql b/db/scripts/audit/tables/create_priority_end_product_sync_queue_audit.sql new file mode 100644 index 00000000000..c8a96b5f14f --- /dev/null +++ b/db/scripts/audit/tables/create_priority_end_product_sync_queue_audit.sql @@ -0,0 +1,12 @@ +CREATE TABLE CASEFLOW_AUDIT.PRIORITY_END_PRODUCT_SYNC_QUEUE_AUDIT ( + ID BIGSERIAL PRIMARY KEY UNIQUE NOT NULL, + TYPE_OF_CHANGE CHAR(1) NOT NULL, + PRIORITY_END_PRODUCT_SYNC_QUEUE_ID BIGINT NOT NULL, + END_PRODUCT_ESTABLISHMENT_ID BIGINT NOT NULL REFERENCES END_PRODUCT_ESTABLISHMENTS(ID), + BATCH_ID UUID REFERENCES BATCH_PROCESSES(BATCH_ID), + STATUS VARCHAR(50) NOT NULL, + CREATED_AT TIMESTAMP WITHOUT TIME ZONE, + LAST_BATCHED_AT TIMESTAMP WITHOUT TIME ZONE, + AUDIT_CREATED_AT TIMESTAMP WITHOUT TIME ZONE DEFAULT NOW(), + ERROR_MESSAGES TEXT[] +); diff --git a/db/scripts/audit/tables/drop_priority_end_product_sync_queue_audit.rb b/db/scripts/audit/tables/drop_priority_end_product_sync_queue_audit.rb new file mode 100644 index 00000000000..e216a9ccc5c --- /dev/null +++ b/db/scripts/audit/tables/drop_priority_end_product_sync_queue_audit.rb @@ -0,0 +1,7 @@ +# frozen_string_literal: true + +require "pg" + +conn = CaseflowRecord.connection +conn.execute("DROP TABLE IF EXISTS CASEFLOW_AUDIT.PRIORITY_END_PRODUCT_SYNC_QUEUE_AUDIT;") +conn.close diff --git a/db/scripts/audit/tables/drop_priority_end_product_sync_queue_audit.sql b/db/scripts/audit/tables/drop_priority_end_product_sync_queue_audit.sql new 
file mode 100644 index 00000000000..4526910f48f --- /dev/null +++ b/db/scripts/audit/tables/drop_priority_end_product_sync_queue_audit.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS CASEFLOW_AUDIT.PRIORITY_END_PRODUCT_SYNC_QUEUE_AUDIT; diff --git a/db/scripts/audit/triggers/create_priority_end_product_sync_queue_audit_trigger.rb b/db/scripts/audit/triggers/create_priority_end_product_sync_queue_audit_trigger.rb new file mode 100644 index 00000000000..225455e279f --- /dev/null +++ b/db/scripts/audit/triggers/create_priority_end_product_sync_queue_audit_trigger.rb @@ -0,0 +1,12 @@ +# frozen_string_literal: true + +require "pg" + +conn = CaseflowRecord.connection +conn.execute( + "create trigger priority_end_product_sync_queue_audit_trigger + after insert or update or delete on public.priority_end_product_sync_queue + for each row + execute procedure caseflow_audit.add_row_to_priority_end_product_sync_queue_audit();" +) +conn.close diff --git a/db/scripts/audit/triggers/create_priority_end_product_sync_queue_audit_trigger.sql b/db/scripts/audit/triggers/create_priority_end_product_sync_queue_audit_trigger.sql new file mode 100644 index 00000000000..d3a436b74d9 --- /dev/null +++ b/db/scripts/audit/triggers/create_priority_end_product_sync_queue_audit_trigger.sql @@ -0,0 +1,4 @@ +create trigger priority_end_product_sync_queue_audit_trigger +after insert or update or delete on public.priority_end_product_sync_queue +for each row +execute procedure caseflow_audit.add_row_to_priority_end_product_sync_queue_audit(); diff --git a/db/scripts/audit/triggers/drop_priority_end_product_sync_queue_audit_trigger.rb b/db/scripts/audit/triggers/drop_priority_end_product_sync_queue_audit_trigger.rb new file mode 100644 index 00000000000..7d519541c20 --- /dev/null +++ b/db/scripts/audit/triggers/drop_priority_end_product_sync_queue_audit_trigger.rb @@ -0,0 +1,7 @@ +# frozen_string_literal: true + +require "pg" + +conn = CaseflowRecord.connection +conn.execute("DROP TRIGGER IF EXISTS 
priority_end_product_sync_queue_audit_trigger ON public.priority_end_product_sync_queue;") +conn.close diff --git a/db/scripts/audit/triggers/drop_priority_end_product_sync_queue_audit_trigger.sql b/db/scripts/audit/triggers/drop_priority_end_product_sync_queue_audit_trigger.sql new file mode 100644 index 00000000000..dc7c1d9d033 --- /dev/null +++ b/db/scripts/audit/triggers/drop_priority_end_product_sync_queue_audit_trigger.sql @@ -0,0 +1 @@ +DROP TRIGGER IF EXISTS priority_end_product_sync_queue_audit_trigger ON public.priority_end_product_sync_queue; diff --git a/db/scripts/external/create_vbms_ext_claim_table.rb b/db/scripts/external/create_vbms_ext_claim_table.rb new file mode 100644 index 00000000000..3a1a37e2470 --- /dev/null +++ b/db/scripts/external/create_vbms_ext_claim_table.rb @@ -0,0 +1,47 @@ +# frozen_string_literal: true + +require "pg" + +conn = CaseflowRecord.connection +conn.execute('CREATE TABLE IF NOT EXISTS public.vbms_ext_claim ( + "CLAIM_ID" numeric(38,0) primary key unique NOT null, + "CLAIM_DATE" timestamp without time zone, + "EP_CODE" character varying(25), + "SUSPENSE_DATE" timestamp without time zone, + "SUSPENSE_REASON_CODE" character varying(25), + "SUSPENSE_REASON_COMMENTS" character varying(1000), + "CLAIMANT_PERSON_ID" numeric(38,0), + "CONTENTION_COUNT" integer, + "CLAIM_SOJ" character varying(25), + "TEMPORARY_CLAIM_SOJ" character varying(25), + "PRIORITY" character varying(10), + "TYPE_CODE" character varying(25), + "LIFECYCLE_STATUS_NAME" character varying(50), + "LEVEL_STATUS_CODE" character varying(25), + "SUBMITTER_APPLICATION_CODE" character varying(25), + "SUBMITTER_ROLE_CODE" character varying(25), + "VETERAN_PERSON_ID" numeric(15,0), + "ESTABLISHMENT_DATE" timestamp without time zone, + "INTAKE_SITE" character varying(25), + "PAYEE_CODE" character varying(25), + "SYNC_ID" numeric(38,0) NOT null, + "CREATEDDT" timestamp without time zone NOT null default NULL, + "LASTUPDATEDT" timestamp without time zone NOT null default 
NULL, + "EXPIRATIONDT" timestamp without time zone, + "VERSION" numeric(38,0) NOT null default NULL, + "LIFECYCLE_STATUS_CHANGE_DATE" timestamp without time zone, + "RATING_SOJ" character varying(25), + "PROGRAM_TYPE_CODE" character varying(10), + "SERVICE_TYPE_CODE" character varying(10), + "PREVENT_AUDIT_TRIG" smallint NOT null default 0, + "PRE_DISCHARGE_TYPE_CODE" character varying(10), + "PRE_DISCHARGE_IND" character varying(5), + "ORGANIZATION_NAME" character varying(100), + "ORGANIZATION_SOJ" character varying(25), + "ALLOW_POA_ACCESS" character varying(5), + "POA_CODE" character varying(25) + );') + +conn.execute('CREATE INDEX IF NOT EXISTS claim_id_index ON public.vbms_ext_claim ("CLAIM_ID")') +conn.execute('CREATE INDEX IF NOT EXISTS level_status_code_index ON public.vbms_ext_claim ("LEVEL_STATUS_CODE")') +conn.close diff --git a/db/scripts/external/create_vbms_ext_claim_table.sql b/db/scripts/external/create_vbms_ext_claim_table.sql new file mode 100644 index 00000000000..02a6e0f356e --- /dev/null +++ b/db/scripts/external/create_vbms_ext_claim_table.sql @@ -0,0 +1,42 @@ +CREATE TABLE IF NOT EXISTS PUBLIC.VBMS_EXT_CLAIM ( + "CLAIM_ID" NUMERIC(38, 0) PRIMARY KEY UNIQUE NOT NULL, + "CLAIM_DATE" TIMESTAMP WITHOUT TIME ZONE, + "EP_CODE" CHARACTER VARYING(25), + "SUSPENSE_DATE" TIMESTAMP WITHOUT TIME ZONE, + "SUSPENSE_REASON_CODE" CHARACTER VARYING(25), + "SUSPENSE_REASON_COMMENTS" CHARACTER VARYING(1000), + "CLAIMANT_PERSON_ID" NUMERIC(38, 0), + "CONTENTION_COUNT" INTEGER, + "CLAIM_SOJ" CHARACTER VARYING(25), + "TEMPORARY_CLAIM_SOJ" CHARACTER VARYING(25), + "PRIORITY" CHARACTER VARYING(10), + "TYPE_CODE" CHARACTER VARYING(25), + "LIFECYCLE_STATUS_NAME" CHARACTER VARYING(50), + "LEVEL_STATUS_CODE" CHARACTER VARYING(25), + "SUBMITTER_APPLICATION_CODE" CHARACTER VARYING(25), + "SUBMITTER_ROLE_CODE" CHARACTER VARYING(25), + "VETERAN_PERSON_ID" NUMERIC(15, 0), + "ESTABLISHMENT_DATE" TIMESTAMP WITHOUT TIME ZONE, + "INTAKE_SITE" CHARACTER VARYING(25), + 
"PAYEE_CODE" CHARACTER VARYING(25), + "SYNC_ID" NUMERIC(38, 0) NOT NULL, + "CREATEDDT" TIMESTAMP WITHOUT TIME ZONE NOT NULL DEFAULT NULL, + "LASTUPDATEDT" TIMESTAMP WITHOUT TIME ZONE NOT NULL DEFAULT NULL, + "EXPIRATIONDT" TIMESTAMP WITHOUT TIME ZONE, + "VERSION" NUMERIC(38, 0) NOT NULL DEFAULT NULL, + "LIFECYCLE_STATUS_CHANGE_DATE" TIMESTAMP WITHOUT TIME ZONE, + "RATING_SOJ" CHARACTER VARYING(25), + "PROGRAM_TYPE_CODE" CHARACTER VARYING(10), + "SERVICE_TYPE_CODE" CHARACTER VARYING(10), + "PREVENT_AUDIT_TRIG" SMALLINT NOT NULL DEFAULT 0, + "PRE_DISCHARGE_TYPE_CODE" CHARACTER VARYING(10), + "PRE_DISCHARGE_IND" CHARACTER VARYING(5), + "ORGANIZATION_NAME" CHARACTER VARYING(100), + "ORGANIZATION_SOJ" CHARACTER VARYING(25), + "ALLOW_POA_ACCESS" CHARACTER VARYING(5), + "POA_CODE" CHARACTER VARYING(25) +); + +CREATE INDEX IF NOT EXISTS CLAIM_ID_INDEX ON PUBLIC.VBMS_EXT_CLAIM ("CLAIM_ID"); + +CREATE INDEX IF NOT EXISTS LEVEL_STATUS_CODE_INDEX ON PUBLIC.VBMS_EXT_CLAIM ("LEVEL_STATUS_CODE"); diff --git a/db/scripts/external/remove_vbms_ext_claim_seeds.rb b/db/scripts/external/remove_vbms_ext_claim_seeds.rb new file mode 100644 index 00000000000..f56aecab40a --- /dev/null +++ b/db/scripts/external/remove_vbms_ext_claim_seeds.rb @@ -0,0 +1,40 @@ +# frozen_string_literal: true + +require "pg" + +conn = CaseflowRecord.connection +conn.execute( + "DELETE FROM PRIORITY_END_PRODUCT_SYNC_QUEUE; + + DELETE FROM BATCH_PROCESSES; + + DELETE FROM VBMS_EXT_CLAIM; + + DELETE FROM REQUEST_ISSUES + WHERE + EXISTS( + SELECT + * + FROM + END_PRODUCT_ESTABLISHMENTS EPE + WHERE + END_PRODUCT_ESTABLISHMENT_ID = EPE.ID + AND VETERAN_FILE_NUMBER LIKE '0003%' + ); + + DELETE FROM HIGHER_LEVEL_REVIEWS + WHERE + VETERAN_FILE_NUMBER LIKE '0003%'; + + DELETE FROM SUPPLEMENTAL_CLAIMS + WHERE + VETERAN_FILE_NUMBER LIKE '0003%'; + + DELETE FROM END_PRODUCT_ESTABLISHMENTS + WHERE + VETERAN_FILE_NUMBER LIKE '0003%'; + + DELETE FROM VETERANS + WHERE + FILE_NUMBER LIKE '0003%';" +) diff --git 
a/db/scripts/external/remove_vbms_ext_claim_seeds.sql b/db/scripts/external/remove_vbms_ext_claim_seeds.sql new file mode 100644 index 00000000000..b4e1e5c609e --- /dev/null +++ b/db/scripts/external/remove_vbms_ext_claim_seeds.sql @@ -0,0 +1,33 @@ +DELETE FROM PRIORITY_END_PRODUCT_SYNC_QUEUE; + +DELETE FROM BATCH_PROCESSES; + +DELETE FROM VBMS_EXT_CLAIM; + +DELETE FROM REQUEST_ISSUES +WHERE + EXISTS( + SELECT + * + FROM + END_PRODUCT_ESTABLISHMENTS EPE + WHERE + END_PRODUCT_ESTABLISHMENT_ID = EPE.ID + AND VETERAN_FILE_NUMBER LIKE '0003%' + ); + +DELETE FROM HIGHER_LEVEL_REVIEWS +WHERE + VETERAN_FILE_NUMBER LIKE '0003%'; + +DELETE FROM SUPPLEMENTAL_CLAIMS +WHERE + VETERAN_FILE_NUMBER LIKE '0003%'; + +DELETE FROM END_PRODUCT_ESTABLISHMENTS +WHERE + VETERAN_FILE_NUMBER LIKE '0003%'; + +DELETE FROM VETERANS +WHERE + FILE_NUMBER LIKE '0003%'; diff --git a/db/scripts/external/remove_vbms_ext_claim_table.rb b/db/scripts/external/remove_vbms_ext_claim_table.rb new file mode 100644 index 00000000000..192de6a0f15 --- /dev/null +++ b/db/scripts/external/remove_vbms_ext_claim_table.rb @@ -0,0 +1,8 @@ +# frozen_string_literal: true + +require "pg" + +conn = CaseflowRecord.connection +conn.execute( + "drop table IF EXISTS public.vbms_ext_claim;" +) diff --git a/db/scripts/external/remove_vbms_ext_claim_table.sql b/db/scripts/external/remove_vbms_ext_claim_table.sql new file mode 100644 index 00000000000..b9e2d7f4c54 --- /dev/null +++ b/db/scripts/external/remove_vbms_ext_claim_table.sql @@ -0,0 +1 @@ +drop table IF EXISTS public.vbms_ext_claim; diff --git a/db/seeds.rb b/db/seeds.rb index 00a949eebc3..666990f8056 100644 --- a/db/seeds.rb +++ b/db/seeds.rb @@ -57,6 +57,7 @@ def seed call_and_log_seed_step Seeds::TestCaseData call_and_log_seed_step Seeds::Notifications call_and_log_seed_step Seeds::CavcDashboardData + call_and_log_seed_step Seeds::VbmsExtClaim # Always run this as last one call_and_log_seed_step Seeds::StaticTestCaseData call_and_log_seed_step 
Seeds::StaticDispatchedAppealsTestData diff --git a/db/seeds/vbms_ext_claim.rb b/db/seeds/vbms_ext_claim.rb new file mode 100644 index 00000000000..e0d25ac3798 --- /dev/null +++ b/db/seeds/vbms_ext_claim.rb @@ -0,0 +1,209 @@ +# frozen_string_literal: true + +# VbmsExtClaim and related records are created here to test the new EP Establishment process +# To create the VbmsExtClaim table, run 'make external-db-create' +# +# To create the seeds, run 'make seed-vbms-ext-claim' +# => this can be ran multiple times to create more seeds +# +# To destroy the seeds and records related to EP Establishment testing, run 'make remove-vbms-ext-claim-seeds' +# => removes the audit tables; removes all PriorityEndProductSyncQueue, BatchProcess, and seed records; recreates audit tables +# +# To destroy the records mentioned above and re-seed, run 'make reseed-vbms-ext-claim' +# Disable :reek:InstanceVariableAssumption +module Seeds + class VbmsExtClaim < Base + + def initialize + file_number_initial_value + end + + ################# records created ################## + # 325 vbms_ext_claims (125 not connected to an EPE) + # 200 veterans (each connected to an EPE) + + # 100 HLR EPEs + # 50 out of sync with vbms + # 25 "PEND", VEC "CLR" | 25 "CAN", VEC "CLR" + # + # 50 in sync with vbms => + # 25 "CAN", VEC "CAN" | 25 "CLR", VEC "CLR" + + # 100 SC EPEs + # 50 out of sync with vbms => + # 25 "PEND", VEC "CAN" | 25 "CLR", VEC "CAN" + # + # 50 in sync with vbms => + # 25 "CLR", VEC "CLR" | 25 "CAN", VEC "CAN" + + # Each EPE has 2 request issues (one rating, one nonrating) + # 400 request issues => 200 rating, 200 nonrating + #################################################### + def seed! 
+ create_vbms_ext_claims_with_no_end_product_establishment + create_in_sync_epes_and_vbms_ext_claims + create_out_of_sync_epes_and_vbms_ext_claims + end + + private + + # maintains previous file number values while allowing for reseeding + def file_number_initial_value + @file_number ||= 300_000 + # this seed file creates 200 new veterans on each run, 250 is sufficient margin to add more data + @file_number += 250 while Veteran.find_by(file_number: format("%09d", n: @file_number)) + end + + ## + # this out_of_sync method creates and seeds Vbms_Ext_Claims that have a Level_Status_Code DIFFERENT than the + # End_Product_Establishment sync_status in order to test the sync_job and batch_job that finds differences between + # VbmsExtClaim associated with the End Product Establishment + ## + def create_out_of_sync_epes_and_vbms_ext_claims + # 25 High Level Review, End Product Establishments that have a sync_status of "PEND" and are out_of_sync with + # vbms_ext_claims ("CLR") + 25.times do + veteran = create(:veteran, file_number: format("%09d", n: @file_number)) + @file_number += 1 + + end_product_establishment = create_end_product_establishment(:active_hlr_with_cleared_vbms_ext_claim, veteran) + created_request_issue_one = create_request_issue(:rating, end_product_establishment) + created_request_issue_two = create_request_issue(:nonrating, end_product_establishment) + end + + # 25 High Level Review, End Product Establishments that have a sync_status of "CAN" and are out_of_sync with + # vbms_ext_claims ("CLR") + 25.times do + veteran = create(:veteran, file_number: format("%09d", n: @file_number)) + @file_number += 1 + + end_product_establishment = create_end_product_establishment(:canceled_hlr_with_cleared_vbms_ext_claim, veteran) + created_request_issue_one = create_request_issue(:rating, end_product_establishment) + created_request_issue_two = create_request_issue(:nonrating, end_product_establishment) + end + + # 25 Supplemental Claims, End Product Establishments
that have a sync_status of "CLR" and are out_of_sync with + # vbms_ext_claims ("CAN") + 25.times do + veteran = create(:veteran, file_number: format("%09d", n: @file_number)) + @file_number += 1 + + end_product_establishment = create_end_product_establishment(:cleared_supp_with_canceled_vbms_ext_claim, veteran) + created_request_issue_one = create_request_issue(:rating, end_product_establishment) + created_request_issue_two = create_request_issue(:nonrating, end_product_establishment) + end + + # 25 Supplemental Claims, End Product Establishments that have a sync_status of "PEND" and are out_of_sync with + # vbms_ext_claims ("CAN") + 25.times do + veteran = create(:veteran, file_number: format("%09d", n: @file_number)) + @file_number += 1 + + end_product_establishment = create_end_product_establishment(:active_supp_with_canceled_vbms_ext_claim, veteran) + created_request_issue_one = create_request_issue(:rating, end_product_establishment) + created_request_issue_two = create_request_issue(:nonrating, end_product_establishment) + end + end + + ## + # this in_sync method creates and seeds Vbms_Ext_Claims that have a Level_Status_Code matching the + # End_Product_Establishment sync_status in order to test the sync_job and batch_job that finds differences between + # VbmsExtClaim associated with the End Product Establishment. 
Both jobs should skip these objects because + # Level_Status_Code matches the sync_status + ## + def create_in_sync_epes_and_vbms_ext_claims + # 25 High Level Review, End Product Establishments that have a sync_status of "CAN" and are in_sync with + # vbms_ext_claims ("CAN") + 25.times do + veteran = create(:veteran, file_number: format("%09d", n: @file_number)) + @file_number += 1 + + end_product_establishment = create_end_product_establishment(:canceled_hlr_with_canceled_vbms_ext_claim, veteran) + created_request_issue_one = create_request_issue(:rating, end_product_establishment) + created_request_issue_two = create_request_issue(:nonrating, end_product_establishment) + end + + # 25 High Level Review, End Product Establishments that have a sync_status of "CLR" and are in_sync with + # vbms_ext_claims ("CLR") + 25.times do + veteran = create(:veteran, file_number: format("%09d", n: @file_number)) + @file_number += 1 + + end_product_establishment = create_end_product_establishment(:cleared_hlr_with_cleared_vbms_ext_claim, veteran) + created_request_issue_one = create_request_issue(:rating, end_product_establishment) + created_request_issue_two = create_request_issue(:nonrating, end_product_establishment) + end + + # 25 Supplemental Claims, End Product Establishments that have a sync_status of "CLR" and are in_sync with + # vbms_ext_claims ("CLR") + 25.times do + veteran = create(:veteran, file_number: format("%09d", n: @file_number)) + @file_number += 1 + + end_product_establishment = create_end_product_establishment(:cleared_supp_with_cleared_vbms_ext_claim, veteran) + created_request_issue_one = create_request_issue(:rating, end_product_establishment) + created_request_issue_two = create_request_issue(:nonrating, end_product_establishment) + end + + # 25 Supplemental Claims, End Product Establishments that have a sync_status of "CAN" and are in_sync with + # vbms_ext_claims ("CAN") + 25.times do + veteran = create(:veteran, file_number: format("%09d", n:
@file_number)) + @file_number += 1 + + end_product_establishment = create_end_product_establishment(:canceled_supp_with_canceled_vbms_ext_claim, veteran) + created_request_issue_one = create_request_issue(:rating, end_product_establishment) + created_request_issue_two = create_request_issue(:nonrating, end_product_establishment) + end + end + + ## + # this method creates VBMS_EXT_CLAIMS that have yet to be Established in CASEFLOW to mimic + # the VBMS API CALL. The VBMS_EXT_CLAIMS have no associations to an End Product Establishment. + ## + def create_vbms_ext_claims_with_no_end_product_establishment + # creates 50 non epe associated vbms_ext_claims with LEVEL_STATUS_CODE "CLR" + 50.times do + create(:vbms_ext_claim, :cleared) + end + # creates 50 non epe associated vbms_ext_claims with LEVEL_STATUS_CODE "CAN" + 50.times do + create(:vbms_ext_claim, :canceled) + end + # creates 25 non epe associated vbms_ext_claims with LEVEL_STATUS_CODE "RDC" + 25.times do + create(:vbms_ext_claim, :rdc) + end + end + + # 'trait' will update the following EPE columns: + # synced_status, established_at, modifier, code + # additionally, the following records will be created: + # an HLR or SC + # a VbmsExtClaim + # :reek:FeatureEnvy + def create_end_product_establishment(trait, veteran) + create(:end_product_establishment, + trait, + veteran_file_number: veteran.file_number, + claimant_participant_id: veteran.participant_id + ) + end + + # 'trait' will specify if the RI is rating or nonrating + + # if it is rating, these columns will be updated: + # contested_rating_issue_reference_id, contested_rating_issue_profile_date, decision_date + + # if it is nonrating, these columns will be updated: + # nonrating_issue_category, decision_date, nonrating_issue_description + def create_request_issue(trait, end_product_establishment) + create(:request_issue, + trait, + decision_review: end_product_establishment.source, + end_product_establishment: end_product_establishment + ) + end + + end +end
diff --git a/lib/caseflow/error.rb b/lib/caseflow/error.rb index df77673dd3b..52a8948bc38 100644 --- a/lib/caseflow/error.rb +++ b/lib/caseflow/error.rb @@ -31,6 +31,12 @@ class DocumentRetrievalError < EfolderError; end class EfolderAccessForbidden < EfolderError; end class ClientRequestError < EfolderError; end + class PriorityEndProductSyncError < StandardError + def ignorable? + true + end + end + class VaDotGovAPIError < SerializableError; end class VaDotGovRequestError < VaDotGovAPIError; end class VaDotGovServerError < VaDotGovAPIError; end diff --git a/lib/tasks/custom_seed.rake b/lib/tasks/custom_seed.rake new file mode 100644 index 00000000000..d675dfd842c --- /dev/null +++ b/lib/tasks/custom_seed.rake @@ -0,0 +1,18 @@ +# frozen_string_literal: true + +# This allows you to run a custom db:seed file +# for example: bundle exec rake db:seed:custom_seed_file_name +namespace :db do + namespace :seed do + Dir[File.join(Rails.root, "db", "seeds", "*.rb")].each do |filename| + task_name = File.basename(filename, ".rb").intern + task task_name => :environment do + load(filename) + # when bundle exec rake db:seed:vbms_ext_claim is called + # it runs the seed! method inside vbms_ext_claim.rb + class_name = task_name.to_s.camelize + Seeds.const_get(class_name).new.seed! + end + end + end +end diff --git a/spec/factories/end_product_establishment.rb b/spec/factories/end_product_establishment.rb index 0644c88a3c0..2aee01d206e 100644 --- a/spec/factories/end_product_establishment.rb +++ b/spec/factories/end_product_establishment.rb @@ -3,7 +3,13 @@ FactoryBot.define do factory :end_product_establishment do veteran_file_number { generate :veteran_file_number } - sequence(:reference_id, &:to_s) + sequence(:reference_id) do + if EndProductEstablishment.any? 
+ (EndProductEstablishment.last.reference_id.to_i + 1).to_s + else + "1" + end + end source { create(:ramp_election, veteran_file_number: veteran_file_number) } code { "030HLRR" } modifier { "030" } @@ -24,6 +30,186 @@ established_at { 5.days.ago } end + trait :active_hlr do + synced_status { "PEND" } + established_at { 5.days.ago } + source { create(:higher_level_review, veteran_file_number: veteran_file_number) } + end + + trait :active_supp do + synced_status { "PEND" } + established_at { 5.days.ago } + source { create(:supplemental_claim, veteran_file_number: veteran_file_number) } + end + + trait :active_hlr_with_canceled_vbms_ext_claim do + active_hlr + modifier { "030" } + code { "030HLRR" } + after(:build) do |end_product_establishment, _evaluator| + create(:vbms_ext_claim, :hlr, :canceled, claim_id: end_product_establishment.reference_id) + ep = end_product_establishment.result + ep_store = Fakes::EndProductStore.new + ep_store.update_ep_status(end_product_establishment.veteran_file_number, + ep.claim_id, "CAN") + end + end + + trait :active_hlr_with_active_vbms_ext_claim do + active_hlr + modifier { "030" } + code { "030HLRR" } + after(:build) do |end_product_establishment, _evaluator| + create(:vbms_ext_claim, :hlr, :rdc, claim_id: end_product_establishment.reference_id) + ep = end_product_establishment.result + ep_store = Fakes::EndProductStore.new + ep_store.update_ep_status(end_product_establishment.veteran_file_number, + ep.claim_id, "RDC") + end + end + + trait :active_hlr_with_cleared_vbms_ext_claim do + active_hlr + modifier { "030" } + code { "030HLRR" } + after(:build) do |end_product_establishment, _evaluator| + create(:vbms_ext_claim, :hlr, :cleared, claim_id: end_product_establishment.reference_id) + ep = end_product_establishment.result + ep_store = Fakes::EndProductStore.new + ep_store.update_ep_status(end_product_establishment.veteran_file_number, + ep.claim_id, "CLR") + end + end + + trait :canceled_hlr_with_canceled_vbms_ext_claim do + 
canceled + established_at { 5.days.ago } + modifier { "030" } + code { "030HLRR" } + source { create(:higher_level_review, veteran_file_number: veteran_file_number) } + after(:build) do |end_product_establishment, _evaluator| + create(:vbms_ext_claim, :hlr, :canceled, claim_id: end_product_establishment.reference_id) + ep = end_product_establishment.result + ep_store = Fakes::EndProductStore.new + ep_store.update_ep_status(end_product_establishment.veteran_file_number, + ep.claim_id, "CAN") + end + end + + trait :cleared_hlr_with_cleared_vbms_ext_claim do + cleared + established_at { 5.days.ago } + modifier { "030" } + code { "030HLRR" } + source { create(:higher_level_review, veteran_file_number: veteran_file_number) } + after(:build) do |end_product_establishment, _evaluator| + create(:vbms_ext_claim, :hlr, :cleared, claim_id: end_product_establishment.reference_id) + ep = end_product_establishment.result + ep_store = Fakes::EndProductStore.new + ep_store.update_ep_status(end_product_establishment.veteran_file_number, + ep.claim_id, "CLR") + end + end + + trait :active_supp_with_canceled_vbms_ext_claim do + active_supp + modifier { "040" } + code { "040SCR" } + after(:build) do |end_product_establishment, _evaluator| + create(:vbms_ext_claim, :slc, :canceled, claim_id: end_product_establishment.reference_id) + ep = end_product_establishment.result + ep_store = Fakes::EndProductStore.new + ep_store.update_ep_status(end_product_establishment.veteran_file_number, + ep.claim_id, "CAN") + end + end + + trait :active_supp_with_active_vbms_ext_claim do + active_supp + modifier { "040" } + code { "040SCR" } + after(:build) do |end_product_establishment, _evaluator| + create(:vbms_ext_claim, :slc, :rdc, claim_id: end_product_establishment.reference_id) + ep = end_product_establishment.result + ep_store = Fakes::EndProductStore.new + ep_store.update_ep_status(end_product_establishment.veteran_file_number, + ep.claim_id, "RDC") + end + end + + trait 
:active_supp_with_cleared_vbms_ext_claim do + active_supp + modifier { "040" } + code { "040SCR" } + after(:build) do |end_product_establishment, _evaluator| + create(:vbms_ext_claim, :slc, :cleared, claim_id: end_product_establishment.reference_id) + ep = end_product_establishment.result + ep_store = Fakes::EndProductStore.new + ep_store.update_ep_status(end_product_establishment.veteran_file_number, + ep.claim_id, "CLR") + end + end + + trait :canceled_supp_with_canceled_vbms_ext_claim do + canceled + established_at { 5.days.ago } + modifier { "040" } + code { "040SCR" } + source { create(:supplemental_claim, veteran_file_number: veteran_file_number) } + after(:build) do |end_product_establishment, _evaluator| + create(:vbms_ext_claim, :slc, :canceled, claim_id: end_product_establishment.reference_id) + ep = end_product_establishment.result + ep_store = Fakes::EndProductStore.new + ep_store.update_ep_status(end_product_establishment.veteran_file_number, + ep.claim_id, "CAN") + end + end + + trait :cleared_supp_with_cleared_vbms_ext_claim do + cleared + established_at { 5.days.ago } + modifier { "040" } + code { "040SCR" } + source { create(:supplemental_claim, veteran_file_number: veteran_file_number) } + after(:build) do |end_product_establishment, _evaluator| + create(:vbms_ext_claim, :slc, :cleared, claim_id: end_product_establishment.reference_id) + ep = end_product_establishment.result + ep_store = Fakes::EndProductStore.new + ep_store.update_ep_status(end_product_establishment.veteran_file_number, + ep.claim_id, "CLR") + end + end + + trait :canceled_hlr_with_cleared_vbms_ext_claim do + canceled + established_at { 5.days.ago } + modifier { "030" } + code { "030HLRR" } + source { create(:higher_level_review, veteran_file_number: veteran_file_number) } + after(:build) do |end_product_establishment, _evaluator| + create(:vbms_ext_claim, :hlr, :cleared, claim_id: end_product_establishment.reference_id) + ep = end_product_establishment.result + ep_store = 
Fakes::EndProductStore.new + ep_store.update_ep_status(end_product_establishment.veteran_file_number, + ep.claim_id, "CLR") + end + end + + trait :cleared_supp_with_canceled_vbms_ext_claim do + cleared + established_at { 5.days.ago } + modifier { "040" } + code { "040SCR" } + source { create(:supplemental_claim, veteran_file_number: veteran_file_number) } + after(:build) do |end_product_establishment, _evaluator| + create(:vbms_ext_claim, :slc, :canceled, claim_id: end_product_establishment.reference_id) + ep = end_product_establishment.result + ep_store = Fakes::EndProductStore.new + ep_store.update_ep_status(end_product_establishment.veteran_file_number, + ep.claim_id, "CAN") + end + end + after(:build) do |end_product_establishment, _evaluator| Generators::EndProduct.build( veteran_file_number: end_product_establishment.veteran_file_number, diff --git a/spec/factories/priority_end_product_sync_queue.rb b/spec/factories/priority_end_product_sync_queue.rb new file mode 100644 index 00000000000..636f9f0220d --- /dev/null +++ b/spec/factories/priority_end_product_sync_queue.rb @@ -0,0 +1,27 @@ +# frozen_string_literal: true + +FactoryBot.define do + factory :priority_end_product_sync_queue do + end_product_establishment { create(:end_product_establishment, :active_hlr) } + + trait :pre_processing do + status { "PRE_PROCESSING" } + end + + trait :processing do + status { "PROCESSING" } + end + + trait :synced do + status { "SYNCED" } + end + + trait :error do + status { "ERROR" } + end + + trait :stuck do + status { "STUCK" } + end + end +end diff --git a/spec/factories/vbms_ext_claim.rb b/spec/factories/vbms_ext_claim.rb new file mode 100644 index 00000000000..b5e699f04df --- /dev/null +++ b/spec/factories/vbms_ext_claim.rb @@ -0,0 +1,48 @@ +# frozen_string_literal: true + +FactoryBot.define do + factory :vbms_ext_claim do + # prevents vbms_ext_claim from having a duplicate key + sequence(:claim_id) do + if VbmsExtClaim.any? 
+ (VbmsExtClaim.last.claim_id + 1).to_s + else + "300000" + end + end + claim_date { Time.zone.now - 1.day } + sync_id { 1 } + createddt { Time.zone.now - 1.day } + establishment_date { Time.zone.now - 1.day } + lastupdatedt { Time.zone.now } + expirationdt { Time.zone.now + 5.days } + version { 22 } + prevent_audit_trig { 2 } + + trait :cleared do + LEVEL_STATUS_CODE { "CLR" } + end + + trait :canceled do + LEVEL_STATUS_CODE { "CAN" } + end + + # rdc: rating decision complete + trait :rdc do + LEVEL_STATUS_CODE { "RDC" } + end + + # high_level_review ext claim + trait :hlr do + EP_CODE { "030" } + TYPE_CODE { "030HLRR" } + PAYEE_CODE { "00" } + end + # supplemental_claim ext claim + trait :slc do + EP_CODE { "040" } + TYPE_CODE { "040SCR" } + PAYEE_CODE { "00" } + end + end +end diff --git a/spec/jobs/batch_processes/batch_process_rescue_job_spec.rb b/spec/jobs/batch_processes/batch_process_rescue_job_spec.rb new file mode 100644 index 00000000000..1cc6bc6c23f --- /dev/null +++ b/spec/jobs/batch_processes/batch_process_rescue_job_spec.rb @@ -0,0 +1,278 @@ +# frozen_string_literal: true + +require "./app/jobs/batch_processes/batch_process_rescue_job.rb" + +describe BatchProcessRescueJob, type: :job do + include ActiveJob::TestHelper + + before do + Timecop.freeze(Time.utc(2022, 1, 1, 12, 0, 0)) + allow(SlackService).to receive(:new).with(url: anything).and_return(slack_service) + allow(slack_service).to receive(:send_notification) { |_, first_arg| @slack_msg = first_arg } + end + + let(:slack_service) { SlackService.new(url: "http://www.example.com") } + + let!(:end_product_establishments_one) do + create_list(:end_product_establishment, 2, :active_hlr_with_cleared_vbms_ext_claim) + end + + let!(:pepsq_records_one) do + PopulateEndProductSyncQueueJob.perform_now + end + + let!(:first_batch_process) do + PriorityEpSyncBatchProcessJob.perform_now + end + + let!(:end_product_establishments_two) do + create_list(:end_product_establishment, 2, 
:active_hlr_with_cleared_vbms_ext_claim) + end + + let!(:pepsq_records_two) do + PopulateEndProductSyncQueueJob.perform_now + end + + let!(:second_batch_process) do + PriorityEpSyncBatchProcessJob.perform_now + end + + let!(:batch_process_one) do + BatchProcess.first + end + + let!(:batch_process_two) do + BatchProcess.second + end + + subject { BatchProcessRescueJob.perform_later } + + describe "#perform" do + context "when all batch processes are 'COMPLETED'" do + before do + perform_enqueued_jobs do + subject + end + end + it "all batch processes remain unchanged and do NOT reprocess" do + expect(batch_process_one).to eq(batch_process_one.reload) + expect(batch_process_two).to eq(batch_process_two.reload) + end + + it "slack will NOT be notified when job runs successfully" do + expect(slack_service).to_not have_received(:send_notification) + end + end + + context "when all batch processes are 'COMPLETED' but one has a created_at time more than the ERROR DELAY" do + before do + batch_process_one.update!(created_at: Time.zone.now - (BatchProcess::ERROR_DELAY.hours + 1.hour)) + perform_enqueued_jobs do + subject + end + end + it "all batch processes remain unchanged and do NOT reprocess" do + expect(batch_process_one).to eq(batch_process_one.reload) + expect(batch_process_two).to eq(batch_process_two.reload) + end + + it "slack will NOT be notified when job runs successfully" do + expect(slack_service).to_not have_received(:send_notification) + end + end + + context "when a batch process has a state of 'PRE_PROCESSING' & a created_at less than the ERROR_DELAY" do + before do + batch_process_one.update!( + state: Constants.BATCH_PROCESS.pre_processing, + created_at: Time.zone.now - (BatchProcess::ERROR_DELAY.hours - 2.hours) + ) + perform_enqueued_jobs do + subject + end + end + it "the batch process will remain unchanged and will NOT reprocess" do + expect(batch_process_one).to eq(batch_process_one.reload) + end + + it "slack will NOT be notified when job runs 
successfully" do + expect(slack_service).to_not have_received(:send_notification) + end + end + + context "when a batch process has a state of 'PRE_PROCESSING' & a created_at more than the ERROR_DELAY" do + before do + batch_process_one.update!( + state: Constants.BATCH_PROCESS.pre_processing, + created_at: Time.zone.now - (BatchProcess::ERROR_DELAY.hours + 1.hour) + ) + perform_enqueued_jobs do + subject + end + end + it "the batch process will reprocess" do + expect(batch_process_one.state).to eq(Constants.BATCH_PROCESS.pre_processing) + expect(batch_process_one.reload.state).to eq(Constants.BATCH_PROCESS.completed) + end + + it "slack will NOT be notified when job runs successfully" do + expect(slack_service).to_not have_received(:send_notification) + end + end + + context "when a batch process has a state of 'PROCESSING' & a created_at less than the ERROR_DELAY" do + before do + batch_process_one.update!( + state: Constants.BATCH_PROCESS.processing, + created_at: Time.zone.now - (BatchProcess::ERROR_DELAY.hours - 2.hours) + ) + perform_enqueued_jobs do + subject + end + end + it "the batch process will remain unchanged and will NOT reprocess" do + expect(batch_process_one).to eq(batch_process_one.reload) + end + + it "slack will NOT be notified when job runs successfully" do + expect(slack_service).to_not have_received(:send_notification) + end + end + + context "when a batch process has a state of 'PROCESSING' & a created_at more than the ERROR_DELAY" do + before do + batch_process_one.update!( + state: Constants.BATCH_PROCESS.processing, + created_at: Time.zone.now - (BatchProcess::ERROR_DELAY.hours + 1.hour) + ) + perform_enqueued_jobs do + subject + end + end + it "the batch process will reprocess" do + expect(batch_process_one.state).to eq(Constants.BATCH_PROCESS.processing) + expect(batch_process_one.reload.state).to eq(Constants.BATCH_PROCESS.completed) + end + + it "slack will NOT be notified when job runs successfully" do + expect(slack_service).to_not 
have_received(:send_notification) + end + end + + context "when two batch processes have a state of 'PRE_PROCESSING' & a created_at more than the ERROR_DELAY" do + before do + batch_process_one.update!( + state: Constants.BATCH_PROCESS.pre_processing, + created_at: Time.zone.now - (BatchProcess::ERROR_DELAY.hours + 1.hour) + ) + batch_process_two.update!( + state: Constants.BATCH_PROCESS.pre_processing, + created_at: Time.zone.now - (BatchProcess::ERROR_DELAY.hours + 1.hour) + ) + perform_enqueued_jobs do + subject + end + end + it "both batch processes will reprocess" do + expect(batch_process_one.state).to eq(Constants.BATCH_PROCESS.pre_processing) + expect(batch_process_one.reload.state).to eq(Constants.BATCH_PROCESS.completed) + expect(batch_process_two.state).to eq(Constants.BATCH_PROCESS.pre_processing) + expect(batch_process_two.reload.state).to eq(Constants.BATCH_PROCESS.completed) + end + + it "slack will NOT be notified when job runs successfully" do + expect(slack_service).to_not have_received(:send_notification) + end + end + + context "when two batch processes have a state of 'PROCESSING' & a created_at more than the ERROR_DELAY" do + before do + batch_process_one.update!( + state: Constants.BATCH_PROCESS.processing, + created_at: Time.zone.now - (BatchProcess::ERROR_DELAY.hours + 1.hour) + ) + batch_process_two.update!( + state: Constants.BATCH_PROCESS.processing, + created_at: Time.zone.now - (BatchProcess::ERROR_DELAY.hours + 1.hour) + ) + perform_enqueued_jobs do + subject + end + end + it "both batch processes will reprocess" do + expect(batch_process_one.state).to eq(Constants.BATCH_PROCESS.processing) + expect(batch_process_one.reload.state).to eq(Constants.BATCH_PROCESS.completed) + expect(batch_process_two.state).to eq(Constants.BATCH_PROCESS.processing) + expect(batch_process_two.reload.state).to eq(Constants.BATCH_PROCESS.completed) + end + + it "slack will NOT be notified when job runs successfully" do + expect(slack_service).to_not 
have_received(:send_notification) + end + end + + context "when an error occurs during the job" do + let(:standard_error) { StandardError.new("Some unexpected error occurred.") } + before do + batch_process_one.update!( + state: Constants.BATCH_PROCESS.processing, + created_at: Time.zone.now - (BatchProcess::ERROR_DELAY.hours + 1.hour) + ) + batch_process_two.update!( + state: Constants.BATCH_PROCESS.processing, + created_at: Time.zone.now - 16.hours + ) + allow(Rails.logger).to receive(:error) + allow(Raven).to receive(:capture_exception) + allow(Raven).to receive(:last_event_id) { "sentry_123" } + allow(BatchProcess).to receive(:needs_reprocessing).and_return([batch_process_one, batch_process_two]) + allow(batch_process_one).to receive(:process_batch!).and_raise(standard_error) + perform_enqueued_jobs do + subject + end + end + it "the error and the backtrace will be logged" do + expect(Rails.logger).to have_received(:error).with(an_instance_of(StandardError)) + end + + it "the error will be sent to Sentry" do + expect(Raven).to have_received(:capture_exception) + .with(instance_of(StandardError), + extra: { + active_job_id: subject.job_id.to_s, + job_time: Time.zone.now.to_s + }) + end + + it "slack will be notified when job fails" do + expect(slack_service).to have_received(:send_notification).with( + "[ERROR] Error running BatchProcessRescueJob. Error: #{standard_error.message}."\ + " Active Job ID: #{subject.job_id}. 
See Sentry event sentry_123.", "BatchProcessRescueJob" + ) + end + + it "the job will continue after the error and process the next batch until it is completed" do + expect(batch_process_two.state).to eq(Constants.BATCH_PROCESS.completed) + end + end + + context "when there are NO batch processes that need to be reprocessed" do + before do + allow(Rails.logger).to receive(:info) + perform_enqueued_jobs do + subject + end + end + + it "a message will be logged stating that NO batch processes needed reprocessing" do + expect(Rails.logger).to have_received(:info).with( + "No Unfinished Batches Could Be Identified. Time: #{Time.zone.now}." + ) + end + + it "slack will NOT be notified when job runs successfully" do + expect(slack_service).to_not have_received(:send_notification) + end + end + end +end diff --git a/spec/jobs/batch_processes/priority_ep_sync_batch_process_job_spec.rb b/spec/jobs/batch_processes/priority_ep_sync_batch_process_job_spec.rb new file mode 100644 index 00000000000..1b2716e946a --- /dev/null +++ b/spec/jobs/batch_processes/priority_ep_sync_batch_process_job_spec.rb @@ -0,0 +1,250 @@ +# frozen_string_literal: true + +require "./app/jobs/batch_processes/priority_ep_sync_batch_process_job.rb" +require "./app/models/batch_processes/batch_process.rb" + +describe PriorityEpSyncBatchProcessJob, type: :job do + include ActiveJob::TestHelper + + let(:slack_service) { SlackService.new(url: "http://www.example.com") } + + before do + allow(SlackService).to receive(:new).with(url: anything).and_return(slack_service) + allow(slack_service).to receive(:send_notification) { |_, first_arg| @slack_msg = first_arg } + end + + let!(:syncable_end_product_establishments) do + create_list(:end_product_establishment, 2, :active_hlr_with_cleared_vbms_ext_claim) + end + + let!(:end_product_establishment) do + create(:end_product_establishment, :active_hlr_with_cleared_vbms_ext_claim) + end + + let!(:pepsq_records) do + PopulateEndProductSyncQueueJob.perform_now + 
PriorityEndProductSyncQueue.all + end + + subject do + PriorityEpSyncBatchProcessJob.perform_later + end + + describe "#perform" do + context "when 2 records can sync successfully and 1 cannot" do + before do + end_product_establishment.vbms_ext_claim.destroy! + perform_enqueued_jobs do + subject + end + end + + it "creates one batch process record" do + expect(BatchProcess.count).to eq(1) + end + + it "the batch process has a state of 'COMPLETED'" do + expect(BatchProcess.first.state).to eq(Constants.BATCH_PROCESS.completed) + end + + it "the batch process has a 'started_at' date/time" do + expect(BatchProcess.first.started_at).not_to be_nil + end + + it "the batch process has a 'ended_at' date/time" do + expect(BatchProcess.first.ended_at).not_to be_nil + end + + it "the batch process has 2 records_completed" do + expect(BatchProcess.first.records_completed).to eq(2) + end + + it "the batch process has 1 records_failed" do + expect(BatchProcess.first.records_failed).to eq(1) + end + + it "slack will NOT be notified when job runs successfully" do + expect(slack_service).to_not have_received(:send_notification) + end + end + + context "when all 3 records able to sync successfully" do + before do + perform_enqueued_jobs do + subject + end + end + + it "the batch process has a state of 'COMPLETED'" do + expect(BatchProcess.first.state).to eq(Constants.BATCH_PROCESS.completed) + end + + it "the batch process has a 'started_at' date/time" do + expect(BatchProcess.first.started_at).not_to be_nil + end + + it "the batch process has a 'ended_at' date/time" do + expect(BatchProcess.first.ended_at).not_to be_nil + end + + it "the batch process has 3 records_completed" do + expect(BatchProcess.first.records_completed).to eq(3) + end + + it "the batch process has 0 records_failed" do + expect(BatchProcess.first.records_failed).to eq(0) + end + + it "slack will NOT be notified when job runs successfully" do + expect(slack_service).to_not have_received(:send_notification) + end 
+ end + + context "when the job creates multiple batches" do + before do + # Batch limit changes to 1 to test PriorityEpSyncBatchProcessJob loop + stub_const("BatchProcess::BATCH_LIMIT", 1) + + PriorityEndProductSyncQueue.last.destroy! + perform_enqueued_jobs do + subject + end + end + + it "both batch processes have a state of 'COMPLETED'" do + expect(BatchProcess.first.state).to eq(Constants.BATCH_PROCESS.completed) + expect(BatchProcess.second.state).to eq(Constants.BATCH_PROCESS.completed) + end + + it "both batch processes have a 'started_at' date/time" do + expect(BatchProcess.first.started_at).not_to be_nil + expect(BatchProcess.second.started_at).not_to be_nil + end + + it "both batch processes have a 'ended_at' date/time" do + expect(BatchProcess.first.ended_at).not_to be_nil + expect(BatchProcess.second.ended_at).not_to be_nil + end + + it "the first batch process has 1 records_completed" do + expect(BatchProcess.first.records_completed).to eq(BatchProcess::BATCH_LIMIT) + end + + it "the second batch process has 1 records_completed" do + expect(BatchProcess.second.records_completed).to eq(BatchProcess::BATCH_LIMIT) + end + + it "both batch processes have 0 records_failed" do + expect(BatchProcess.first.records_failed).to eq(0) + expect(BatchProcess.second.records_failed).to eq(0) + end + + it "slack will NOT be notified when job runs successfully" do + expect(slack_service).to_not have_received(:send_notification) + end + end + + context "when the job duration ends before all PriorityEndProductSyncQueue records can be batched" do + before do + # Batch limit of 1 limits the number of priority end product sync queue records per batch + stub_const("BatchProcess::BATCH_LIMIT", 1) + # Job duration of 0.01 seconds limits the job's loop to one iteration + stub_const("PriorityEpSyncBatchProcessJob::JOB_DURATION", 0.01.seconds) + + PriorityEndProductSyncQueue.last.destroy! 
+ perform_enqueued_jobs do + subject + end + end + + it "only 1 batch process record is created" do + expect(BatchProcess.count).to eq(1) + end + + it "the batch process includes only 1 of the 2 available PriorityEndProductSyncQueue records" do + expect(PriorityEndProductSyncQueue.count).to eq(2) + expect(BatchProcess.first.priority_end_product_sync_queue.count).to eq(1) + end + + it "the batch process has a state of 'COMPLETED'" do + expect(BatchProcess.first.state).to eq(Constants.BATCH_PROCESS.completed) + end + + it "the batch process has a 'started_at' date/time" do + expect(BatchProcess.first.started_at).not_to be_nil + end + + it "the batch process has a 'ended_at' date/time" do + expect(BatchProcess.first.ended_at).not_to be_nil + end + + it "the batch process has 1 records_attempted" do + expect(BatchProcess.first.records_attempted).to eq(1) + end + + it "the batch process has 0 records_failed" do + expect(BatchProcess.first.records_failed).to eq(0) + end + + it "slack will NOT be notified when job runs successfully" do + expect(slack_service).to_not have_received(:send_notification) + end + end + + context "when an error is raised during the job" do + let(:standard_error) { StandardError.new("Oh no! This is bad!") } + before do + allow(Rails.logger).to receive(:error) + allow(Raven).to receive(:capture_exception) + allow(Raven).to receive(:last_event_id) { "sentry_123" } + allow(PriorityEpSyncBatchProcess).to receive(:find_records_to_batch) + .and_raise(StandardError, "Oh no! 
This is bad!") + perform_enqueued_jobs do + subject + end + end + + it "the error and the backtrace will be logged" do + expect(Rails.logger).to have_received(:error).with(an_instance_of(StandardError)) + end + + it "the error will be sent to Sentry" do + expect(Raven).to have_received(:capture_exception) + .with(instance_of(StandardError), + extra: { + job_id: subject.job_id, + job_time: Time.zone.now.to_s + }) + end + + it "slack will be notified when job fails" do + expect(slack_service).to have_received(:send_notification).with( + "[ERROR] Error running PriorityEpSyncBatchProcessJob. Error: #{standard_error.message}."\ + " Active Job ID: #{subject.job_id}. See Sentry event sentry_123.", "PriorityEpSyncBatchProcessJob" + ) + end + end + + context "when there are no records available to batch" do + before do + PriorityEndProductSyncQueue.destroy_all + allow(Rails.logger).to receive(:info) + perform_enqueued_jobs do + subject + end + end + + it "a message that says 'Cannot Find Any Records to Batch' will be logged" do + expect(Rails.logger).to have_received(:info).with( + "PriorityEpSyncBatchProcessJob Cannot Find Any Records to Batch."\ + " Job will be enqueued again at the top of the hour."\ + " Active Job ID: #{subject.job_id}. 
Time: #{Time.zone.now}" + ) + end + + it "slack will NOT be notified when job runs successfully" do + expect(slack_service).to_not have_received(:send_notification) + end + end + end +end diff --git a/spec/jobs/priority_queues/populate_end_product_sync_queue_job_spec.rb b/spec/jobs/priority_queues/populate_end_product_sync_queue_job_spec.rb new file mode 100644 index 00000000000..cd06a95aa5f --- /dev/null +++ b/spec/jobs/priority_queues/populate_end_product_sync_queue_job_spec.rb @@ -0,0 +1,226 @@ +# frozen_string_literal: true + +describe PopulateEndProductSyncQueueJob, type: :job do + include ActiveJob::TestHelper + + let(:slack_service) { SlackService.new(url: "http://www.example.com") } + + let!(:epes_to_be_queued) do + create_list(:end_product_establishment, 2, :active_hlr_with_cleared_vbms_ext_claim) + end + + let!(:not_found_epe) do + create(:end_product_establishment, :active_hlr_with_active_vbms_ext_claim) + end + + before do + # Batch limit changes to 1 to test PopulateEndProductSyncQueueJob loop + stub_const("PopulateEndProductSyncQueueJob::BATCH_LIMIT", 1) + end + + subject do + PopulateEndProductSyncQueueJob.perform_later + end + + describe "#perform" do + context "when all records sync successfully" do + before do + allow(SlackService).to receive(:new).with(url: anything).and_return(slack_service) + allow(slack_service).to receive(:send_notification) { |_, first_arg| @slack_msg = first_arg } + perform_enqueued_jobs do + subject + end + end + + it "adds the 2 unsynced epes to the end product sync queue" do + expect(PriorityEndProductSyncQueue.count).to eq 2 + end + + it "the current user is set to a system user" do + expect(RequestStore.store[:current_user].id).to eq(User.system_user.id) + end + + it "adds the epes to the priority end product sync queue table" do + expect(PriorityEndProductSyncQueue.first.end_product_establishment_id).to eq epes_to_be_queued.first.id + expect(PriorityEndProductSyncQueue.second.end_product_establishment_id).to eq 
epes_to_be_queued.second.id + end + + it "the epes are associated with a vbms_ext_claim record" do + expect(EndProductEstablishment.find(PriorityEndProductSyncQueue.first.end_product_establishment_id) + .reference_id).to eq epes_to_be_queued.first.vbms_ext_claim.claim_id.to_s + expect(EndProductEstablishment.find(PriorityEndProductSyncQueue.second.end_product_establishment_id) + .reference_id).to eq epes_to_be_queued.second.vbms_ext_claim.claim_id.to_s + end + + it "the priority end product sync queue records have a status of 'NOT_PROCESSED'" do + expect(PriorityEndProductSyncQueue.first.status).to eq "NOT_PROCESSED" + expect(PriorityEndProductSyncQueue.second.status).to eq "NOT_PROCESSED" + end + + it "slack will NOT be notified when job runs successfully" do + expect(slack_service).to_not have_received(:send_notification) + end + end + + context "when the epe's reference id is a lettered string (i.e. only match on matching numbers)" do + before do + epes_to_be_queued.each { |epe| epe.update!(reference_id: "whaddup yooo") } + perform_enqueued_jobs do + subject + end + end + + it "doesn't add epe to the queue" do + expect(PriorityEndProductSyncQueue.count).to eq 0 + end + end + + context "when a priority end product sync queue record already exists with the epe id" do + before do + PriorityEndProductSyncQueue.create(end_product_establishment_id: epes_to_be_queued.first.id) + perform_enqueued_jobs do + subject + end + end + + it "will not add same epe more than once in the priority end product sync queue table" do + expect(PriorityEndProductSyncQueue.count).to eq 2 + end + end + + context "when the epe records' synced_status value is nil" do + before do + epes_to_be_queued.each { |epe| epe.update!(synced_status: nil) } + perform_enqueued_jobs do + subject + end + end + + it "will add the epe if epe synced status is nil and other conditions are met" do + expect(PriorityEndProductSyncQueue.count).to eq 2 + end + end + + context "when the job duration ends before all 
PriorityEndProductSyncQueue records can be batched" do + before do + # Job duration of 0.001 seconds limits the job's loop to one iteration + stub_const("PopulateEndProductSyncQueueJob::JOB_DURATION", 0.001.seconds) + allow(SlackService).to receive(:new).with(url: anything).and_return(slack_service) + allow(slack_service).to receive(:send_notification) { |_, first_arg| @slack_msg = first_arg } + perform_enqueued_jobs do + subject + end + end + + it "there are 3 epe records" do + expect(EndProductEstablishment.count).to eq(3) + end + + it "creates 1 priority end product sync queue record" do + expect(PriorityEndProductSyncQueue.count).to eq(1) + end + + it "the current user is set to a system user" do + expect(RequestStore.store[:current_user].id).to eq(User.system_user.id) + end + + it "adds the epes to the priority end product sync queue table" do + expect(PriorityEndProductSyncQueue.first.end_product_establishment_id).to eq epes_to_be_queued.first.id + end + + it "the epes are associated with a vbms_ext_claim record" do + expect(EndProductEstablishment.find(PriorityEndProductSyncQueue.first.end_product_establishment_id) + .reference_id).to eq epes_to_be_queued.first.vbms_ext_claim.claim_id.to_s + end + + it "the priority end product sync queue record has a status of 'NOT_PROCESSED'" do + expect(PriorityEndProductSyncQueue.first.status).to eq "NOT_PROCESSED" + end + + it "slack will NOT be notified when job runs successfully" do + expect(slack_service).to_not have_received(:send_notification) + end + end + + context "when there are no records available to batch" do + before do + EndProductEstablishment.destroy_all + allow(Rails.logger).to receive(:info) + perform_enqueued_jobs do + subject + end + end + + it "doesn't add any epes to the batch" do + expect(PriorityEndProductSyncQueue.count).to eq 0 + end + + it "logs a message that says 'PopulateEndProductSyncQueueJob is not able to find any batchable EPE records'" do + expect(Rails.logger).to 
have_received(:info).with( + "PopulateEndProductSyncQueueJob is not able to find any batchable EPE records."\ + " Active Job ID: #{subject.job_id}."\ + " Time: #{Time.zone.now}" + ) + end + end + + context "when an error is raised during the job" do + let(:standard_error) { StandardError.new("Uh-Oh!") } + before do + allow(Rails.logger).to receive(:error) + allow(Raven).to receive(:capture_exception) + allow(Raven).to receive(:last_event_id) { "sentry_123" } + allow(SlackService).to receive(:new).with(url: anything).and_return(slack_service) + allow(slack_service).to receive(:send_notification) { |_, first_arg| @slack_msg = first_arg } + allow_any_instance_of(PopulateEndProductSyncQueueJob) + .to receive(:find_priority_end_product_establishments_to_sync).and_raise(standard_error) + perform_enqueued_jobs do + subject + end + end + + it "the error and the backtrace will be logged" do + expect(Rails.logger).to have_received(:error).with(an_instance_of(StandardError)) + end + + it "the error will be sent to Sentry" do + expect(Raven).to have_received(:capture_exception) + .with(instance_of(StandardError), + extra: { + active_job_id: subject.job_id, + job_time: Time.zone.now.to_s + }) + end + + it "slack will be notified when job fails" do + expect(slack_service).to have_received(:send_notification).with( + "[ERROR] Error running PopulateEndProductSyncQueueJob. Error: #{standard_error.message}."\ + " Active Job ID: #{subject.job_id}. 
See Sentry event sentry_123.", "PopulateEndProductSyncQueueJob" + ) + end + end + + context "when there are no records available to batch" do + before do + VbmsExtClaim.destroy_all + allow(Rails.logger).to receive(:info) + allow(SlackService).to receive(:new).with(url: anything).and_return(slack_service) + allow(slack_service).to receive(:send_notification) { |_, first_arg| @slack_msg = first_arg } + perform_enqueued_jobs do + subject + end + end + + it "a message that says 'Cannot Find Any Records to Batch' will be logged" do + expect(Rails.logger).to have_received(:info).with( + "PopulateEndProductSyncQueueJob is not able to find any batchable EPE records."\ + " Active Job ID: #{subject.job_id}. Time: #{Time.zone.now}" + ) + end + + it "slack will NOT be notified when job runs successfully" do + expect(slack_service).to_not have_received(:send_notification) + end + end + end +end diff --git a/spec/models/batch_processes/batch_process_spec.rb b/spec/models/batch_processes/batch_process_spec.rb new file mode 100644 index 00000000000..3cb769334e5 --- /dev/null +++ b/spec/models/batch_processes/batch_process_spec.rb @@ -0,0 +1,175 @@ +# frozen_string_literal: true + +require "./app/models/batch_processes/batch_process.rb" + +describe BatchProcess, :postgres do + describe ".needs_reprocessing" do + before do + Timecop.freeze(Time.utc(2022, 1, 1, 12, 0, 0)) + end + + let!(:pre_processing_batch_process_within_error_delay) do + PriorityEpSyncBatchProcess.create(state: Constants.BATCH_PROCESS.pre_processing, created_at: Time.zone.now) + end + let!(:pre_processing_batch_process_outside_error_delay) do + PriorityEpSyncBatchProcess.create( + state: Constants.BATCH_PROCESS.pre_processing, created_at: Time.zone.now - (BatchProcess::ERROR_DELAY + 1).hours + ) + end + let!(:processing_batch_process_within_error_delay) do + PriorityEpSyncBatchProcess.create(state: Constants.BATCH_PROCESS.processing, created_at: Time.zone.now) + end + 
let!(:processing_batch_process_outside_error_delay) do + PriorityEpSyncBatchProcess.create( + state: Constants.BATCH_PROCESS.processing, created_at: Time.zone.now - (BatchProcess::ERROR_DELAY + 1).hours + ) + end + let!(:completed_batch_process_within_error_delay) do + PriorityEpSyncBatchProcess.create(state: Constants.BATCH_PROCESS.completed, created_at: Time.zone.now) + end + let!(:completed_batch_process_outside_error_delay) do + PriorityEpSyncBatchProcess.create( + state: Constants.BATCH_PROCESS.completed, created_at: Time.zone.now - (BatchProcess::ERROR_DELAY + 1).hours + ) + end + + subject { BatchProcess.needs_reprocessing.to_a } + + it "will return Batch Processes that have a state of PRE_PROCESSING and a created_at outside of the error_delay" do + expect(subject).to include(pre_processing_batch_process_outside_error_delay) + end + + it "will return Batch Processes that have a state of PROCESSING and a created_at outside of the error_delay" do + expect(subject).to include(processing_batch_process_outside_error_delay) + end + + it "will NOT return Batch Processes that have a state of PRE_PROCESSING and a created_at within the error_delay" do + expect(subject).to_not include(pre_processing_batch_process_within_error_delay) + end + + it "will NOT return Batch Processes that have a state of PROCESSING and a created_at within the error_delay" do + expect(subject).to_not include(processing_batch_process_within_error_delay) + end + + it "will NOT return Batch Processes that have a state of COMPLETED and a created_at outside of the error_delay" do + expect(subject).to_not include(completed_batch_process_outside_error_delay) + end + + it "will NOT return Batch Processes that have a state of COMPLETED and a created_at within the error_delay" do + expect(subject).to_not include(completed_batch_process_within_error_delay) + end + end + + describe ".find_records_to_batch" do + it "is a no-op method that does nothing and returns nil" do + 
expect(BatchProcess.find_records_to_batch).to eq(nil) + end + end + + describe ".create_batch!(_records)" do + it "is a no-op method that does nothing and returns nil" do + expect(BatchProcess.create_batch!(nil)).to eq(nil) + end + end + + describe "#process_batch!" do + let!(:batch_process) { BatchProcess.new } + it "is a no-op method that does nothing" do + end + end + + describe "#increment_completed" do + let(:batch) { BatchProcess.new } + + it "will increment @completed_count by 1" do + batch.send(:increment_completed) + expect(batch.instance_variable_get(:@completed_count)).to eq(1) + end + end + + describe "#increment_failed" do + let(:batch) { BatchProcess.new } + + it "will increment @failed_count by 1" do + batch.send(:increment_failed) + expect(batch.instance_variable_get(:@failed_count)).to eq(1) + end + end + + describe "#batch_processing!" do + let(:batch) { PriorityEpSyncBatchProcess.new } + + before do + Timecop.freeze(Time.utc(2022, 1, 1, 12, 0, 0)) + end + + it "will update the Batch Process state to PROCESSING" do + batch.send(:batch_processing!) + expect(batch.state).to eq(Constants.BATCH_PROCESS.processing) + end + + it "will update started_at to the current date/time" do + batch.send(:batch_processing!) + expect(batch.started_at).to eq(Time.zone.now) + end + end + + describe "#batch_complete!" do + let(:batch) { PriorityEpSyncBatchProcess.new } + + before do + Timecop.freeze(Time.utc(2022, 1, 1, 12, 0, 0)) + batch.instance_variable_set(:@completed_count, 1) + batch.instance_variable_set(:@failed_count, 1) + end + + it "will update the Batch Process state to COMPLETED" do + batch.send(:batch_complete!) + expect(batch.state).to eq(Constants.BATCH_PROCESS.completed) + end + + it "will update the Batch Process records_completed" do + batch.send(:batch_complete!) + expect(batch.records_failed).to eq(batch.instance_variable_get(:@completed_count)) + end + + it "will update the Batch Process records_failed" do + batch.send(:batch_complete!) 
+ expect(batch.records_failed).to eq(batch.instance_variable_get(:@failed_count)) + end + + it "will update ended_at to the current date/time" do + batch.send(:batch_complete!) + expect(batch.ended_at).to eq(Time.zone.now) + end + end + + describe "#error_out_record!(record, error)" do + let(:batch) { BatchProcess.new } + let!(:record) { create(:priority_end_product_sync_queue) } + let(:error) { "Rspec Test Error" } + subject { record } + + context "when a record encounters an error" do + it "a new error message is added to error_messages" do + batch.send(:error_out_record!, subject, error) + subject.reload + expect(subject.error_messages.count).to eq(1) + end + + it "the record is inspected to see if it's STUCK" do + batch.send(:error_out_record!, subject, error + " 1") + batch.send(:error_out_record!, subject, error + " 2") + batch.send(:error_out_record!, subject, error + " 3") + subject.reload + expect(subject.status).to eq(Constants.PRIORITY_EP_SYNC.stuck) + end + + it "status is changed to ERROR" do + batch.send(:error_out_record!, subject, error) + subject.reload + expect(subject.status).to eq(Constants.PRIORITY_EP_SYNC.error) + end + end + end +end diff --git a/spec/models/batch_processes/priority_ep_sync_batch_process_spec.rb b/spec/models/batch_processes/priority_ep_sync_batch_process_spec.rb new file mode 100644 index 00000000000..1ebb9ebd8a8 --- /dev/null +++ b/spec/models/batch_processes/priority_ep_sync_batch_process_spec.rb @@ -0,0 +1,304 @@ +# frozen_string_literal: true + +require "./app/models/batch_processes/priority_ep_sync_batch_process.rb" +require "test_prof/recipes/rspec/let_it_be" + +describe PriorityEpSyncBatchProcess, :postgres do + before do + Timecop.freeze(Time.utc(2022, 1, 1, 12, 0, 0)) + end + + describe ".find_records_to_batch" do + # Bulk creation of Pepsq records + let_it_be(:pepsq_records) { create_list(:priority_end_product_sync_queue, BatchProcess::BATCH_LIMIT - 10) } + + # Pepsq Records for Status Checks + 
let_it_be(:pepsq_pre_processing) { create(:priority_end_product_sync_queue, :pre_processing) } + let_it_be(:pepsq_processing) { create(:priority_end_product_sync_queue, :processing) } + let_it_be(:pepsq_synced) { create(:priority_end_product_sync_queue, :synced) } + let_it_be(:pepsq_error) { create(:priority_end_product_sync_queue, :error) } + let_it_be(:pepsq_stuck) { create(:priority_end_product_sync_queue, :stuck) } + + # Batch Processes for state check + let_it_be(:bp_pre_processing) { PriorityEpSyncBatchProcess.create(state: "PRE_PROCESSING") } + let_it_be(:bp_processing) { PriorityEpSyncBatchProcess.create(state: "PROCESSING") } + let_it_be(:bp_complete) { PriorityEpSyncBatchProcess.create(state: "COMPLETED") } + + # Batch_id of nil or batch_process.state of COMPLETED + let_it_be(:pepsq_batch_complete) { create(:priority_end_product_sync_queue, batch_id: bp_pre_processing.batch_id) } + let_it_be(:pepsq_batch_processing) { create(:priority_end_product_sync_queue, batch_id: bp_processing.batch_id) } + let_it_be(:pepsq_batch_pre_processing) { create(:priority_end_product_sync_queue, batch_id: bp_complete.batch_id) } + + # Additional records for last_batched_at checks + let!(:pepsq_lba_before_error_delay_ends) do + create(:priority_end_product_sync_queue, last_batched_at: Time.zone.now) + end + let!(:pepsq_lba_aftere_error_delay_ends) do + create(:priority_end_product_sync_queue, last_batched_at: Time.zone.now - 14.hours) + end + + # Additional records to test the BATCH_LIMIT + let!(:pepsq_additional_records) { create_list(:priority_end_product_sync_queue, 6) } + + subject { PriorityEpSyncBatchProcess.find_records_to_batch } + + it "will only return records that have a NULL batch_id OR have a batch_id tied to a COMPLETED batch process \n + and will return records that have a status of NOT_PROCESSED, PRE_PROCESSING, PROCESSING, or ERROR" do + expect(subject.all? { |r| r.batch_id.nil? || r.batch_process.state == "COMPLETED" }).to eq(true) + expect(subject.all? 
{ |r| r&.batch_process&.state == "PRE_PROCESSING" }).to eq(false) + expect(subject.all? { |r| r&.batch_process&.state == "PROCESSING" }).to eq(false) + expect(subject.all? do |r| + r.status == Constants.PRIORITY_EP_SYNC.not_processed || + r.status == Constants.PRIORITY_EP_SYNC.pre_processing || + r.status == Constants.PRIORITY_EP_SYNC.processing || + r.status == Constants.PRIORITY_EP_SYNC.error + end).to eq(true) + end + + it "will NOT return records that have a status of SYNCED OR STUCK \n + and will NOT return records with a last_batched_at that is within the ERROR_DELAY" do + expect(subject.all? { |r| r.status == Constants.PRIORITY_EP_SYNC.synced }).to eq(false) + expect(subject.all? { |r| r.status == Constants.PRIORITY_EP_SYNC.stuck }).to eq(false) + expect(subject.none? do |r| + r.last_batched_at.present? && r.last_batched_at > BatchProcess::ERROR_DELAY.hours.ago + end).to eq(true) + end + + it "will only return records with a last_batched_at that is NULL OR outside of the ERROR_DELAY \n + and number of records returned will not exceed the BATCH_LIMIT when available records exceed the BATCH_LIMIT" do + expect(subject.all? { |r| r.last_batched_at.nil? || r.last_batched_at <= BatchProcess::ERROR_DELAY.hours.ago }) + .to eq(true) + expect(subject.include?(pepsq_lba_aftere_error_delay_ends)).to eq(true) + expect(subject.include?(pepsq_lba_before_error_delay_ends)).to eq(false) + expect(PriorityEndProductSyncQueue.count).to eq(BatchProcess::BATCH_LIMIT + 6) + expect(subject.count).to eq(BatchProcess::BATCH_LIMIT) + end + end + + describe ".create_batch!" 
do + let_it_be(:pepsq_records) { create_list(:priority_end_product_sync_queue, 10) } + subject { PriorityEpSyncBatchProcess.create_batch!(pepsq_records) } + + before do + subject + end + + it "will create a new batch_process and \n + will set the batch_type of the new batch_process to 'PriorityEpSyncBatchProcess'" do + expect(subject.class).to be(PriorityEpSyncBatchProcess) + expect(BatchProcess.all.count).to eq(1) + expect(subject.batch_type).to eq(PriorityEpSyncBatchProcess.name) + end + + it "will set the state of the new batch_process to 'PRE_PROCESSING' and \n + will set records_attempted of the new batch_process to the number of records batched" do + expect(subject.state).to eq(Constants.BATCH_PROCESS.pre_processing) + expect(subject.records_attempted).to eq(pepsq_records.count) + end + + it "will assign the newly created batch_process batch_id to all newly batched records, \n + will set the status of each newly batched record to 'PRE_PROCESSING', \n + and will set the last_batched_at Date/Time of each newly batched record to the current Date/Time " do + all_pepsq_batch_ids = pepsq_records.pluck(:batch_id) + expect(all_pepsq_batch_ids).to all(eq(subject.batch_id)) + all_pepsq_statuses = pepsq_records.pluck(:status) + expect(all_pepsq_statuses).to all(eq(Constants.PRIORITY_EP_SYNC.pre_processing)) + all_pepsq_last_batched_at_times = pepsq_records.pluck(:last_batched_at) + expect(all_pepsq_last_batched_at_times).to all(eq(Time.zone.now)) + end + end + + describe "#process_batch!" 
do + let!(:canceled_hlr_epe_w_canceled_vbms_ext_claim) do + create(:end_product_establishment, :canceled_hlr_with_canceled_vbms_ext_claim) + end + let!(:active_hlr_epe_w_canceled_vbms_ext_claim) do + create(:end_product_establishment, :active_hlr_with_canceled_vbms_ext_claim) + end + let!(:active_hlr_epe_w_active_vbms_ext_claim) do + create(:end_product_establishment, :active_hlr_with_active_vbms_ext_claim) + end + let!(:active_hlr_epe_w_cleared_vbms_ext_claim) do + create(:end_product_establishment, :active_hlr_with_cleared_vbms_ext_claim) + end + let!(:cleared_hlr_epe_w_cleared_vbms_ext_claim) do + create(:end_product_establishment, :cleared_hlr_with_cleared_vbms_ext_claim) + end + let!(:canceled_supp_epe_w_canceled_vbms_ext_claim) do + create(:end_product_establishment, :canceled_supp_with_canceled_vbms_ext_claim) + end + let!(:active_supp_epe_w_canceled_vbms_ext_claim) do + create(:end_product_establishment, :active_supp_with_canceled_vbms_ext_claim) + end + let!(:active_supp_epe_w_active_vbms_ext_claim) do + create(:end_product_establishment, :active_supp_with_active_vbms_ext_claim) + end + let!(:active_supp_epe_w_cleared_vbms_ext_claim) do + create(:end_product_establishment, :active_supp_with_canceled_vbms_ext_claim) + end + let!(:cleared_supp_epes_w_cleared_vbms_ext_claim) do + create(:end_product_establishment, :cleared_supp_with_cleared_vbms_ext_claim) + end + + let!(:all_end_product_establishments) do + EndProductEstablishment.all + end + + let!(:pepsq_records) do + PopulateEndProductSyncQueueJob.perform_now + PriorityEndProductSyncQueue.all + end + + let!(:batch_process) { PriorityEpSyncBatchProcess.create_batch!(pepsq_records) } + + subject { batch_process.process_batch! 
} + + context "when all batched records in the queue are able to sync successfully" do + before do + subject + pepsq_records.each(&:reload) + end + it "each batched record in the queue will have a status of 'SYNCED' \n + and the batch process will have a state of 'COMPLETED'" do + all_pepsq_statuses = pepsq_records.pluck(:status) + expect(all_pepsq_statuses).to all(eq(Constants.PRIORITY_EP_SYNC.synced)) + expect(batch_process.state).to eq(Constants.BATCH_PROCESS.completed) + end + + it "the number of records_attempted for the batch process will match the number of PEPSQ records batched, \n + the number of records_completed for the batch process will match the number of PEPSQ records synced, \n + and the number of records_failed for the batch process will match the number of PEPSQ records not synced" do + expect(batch_process.records_attempted).to eq(pepsq_records.count) + all_synced_pepsq_records = pepsq_records.select { |record| record.status == Constants.PRIORITY_EP_SYNC.synced } + expect(batch_process.records_completed).to eq(all_synced_pepsq_records.count) + all_synced_pepsq_records = pepsq_records.reject { |record| record.status == Constants.PRIORITY_EP_SYNC.synced } + expect(batch_process.records_failed).to eq(all_synced_pepsq_records.count) + end + end + + context "when one of the batched records fails because the synced_status and level_status_code do not match" do + before do + active_hlr_epe_w_cleared_vbms_ext_claim.vbms_ext_claim.update!(level_status_code: "CAN") + allow(Rails.logger).to receive(:error) + subject + pepsq_records.each(&:reload) + end + + it "all but ONE of the batched records will have a status of 'SYNCED'" do + synced_status_pepsq_records = pepsq_records.select { |r| r.status == Constants.PRIORITY_EP_SYNC.synced } + not_synced_status_pepsq_records = pepsq_records.reject { |r| r.status == Constants.PRIORITY_EP_SYNC.synced } + expect(synced_status_pepsq_records.count).to eq(pepsq_records.count - not_synced_status_pepsq_records.count) + 
expect(not_synced_status_pepsq_records.count).to eq(pepsq_records.count - synced_status_pepsq_records.count) + end + + it "the failed batched record will have a status of 'ERROR' \n + and the failed batched record will raise and log error" do + pepsq_record_without_synced_status = pepsq_records.find { |r| r.status != Constants.PRIORITY_EP_SYNC.synced } + expect(pepsq_record_without_synced_status.status).to eq(Constants.PRIORITY_EP_SYNC.error) + active_hlr_epe_w_cleared_vbms_ext_claim.reload + expect(Rails.logger).to have_received(:error) + .with("#") + end + + it "the batch process will have a state of 'COMPLETED', \n + the number of records_attempted for the batch process will match the number of PEPSQ records batched, \n + the number of records_completed for the batch process will match the number of successfully synced records \n + the number of records_failed for the batch process will match the number of errored records" do + expect(batch_process.state).to eq(Constants.BATCH_PROCESS.completed) + expect(batch_process.records_attempted).to eq(pepsq_records.count) + synced_pepsq_records = pepsq_records.select { |r| r.status == Constants.PRIORITY_EP_SYNC.synced } + expect(batch_process.records_completed).to eq(synced_pepsq_records.count) + failed_sync_pepsq_records = pepsq_records.reject { |r| r.status == Constants.PRIORITY_EP_SYNC.synced } + expect(batch_process.records_failed).to eq(failed_sync_pepsq_records.count) + end + end + + context "when one of the batched records fails because there is no related End Product within Vbms_Ext_Claim" do + before do + active_hlr_epe_w_cleared_vbms_ext_claim.vbms_ext_claim.destroy! 
+ allow(Rails.logger).to receive(:error) + subject + pepsq_records.each(&:reload) + end + + it "all but ONE of the batched records will have a status of 'SYNCED'" do + synced_pepsq_records = pepsq_records.select { |r| r.status == Constants.PRIORITY_EP_SYNC.synced } + not_synced_pepsq_records = pepsq_records.reject { |r| r.status == Constants.PRIORITY_EP_SYNC.synced } + expect(synced_pepsq_records.count).to eq(pepsq_records.count - not_synced_pepsq_records.count) + expect(not_synced_pepsq_records.count).to eq(pepsq_records.count - synced_pepsq_records.count) + end + + it "the failed batched record will have a status of 'ERROR' \n + and the failed batched record will raise and log error" do + pepsq_record_without_synced_status = pepsq_records.find { |r| r.status != Constants.PRIORITY_EP_SYNC.synced } + expect(pepsq_record_without_synced_status.status).to eq(Constants.PRIORITY_EP_SYNC.error) + expect(Rails.logger).to have_received(:error) + .with("#") + end + + it "calling '.vbms_ext_claim' on the failed batched record's End Product Establishment will return nil" do + pepsq_record_without_synced_status = pepsq_records.find { |r| r.status != Constants.PRIORITY_EP_SYNC.synced } + expect(pepsq_record_without_synced_status.end_product_establishment.vbms_ext_claim).to eq(nil) + end + + it "the batch process will have a state of 'COMPLETED', \n + the number of records_attempted for the batch process will match the number of PEPSQ records batched, \n + the number of records_completed for the batch process will match the number of successfully synced records, \n + and the number of records_failed for the batch process will match the number of errored records" do + expect(batch_process.state).to eq(Constants.BATCH_PROCESS.completed) + expect(batch_process.records_attempted).to eq(pepsq_records.count) + synced_pepsq_records = pepsq_records.select { |r| r.status == Constants.PRIORITY_EP_SYNC.synced } + expect(batch_process.records_completed).to eq(synced_pepsq_records.count) + 
failed_sync_pepsq_records = pepsq_records.reject { |r| r.status == Constants.PRIORITY_EP_SYNC.synced } + expect(batch_process.records_failed).to eq(failed_sync_pepsq_records.count) + end + end + + context "when one of the batched records fails because the End Product does not exist in BGS" do + before do + epe = EndProductEstablishment.find active_hlr_epe_w_cleared_vbms_ext_claim.id + Fakes::EndProductStore.cache_store.redis.del("end_product_records_test:#{epe.veteran_file_number}") + allow(Rails.logger).to receive(:error) + subject + pepsq_records.each(&:reload) + end + + it "all but ONE of the batched records will have a status of 'SYNCED'" do + synced_pepsq_records = pepsq_records.select { |r| r.status == Constants.PRIORITY_EP_SYNC.synced } + not_synced_pepsq_records = pepsq_records.reject { |r| r.status == Constants.PRIORITY_EP_SYNC.synced } + expect(synced_pepsq_records.count).to eq(pepsq_records.count - not_synced_pepsq_records.count) + expect(synced_pepsq_records.count).to eq(pepsq_records.count - 1) + expect(not_synced_pepsq_records.count).to eq(pepsq_records.count - synced_pepsq_records.count) + expect(not_synced_pepsq_records.count).to eq(1) + end + + it "the failed batched record will have a status of 'ERROR' \n + and the failed batched record will raise and log error" do + pepsq_record_without_synced_status = pepsq_records.find { |r| r.status != Constants.PRIORITY_EP_SYNC.synced } + expect(pepsq_record_without_synced_status.status).to eq(Constants.PRIORITY_EP_SYNC.error) + expect(Rails.logger).to have_received(:error) + .with("#") + end + + it "the batch process will have a state of 'COMPLETED' \n + and the number of records_attempted for the batch process will match the number of PEPSQ records batched" do + expect(batch_process.state).to eq(Constants.BATCH_PROCESS.completed) + expect(batch_process.records_attempted).to eq(pepsq_records.count) + end + + it "the number of records_completed for the batch process will match the number of successfully 
synced records \n + and the number of records_failed for the batch process will match the number of errored records" do + synced_pepsq_records = pepsq_records.select { |r| r.status == Constants.PRIORITY_EP_SYNC.synced } + expect(batch_process.records_completed).to eq(synced_pepsq_records.count) + failed_sync_pepsq_records = pepsq_records.reject { |r| r.status == Constants.PRIORITY_EP_SYNC.synced } + expect(batch_process.records_failed).to eq(failed_sync_pepsq_records.count) + end + end + end +end diff --git a/spec/models/caseflow_stuck_record_spec.rb b/spec/models/caseflow_stuck_record_spec.rb new file mode 100644 index 00000000000..a6504482604 --- /dev/null +++ b/spec/models/caseflow_stuck_record_spec.rb @@ -0,0 +1,24 @@ +# frozen_string_literal: true + +describe CaseflowStuckRecord, :postgres do + describe "#end_product_establishment" do + let!(:end_product_establishment) do + create(:end_product_establishment, :canceled_hlr_with_cleared_vbms_ext_claim) + end + + let!(:caseflow_stuck_record) do + PopulateEndProductSyncQueueJob.perform_now + + 3.times do + PriorityEndProductSyncQueue.first.update!(last_batched_at: nil) + PriorityEpSyncBatchProcessJob.perform_now + end + + CaseflowStuckRecord.first + end + + it "will return the end_product_establishment when the stuck record is from the Priority End Product Sync Queue" do + expect(caseflow_stuck_record.end_product_establishment).to eq(end_product_establishment) + end + end +end diff --git a/spec/models/end_product_establishment_spec.rb b/spec/models/end_product_establishment_spec.rb index e1005d656bb..a35debddca7 100644 --- a/spec/models/end_product_establishment_spec.rb +++ b/spec/models/end_product_establishment_spec.rb @@ -851,6 +851,19 @@ ] end + context "when lock acquisition fails" do + before do + allow(RedisMutex).to receive(:with_lock).and_raise(RedisMutex::LockError) + end + + it "logs the error message" do + expect(Rails.logger).to receive(:error) + .with("Failed to acquire lock for EPE ID: 
#{end_product_establishment.id}!"\ + " #sync! is being called by another process. Please try again later.") + end_product_establishment.sync! + end + end + context "when matching end product has not yet been established" do + it "raises EstablishedEndProductNotFound error" do + expect { subject }.to raise_error(EndProductEstablishment::EstablishedEndProductNotFound) @@ -1453,4 +1466,59 @@ )) + end + end + + let!(:queued_end_product_establishment) do + EndProductEstablishment.create( + payee_code: "10", + source_id: 1, + source_type: "HigherLevelReview", + veteran_file_number: 1 + ) + end + let!(:non_queued_end_product_establishment) do + EndProductEstablishment.create( + payee_code: "10", + source_id: 2, + source_type: "HigherLevelReview", + veteran_file_number: 1 + ) + end + let!(:priority_end_product_sync_queue) do + PriorityEndProductSyncQueue.create( + batch_id: nil, + created_at: Time.zone.now, + end_product_establishment_id: queued_end_product_establishment.id, + error_messages: [], + last_batched_at: nil, + status: "NOT_PROCESSED" + ) + end + + context "#priority_end_product_sync_queue" do + context "if the End Product Establishment is not enqueued in the Priority End Product Sync Queue" do + it "will return nil" do + expect(non_queued_end_product_establishment.priority_end_product_sync_queue).to eq(nil) + end + end + + context "if the End Product Establishment is enqueued in the Priority End Product Sync Queue" do + it "will return the record that is enqueued to sync from the Priority End Product Sync Queue" do + expect(queued_end_product_establishment.priority_end_product_sync_queue).to eq(priority_end_product_sync_queue) + end + end + end + + context "#priority_queued?" 
do + context "if the End Product Establishment is not enqueued in the Priority End Product Sync Queue" do + it "will return False" do + expect(non_queued_end_product_establishment.priority_queued?).to eq(false) + end + end + + context "if the End Product Establishment is enqueued in the Priority End Product Sync Queue" do + it "will return True" do + expect(queued_end_product_establishment.priority_queued?).to eq(true) + end + end + end end diff --git a/spec/models/priority_queues/priority_end_product_sync_queue_spec.rb b/spec/models/priority_queues/priority_end_product_sync_queue_spec.rb new file mode 100644 index 00000000000..984dcbb550d --- /dev/null +++ b/spec/models/priority_queues/priority_end_product_sync_queue_spec.rb @@ -0,0 +1,235 @@ +# frozen_string_literal: true + +describe PriorityEndProductSyncQueue, :postgres do + describe ".batchable" do + before do + Timecop.freeze(Time.utc(2022, 1, 1, 12, 0, 0)) + end + + let!(:pre_processing_batch_process) do + PriorityEpSyncBatchProcess.create(state: Constants.BATCH_PROCESS.pre_processing) + end + let!(:processing_batch_process) { PriorityEpSyncBatchProcess.create(state: Constants.BATCH_PROCESS.processing) } + let!(:completed_batch_process) { PriorityEpSyncBatchProcess.create(state: Constants.BATCH_PROCESS.completed) } + let!(:queued_record_never_batched) { create(:priority_end_product_sync_queue, last_batched_at: nil) } + let!(:queued_record_batched_and_completed) do + create(:priority_end_product_sync_queue, batch_id: completed_batch_process.batch_id) + end + let!(:queued_record_batched_and_processing) do + create(:priority_end_product_sync_queue, batch_id: processing_batch_process.batch_id) + end + let!(:queued_record_batched_and_pre_processing) do + create(:priority_end_product_sync_queue, batch_id: pre_processing_batch_process.batch_id) + end + + subject { PriorityEndProductSyncQueue.batchable.to_a } + + it "will return a Priority End Product Sync Queue record that has never been batched" do + 
expect(subject).to include(queued_record_never_batched) + end + + it "will return a Priority End Product Sync Queue record that is tied to a COMPLETED Batch Process" do + expect(subject).to include(queued_record_batched_and_completed) + end + + it "will NOT return a Priority End Product Sync Queue record that is tied to a PROCESSING Batch Process" do + expect(subject).to_not include(queued_record_batched_and_processing) + end + + it "will NOT return a Priority End Product Sync Queue record that is tied to a PRE_PROCESSING Batch Process" do + expect(subject).to_not include(queued_record_batched_and_pre_processing) + end + end + + describe ".ready_to_batch" do + before do + Timecop.freeze(Time.utc(2022, 1, 1, 12, 0, 0)) + end + + let!(:queued_record_never_batched) { create(:priority_end_product_sync_queue, last_batched_at: nil) } + let!(:queued_record_just_batched) { create(:priority_end_product_sync_queue, last_batched_at: Time.zone.now) } + let!(:queued_record_batched_within_error_delay) do + create(:priority_end_product_sync_queue, last_batched_at: Time.zone.now - (BatchProcess::ERROR_DELAY - 1).hours) + end + let!(:queued_record_batched_after_error_delay) do + create(:priority_end_product_sync_queue, last_batched_at: Time.zone.now - (BatchProcess::ERROR_DELAY + 1).hours) + end + + subject { PriorityEndProductSyncQueue.ready_to_batch.to_a } + + it "will return a Priority End Product Sync Queue record that has never been batched" do + expect(subject).to include(queued_record_never_batched) + end + + it "will return a Priority End Product Sync Queue record that was batched outside of the ERROR_DELAY" do + expect(subject).to include(queued_record_batched_after_error_delay) + end + + it "will NOT return a Priority End Product Sync Queue record that was just batched" do + expect(subject).to_not include(queued_record_just_batched) + end + + it "will NOT return a Priority End Product Sync Queue record that was batched within the ERROR_DELAY" do + expect(subject).to_not 
include(queued_record_batched_within_error_delay) + end + end + + describe ".syncable" do + let!(:not_processed_record) { create(:priority_end_product_sync_queue) } + let!(:pre_processing_record) { create(:priority_end_product_sync_queue, :pre_processing) } + let!(:processing_record) { create(:priority_end_product_sync_queue, :processing) } + let!(:error_record) { create(:priority_end_product_sync_queue, :error) } + let!(:synced_record) { create(:priority_end_product_sync_queue, :synced) } + let!(:stuck_record) { create(:priority_end_product_sync_queue, :stuck) } + + subject { PriorityEndProductSyncQueue.syncable.to_a } + + it "will return a Priority End Product Sync Queue records with a status of NOT_PROCESSED" do + expect(not_processed_record.status).to eq(Constants.PRIORITY_EP_SYNC.not_processed) + expect(subject).to include(not_processed_record) + end + + it "will return a Priority End Product Sync Queue records with a status of PRE_PROCESSING" do + expect(pre_processing_record.status).to eq(Constants.PRIORITY_EP_SYNC.pre_processing) + expect(subject).to include(pre_processing_record) + end + + it "will return a Priority End Product Sync Queue records with a status of PROCESSING" do + expect(processing_record.status).to eq(Constants.PRIORITY_EP_SYNC.processing) + expect(subject).to include(processing_record) + end + + it "will return a Priority End Product Sync Queue records with a status of ERROR" do + expect(error_record.status).to eq(Constants.PRIORITY_EP_SYNC.error) + expect(subject).to include(error_record) + end + + it "will NOT return a Priority End Product Sync Queue records with a status of SYNCED" do + expect(synced_record.status).to eq(Constants.PRIORITY_EP_SYNC.synced) + expect(subject).to_not include(synced_record) + end + + it "will NOT return a Priority End Product Sync Queue records with a status of STUCK" do + expect(stuck_record.status).to eq(Constants.PRIORITY_EP_SYNC.stuck) + expect(subject).to_not include(stuck_record) + end + end + + 
describe "#status_processing!" do + let!(:queued_record) { create(:priority_end_product_sync_queue) } + it "will update the record's status to PROCESSING" do + queued_record.status_processing! + expect(queued_record.status).to eq(Constants.PRIORITY_EP_SYNC.processing) + end + end + + describe "#status_sync!" do + let!(:queued_record) { create(:priority_end_product_sync_queue) } + it "will update the record's status to SYNCED" do + queued_record.status_sync! + expect(queued_record.status).to eq(Constants.PRIORITY_EP_SYNC.synced) + end + end + + describe "#status_error!" do + let!(:queued_record) { create(:priority_end_product_sync_queue) } + let(:errors) { ["Rspec Testing Error", "Another Error", "Too many errors!"] } + + it "will update the record's status to ERROR" do + queued_record.status_error!(errors) + expect(queued_record.status).to eq(Constants.PRIORITY_EP_SYNC.error) + end + + it "will add the ERROR to error_messages" do + queued_record.status_error!(errors) + expect(queued_record.error_messages).to eq(errors) + end + end + + describe "#declare_record_stuck" do + let!(:batch_process) { PriorityEpSyncBatchProcess.create } + + let!(:record) do + create(:priority_end_product_sync_queue, + error_messages: ["Rspec Testing Error", "Oh No!", "Help I'm Stuck!"], + batch_id: batch_process.batch_id) + end + + subject { record.declare_record_stuck! 
} + + before do + allow(Raven).to receive(:capture_message) + subject + end + + context "when a record is determined to be stuck" do + it "the record's status will be updated to STUCK" do + expect(record.status).to eq(Constants.PRIORITY_EP_SYNC.stuck) + end + + it "an associated record will be inserted into the caseflow_stuck_records table" do + found_record = CaseflowStuckRecord.find_by(stuck_record: record) + expect(record.caseflow_stuck_records).to include(found_record) + end + + it "a message will be sent to Sentry" do + expect(Raven).to have_received(:capture_message) + .with("StuckRecordAlert::SyncFailed End Product Establishment ID: #{record.end_product_establishment_id}.", + extra: { + batch_id: record.batch_id, + batch_process_type: record.batch_process.class.name, + caseflow_stuck_record_id: record.caseflow_stuck_records.first.id, + determined_stuck_at: anything, + end_product_establishment_id: record.end_product_establishment_id, + queue_type: record.class.name, + queue_id: record.id + }, level: "error") + end + end + end + + let!(:end_product_establishment) do + EndProductEstablishment.create( + payee_code: "10", + source_id: 1, + source_type: "HigherLevelReview", + veteran_file_number: 1 + ) + end + + let!(:batch_process) { PriorityEpSyncBatchProcess.create } + + let!(:pepsq) do + PriorityEndProductSyncQueue.create( + batch_id: batch_process.id, + created_at: Time.zone.now, + end_product_establishment_id: end_product_establishment.id, + error_messages: [], + last_batched_at: nil, + status: "PRE_PROCESSING" + ) + end + + let!(:caseflow_stuck_record) do + CaseflowStuckRecord.create(determined_stuck_at: Time.zone.now, + stuck_record: pepsq) + end + + describe "#end_product_establishment" do + it "will return the End Product Establishment object" do + expect(pepsq.end_product_establishment).to eq(end_product_establishment) + end + end + + describe "#batch_process" do + it "will return the Batch Process object" do + expect(pepsq.batch_process).to 
eq(batch_process) + end + end + + describe "#caseflow_stuck_records" do + it "will return Caseflow Stuck Record objects" do + expect(pepsq.caseflow_stuck_records).to include(caseflow_stuck_record) + end + end +end diff --git a/spec/seeds/vbms_ext_claim_spec.rb b/spec/seeds/vbms_ext_claim_spec.rb new file mode 100644 index 00000000000..0d9fc6a9d98 --- /dev/null +++ b/spec/seeds/vbms_ext_claim_spec.rb @@ -0,0 +1,60 @@ +# frozen_string_literal: true + +describe Seeds::VbmsExtClaim do + let(:seed) { Seeds::VbmsExtClaim.new } + + context "#seed!" do + it "seeds total of 325 VBMS EXT CLAIMS, 100 High Level Review EndProduct Establishments + 100 Supplemental Claim End Product Establishments, and 125 Non Associated End Product + Establishments" do + seed.seed! + expect(VbmsExtClaim.count).to eq(325) + expect(HigherLevelReview.count).to eq(100) + expect(SupplementalClaim.count).to eq(100) + expect(VbmsExtClaim.where(ep_code: nil).count).to eq(125) + end + end + + context "#create_vbms_ext_claims_with_no_end_product_establishment" do + it "seeds total of 125 VBMS EXT CLAIMS Not associated with an EPE" do + seed.send(:create_vbms_ext_claims_with_no_end_product_establishment) + expect(VbmsExtClaim.count).to eq(125) + expect(VbmsExtClaim.where(ep_code: nil).count).to eq(125) + end + end + + context "#create_in_sync_epes_and_vbms_ext_claims" do + it "seeds total of 100 VBMS EXT CLAIMS Associated with 50 High Level Review End Product + Establishments and 50 Supplemental Claims End Product Establishments that are in sync" do + seed.send(:create_in_sync_epes_and_vbms_ext_claims) + expect(VbmsExtClaim.count).to eq(100) + # need to show where VbmsExtClaim and EndProductEstablishment are in_sync + # where Level_status_code CAN is equal to sync_status code CAN + expect(VbmsExtClaim.where(level_status_code: "CAN").count).to eq(EndProductEstablishment + .where(synced_status: "CAN").count) + expect(VbmsExtClaim.where(level_status_code: "CLR").count).to eq(EndProductEstablishment + 
.where(synced_status: "CLR").count) + expect(HigherLevelReview.count).to eq(50) + expect(SupplementalClaim.count).to eq(50) + expect(EndProductEstablishment.count).to eq(100) + end + end + context "#create_out_of_sync_epes_and_vbms_ext_claims" do + it "seeds total of 100 VBMS EXT CLAIMS Associated with 50 High Level Review End Product + Establishments and 50 Supplemental Claims End Product Establishments that are out + of sync" do + seed.send(:create_out_of_sync_epes_and_vbms_ext_claims) + expect(VbmsExtClaim.count).to eq(100) + # need to show where VbmsExtClaim and EndProductEstablishment are out_of_sync + # where VbmsExtClaim.Level_status_code CAN and CLR is half of the amount of EPEs that have "PEND" + expect(VbmsExtClaim.where(level_status_code: %w[CAN CLR]).count / 2).to eq(EndProductEstablishment + .where(synced_status: "PEND").count) + # where VbmsExtClaim.Level_status_code CAN and CLR is half of the amount of EPEs that have "CAN" or "CLR" + expect(VbmsExtClaim.where(level_status_code: %w[CAN CLR]).count / 2).to eq(EndProductEstablishment + .where(synced_status: %w[CAN CLR]).count) + expect(HigherLevelReview.count).to eq(50) + expect(SupplementalClaim.count).to eq(50) + expect(EndProductEstablishment.count).to eq(100) + end + end +end