From cd3e6ab26cca31d34433cc5c4ec61a3795cf7bc6 Mon Sep 17 00:00:00 2001 From: Hamish Friedlander Date: Sat, 15 Jul 2023 21:47:12 +1200 Subject: [PATCH] Add fill mode support and generally improve inpainting --- api-interfaces | 2 +- gyre/config/models/samhq.yaml | 1 - gyre/generated/generation_pb2.py | 292 ++++++++------- gyre/generated/generation_pb2.pyi | 80 +++- gyre/generated/stablecabal.openapi.json | 35 ++ gyre/generated/stablecabal.swagger.json | 38 +- gyre/manager.py | 58 +++ gyre/pipeline/inpainting/zitspp_pipeline.py | 34 +- gyre/pipeline/pipeline_wrapper.py | 11 +- gyre/pipeline/prompt_types.py | 11 + gyre/pipeline/unet/hires_fix.py | 63 +++- gyre/pipeline/unified_pipeline.py | 393 +++++++++----------- gyre/services/generate.py | 100 +++-- 13 files changed, 705 insertions(+), 413 deletions(-) diff --git a/api-interfaces b/api-interfaces index a26b481..3bfe581 160000 --- a/api-interfaces +++ b/api-interfaces @@ -1 +1 @@ -Subproject commit a26b481693d703a0d4ee23a0c44b8b663768aa5c +Subproject commit 3bfe581252a7f6bb67b455ab62952d57a396b6ef diff --git a/gyre/config/models/samhq.yaml b/gyre/config/models/samhq.yaml index ac1a471..58f56ea 100644 --- a/gyre/config/models/samhq.yaml +++ b/gyre/config/models/samhq.yaml @@ -14,7 +14,6 @@ - id: "samhq" task: "mask-predict" enabled: True - default: True class: "SamHQPipeline" model: "@empty" overrides: diff --git a/gyre/generated/generation_pb2.py b/gyre/generated/generation_pb2.py index db4667d..2c6040f 100644 --- a/gyre/generated/generation_pb2.py +++ b/gyre/generated/generation_pb2.py @@ -14,7 +14,7 @@ import tensors_pb2 as tensors__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x10generation.proto\x12\x07gooseai\x1a\rtensors.proto\"/\n\x05Token\x12\x11\n\x04text\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\n\n\x02id\x18\x02 \x01(\rB\x07\n\x05_text\"T\n\x06Tokens\x12\x1e\n\x06tokens\x18\x01 \x03(\x0b\x32\x0e.gooseai.Token\x12\x19\n\x0ctokenizer_id\x18\x02 \x01(\tH\x00\x88\x01\x01\x42\x0f\n\r_tokenizer_id\"X\n\x18ImageAdjustment_Gaussian\x12\r\n\x05sigma\x18\x01 \x01(\x02\x12-\n\tdirection\x18\x02 \x01(\x0e\x32\x1a.gooseai.GaussianDirection\"\x18\n\x16ImageAdjustment_Invert\"h\n\x16ImageAdjustment_Levels\x12\x11\n\tinput_low\x18\x01 \x01(\x02\x12\x12\n\ninput_high\x18\x02 \x01(\x02\x12\x12\n\noutput_low\x18\x03 \x01(\x02\x12\x13\n\x0boutput_high\x18\x04 \x01(\x02\"\xd2\x01\n\x18ImageAdjustment_Channels\x12&\n\x01r\x18\x01 \x01(\x0e\x32\x16.gooseai.ChannelSourceH\x00\x88\x01\x01\x12&\n\x01g\x18\x02 \x01(\x0e\x32\x16.gooseai.ChannelSourceH\x01\x88\x01\x01\x12&\n\x01\x62\x18\x03 \x01(\x0e\x32\x16.gooseai.ChannelSourceH\x02\x88\x01\x01\x12&\n\x01\x61\x18\x04 \x01(\x0e\x32\x16.gooseai.ChannelSourceH\x03\x88\x01\x01\x42\x04\n\x02_rB\x04\n\x02_gB\x04\n\x02_bB\x04\n\x02_a\"x\n\x17ImageAdjustment_Rescale\x12\x0e\n\x06height\x18\x01 \x01(\x04\x12\r\n\x05width\x18\x02 \x01(\x04\x12\"\n\x04mode\x18\x03 \x01(\x0e\x32\x14.gooseai.RescaleMode\x12\x1a\n\x0e\x61lgorithm_hint\x18\x04 \x03(\tB\x02\x18\x01\"}\n\x19ImageAdjustment_Autoscale\x12\x13\n\x06height\x18\x01 \x01(\x04H\x00\x88\x01\x01\x12\x12\n\x05width\x18\x02 \x01(\x04H\x01\x88\x01\x01\x12\"\n\x04mode\x18\x03 \x01(\x0e\x32\x14.gooseai.RescaleModeB\t\n\x07_heightB\x08\n\x06_width\"P\n\x14ImageAdjustment_Crop\x12\x0b\n\x03top\x18\x01 \x01(\x04\x12\x0c\n\x04left\x18\x02 \x01(\x04\x12\r\n\x05width\x18\x03 \x01(\x04\x12\x0e\n\x06height\x18\x04 \x01(\x04\"2\n\x15ImageAdjustment_Depth\x12\x19\n\x11\x64\x65pth_engine_hint\x18\x01 
\x03(\t\"J\n\x19ImageAdjustment_CannyEdge\x12\x15\n\rlow_threshold\x18\x01 \x01(\x02\x12\x16\n\x0ehigh_threshold\x18\x02 \x01(\x02\"\x1f\n\x1dImageAdjustment_EdgeDetection\"\x1e\n\x1cImageAdjustment_Segmentation\"\x19\n\x17ImageAdjustment_Keypose\"\x1a\n\x18ImageAdjustment_Openpose\"\xc0\x01\n\x16ImageAdjustment_Normal\x12!\n\x14\x62\x61\x63kground_threshold\x18\x01 \x01(\x02H\x00\x88\x01\x01\x12\x14\n\x07preblur\x18\x02 \x01(\rH\x01\x88\x01\x01\x12\x15\n\x08postblur\x18\x03 \x01(\rH\x02\x88\x01\x01\x12\x16\n\tsmoothing\x18\x04 \x01(\x02H\x03\x88\x01\x01\x42\x17\n\x15_background_thresholdB\n\n\x08_preblurB\x0b\n\t_postblurB\x0c\n\n_smoothing\"p\n!ImageAdjustment_BackgroundRemoval\x12\x31\n\x04mode\x18\x01 \x01(\x0e\x32\x1e.gooseai.BackgroundRemovalModeH\x00\x88\x01\x01\x12\x0f\n\x07reapply\x18\x02 \x01(\x08\x42\x07\n\x05_mode\"=\n\x19ImageAdjustment_Palletize\x12\x14\n\x07\x63olours\x18\x01 \x01(\rH\x00\x88\x01\x01\x42\n\n\x08_colours\"-\n\x18ImageAdjustment_Quantize\x12\x11\n\tthreshold\x18\x01 \x03(\x02\"\x19\n\x17ImageAdjustment_Shuffle\"\xd6\x01\n\x1bImageAdjustment_MaskPredict\x12\x30\n\tbehaviour\x18\x01 \x01(\x0e\x32\x1d.gooseai.MaskPredictBehaviour\x12&\n\x04mode\x18\x02 \x01(\x0e\x32\x18.gooseai.MaskPredictMode\x12\x1f\n\x06prompt\x18\n \x03(\x0b\x32\x0f.gooseai.Prompt\x12\x12\n\x05\x65rode\x18\x32 \x01(\x04H\x00\x88\x01\x01\x12\x13\n\x06\x64ilate\x18\x33 \x01(\x04H\x01\x88\x01\x01\x42\x08\n\x06_erodeB\t\n\x07_dilate\"M\n\x19ImageAdjustment_MaskReuse\x12\x30\n\tbehaviour\x18\x01 \x01(\x0e\x32\x1d.gooseai.MaskPredictBehaviour\"\xa5\t\n\x0fImageAdjustment\x12\x31\n\x04\x62lur\x18\x01 \x01(\x0b\x32!.gooseai.ImageAdjustment_GaussianH\x00\x12\x31\n\x06invert\x18\x02 \x01(\x0b\x32\x1f.gooseai.ImageAdjustment_InvertH\x00\x12\x31\n\x06levels\x18\x03 \x01(\x0b\x32\x1f.gooseai.ImageAdjustment_LevelsH\x00\x12\x35\n\x08\x63hannels\x18\x04 \x01(\x0b\x32!.gooseai.ImageAdjustment_ChannelsH\x00\x12\x33\n\x07rescale\x18\x05 \x01(\x0b\x32 .gooseai.ImageAdjustment_RescaleH\x00\x12-\n\x04\x63rop\x18\x06 \x01(\x0b\x32\x1d.gooseai.ImageAdjustment_CropH\x00\x12/\n\x05\x64\x65pth\x18\x07 \x01(\x0b\x32\x1e.gooseai.ImageAdjustment_DepthH\x00\x12\x38\n\ncanny_edge\x18\x08 \x01(\x0b\x32\".gooseai.ImageAdjustment_CannyEdgeH\x00\x12@\n\x0e\x65\x64ge_detection\x18\t \x01(\x0b\x32&.gooseai.ImageAdjustment_EdgeDetectionH\x00\x12=\n\x0csegmentation\x18\n \x01(\x0b\x32%.gooseai.ImageAdjustment_SegmentationH\x00\x12\x33\n\x07keypose\x18\x0b \x01(\x0b\x32 .gooseai.ImageAdjustment_KeyposeH\x00\x12\x35\n\x08openpose\x18\x0c \x01(\x0b\x32!.gooseai.ImageAdjustment_OpenposeH\x00\x12\x31\n\x06normal\x18\r \x01(\x0b\x32\x1f.gooseai.ImageAdjustment_NormalH\x00\x12H\n\x12\x62\x61\x63kground_removal\x18\x0e \x01(\x0b\x32*.gooseai.ImageAdjustment_BackgroundRemovalH\x00\x12\x37\n\tautoscale\x18\x0f \x01(\x0b\x32\".gooseai.ImageAdjustment_AutoscaleH\x00\x12\x37\n\tpalletize\x18\x10 \x01(\x0b\x32\".gooseai.ImageAdjustment_PalletizeH\x00\x12\x35\n\x08quantize\x18\x11 \x01(\x0b\x32!.gooseai.ImageAdjustment_QuantizeH\x00\x12\x33\n\x07shuffle\x18\x12 \x01(\x0b\x32 .gooseai.ImageAdjustment_ShuffleH\x00\x12<\n\x0cmask_predict\x18\x13 \x01(\x0b\x32$.gooseai.ImageAdjustment_MaskPredictH\x00\x12\x38\n\nmask_reuse\x18\x14 \x01(\x0b\x32\".gooseai.ImageAdjustment_MaskReuseH\x00\x12\x17\n\tengine_id\x18\xc8\x01 \x01(\tH\x01\x88\x01\x01\x42\x0c\n\nadjustmentB\x0c\n\n_engine_id\"-\n\x0fSafetensorsMeta\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"A\n\x11SafetensorsTensor\x12\x0b\n\x03key\x18\x01 
\x01(\t\x12\x1f\n\x06tensor\x18\x02 \x01(\x0b\x32\x0f.tensors.Tensor\"f\n\x0bSafetensors\x12*\n\x08metadata\x18\x01 \x03(\x0b\x32\x18.gooseai.SafetensorsMeta\x12+\n\x07tensors\x18\x02 \x03(\x0b\x32\x1a.gooseai.SafetensorsTensor\"0\n\nLoraWeight\x12\x12\n\nmodel_name\x18\x01 \x01(\t\x12\x0e\n\x06weight\x18\x02 \x01(\x02\"T\n\x04Lora\x12\"\n\x04lora\x18\x01 \x01(\x0b\x32\x14.gooseai.Safetensors\x12(\n\x07weights\x18\x02 \x03(\x0b\x32\x13.gooseai.LoraWeightB\x02\x18\x01\"e\n\x11\x41rtifactReference\x12\x0c\n\x02id\x18\x01 \x01(\x04H\x00\x12\x0e\n\x04uuid\x18\x02 \x01(\tH\x00\x12%\n\x05stage\x18\x03 \x01(\x0e\x32\x16.gooseai.ArtifactStageB\x0b\n\treference\"?\n\x0eTokenEmbedding\x12\x0c\n\x04text\x18\x01 \x01(\t\x12\x1f\n\x06tensor\x18\x02 \x01(\x0b\x32\x0f.tensors.Tensor\"X\n\x0c\x43\x61\x63heControl\x12\x10\n\x08\x63\x61\x63he_id\x18\x01 \x01(\t\x12\x0f\n\x07max_age\x18\x02 \x01(\r\x12%\n\x05stage\x18\x03 \x01(\x0e\x32\x16.gooseai.ArtifactStage\"\xac\x06\n\x08\x41rtifact\x12\n\n\x02id\x18\x01 \x01(\x04\x12#\n\x04type\x18\x02 \x01(\x0e\x32\x15.gooseai.ArtifactType\x12\x0c\n\x04mime\x18\x03 \x01(\t\x12\x12\n\x05magic\x18\x04 \x01(\tH\x01\x88\x01\x01\x12\x10\n\x06\x62inary\x18\x05 \x01(\x0cH\x00\x12\x0e\n\x04text\x18\x06 \x01(\tH\x00\x12!\n\x06tokens\x18\x07 \x01(\x0b\x32\x0f.gooseai.TokensH\x00\x12\x33\n\nclassifier\x18\x0b \x01(\x0b\x32\x1d.gooseai.ClassifierParametersH\x00\x12!\n\x06tensor\x18\x0e \x01(\x0b\x32\x0f.tensors.TensorH\x00\x12*\n\x03ref\x18\xff\x03 \x01(\x0b\x32\x1a.gooseai.ArtifactReferenceH\x00\x12\x0e\n\x03url\x18\x81\x04 \x01(\tH\x00\x12,\n\x0bsafetensors\x18\x82\x04 \x01(\x0b\x32\x14.gooseai.SafetensorsH\x00\x12\x13\n\x08\x63\x61\x63he_id\x18\xa6\x04 \x01(\tH\x00\x12\"\n\x04lora\x18\xfe\x03 \x01(\x0b\x32\r.gooseai.LoraB\x02\x18\x01H\x00\x12\x37\n\x0ftoken_embedding\x18\x80\x04 \x01(\x0b\x32\x17.gooseai.TokenEmbeddingB\x02\x18\x01H\x00\x12\r\n\x05index\x18\x08 \x01(\r\x12,\n\rfinish_reason\x18\t \x01(\x0e\x32\x15.gooseai.FinishReason\x12\x0c\n\x04seed\x18\n \x01(\r\x12\x0c\n\x04uuid\x18\x0c \x01(\t\x12\x0c\n\x04size\x18\r \x01(\x04\x12.\n\x0b\x61\x64justments\x18\xf4\x03 \x03(\x0b\x32\x18.gooseai.ImageAdjustment\x12\x32\n\x0fpostAdjustments\x18\xf5\x03 \x03(\x0b\x32\x18.gooseai.ImageAdjustment\x12\x1d\n\x0fhint_image_type\x18\x88\x04 \x01(\tH\x02\x88\x01\x01\x12\x32\n\rcache_control\x18\xa7\x04 \x01(\x0b\x32\x15.gooseai.CacheControlH\x03\x88\x01\x01\x42\x06\n\x04\x64\x61taB\x08\n\x06_magicB\x12\n\x10_hint_image_typeB\x10\n\x0e_cache_control\"+\n\x0bNamedWeight\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06weight\x18\x02 \x01(\x02\"N\n\rTokenOverride\x12\r\n\x05token\x18\x01 \x01(\t\x12\x1b\n\x0eoriginal_token\x18\x02 \x01(\tH\x00\x88\x01\x01\x42\x11\n\x0f_original_token\"V\n\x08LOIPoint\x12\t\n\x01x\x18\x01 \x01(\x04\x12\t\n\x01y\x18\x02 \x01(\x04\x12*\n\x05label\x18\x03 \x01(\x0e\x32\x16.gooseai.LOIPointLabelH\x00\x88\x01\x01\x42\x08\n\x06_label\"H\n\x0cLOIRectangle\x12\x0b\n\x03top\x18\x01 \x01(\x04\x12\x0c\n\x04left\x18\x02 \x01(\x04\x12\x0e\n\x06\x62ottom\x18\x03 \x01(\x04\x12\r\n\x05right\x18\x04 \x01(\x04\"c\n\x13LocationsOfInterest\x12!\n\x06points\x18\x01 \x03(\x0b\x32\x11.gooseai.LOIPoint\x12)\n\nrectangles\x18\x02 \x03(\x0b\x32\x15.gooseai.LOIRectangle\"\x9d\x02\n\x10PromptParameters\x12\x11\n\x04init\x18\x01 \x01(\x08H\x00\x88\x01\x01\x12\x13\n\x06weight\x18\x02 \x01(\x02H\x01\x88\x01\x01\x12,\n\rnamed_weights\x18\xf4\x03 \x03(\x0b\x32\x14.gooseai.NamedWeight\x12\x30\n\x0ftoken_overrides\x18\xf5\x03 
\x03(\x0b\x32\x16.gooseai.TokenOverride\x12\x18\n\nclip_layer\x18\xf6\x03 \x01(\rH\x02\x88\x01\x01\x12\x32\n\rhint_priority\x18\xf7\x03 \x01(\x0e\x32\x15.gooseai.HintPriorityH\x03\x88\x01\x01\x42\x07\n\x05_initB\t\n\x07_weightB\r\n\x0b_clip_layerB\x10\n\x0e_hint_priority\"\xf1\x01\n\x06Prompt\x12\x32\n\nparameters\x18\x01 \x01(\x0b\x32\x19.gooseai.PromptParametersH\x01\x88\x01\x01\x12\x0e\n\x04text\x18\x02 \x01(\tH\x00\x12!\n\x06tokens\x18\x03 \x01(\x0b\x32\x0f.gooseai.TokensH\x00\x12%\n\x08\x61rtifact\x18\x04 \x01(\x0b\x32\x11.gooseai.ArtifactH\x00\x12,\n\x03loi\x18\xf5\x03 \x01(\x0b\x32\x1c.gooseai.LocationsOfInterestH\x00\x12\x12\n\techo_back\x18\xf4\x03 \x01(\x08\x42\x08\n\x06promptB\r\n\x0b_parameters\"\x85\x01\n\x0fSigmaParameters\x12\x16\n\tsigma_min\x18\x01 \x01(\x02H\x00\x88\x01\x01\x12\x16\n\tsigma_max\x18\x02 \x01(\x02H\x01\x88\x01\x01\x12\x17\n\nkarras_rho\x18\n \x01(\x02H\x02\x88\x01\x01\x42\x0c\n\n_sigma_minB\x0c\n\n_sigma_maxB\r\n\x0b_karras_rho\"n\n\rChurnSettings\x12\r\n\x05\x63hurn\x18\x01 \x01(\x02\x12\x17\n\nchurn_tmin\x18\x02 \x01(\x02H\x00\x88\x01\x01\x12\x17\n\nchurn_tmax\x18\x03 \x01(\x02H\x01\x88\x01\x01\x42\r\n\x0b_churn_tminB\r\n\x0b_churn_tmax\"\x8b\x04\n\x11SamplerParameters\x12\x10\n\x03\x65ta\x18\x01 \x01(\x02H\x00\x88\x01\x01\x12\x1b\n\x0esampling_steps\x18\x02 \x01(\x04H\x01\x88\x01\x01\x12\x1c\n\x0flatent_channels\x18\x03 \x01(\x04H\x02\x88\x01\x01\x12 \n\x13\x64ownsampling_factor\x18\x04 \x01(\x04H\x03\x88\x01\x01\x12\x16\n\tcfg_scale\x18\x05 \x01(\x02H\x04\x88\x01\x01\x12\x1d\n\x10init_noise_scale\x18\x06 \x01(\x02H\x05\x88\x01\x01\x12\x1d\n\x10step_noise_scale\x18\x07 \x01(\x02H\x06\x88\x01\x01\x12+\n\x05\x63hurn\x18\xf4\x03 \x01(\x0b\x32\x16.gooseai.ChurnSettingsH\x07\x88\x01\x01\x12-\n\x05sigma\x18\xf5\x03 \x01(\x0b\x32\x18.gooseai.SigmaParametersH\x08\x88\x01\x01\x12\x33\n\nnoise_type\x18\xf6\x03 \x01(\x0e\x32\x19.gooseai.SamplerNoiseTypeH\t\x88\x01\x01\x42\x06\n\x04_etaB\x11\n\x0f_sampling_stepsB\x12\n\x10_latent_channelsB\x16\n\x14_downsampling_factorB\x0c\n\n_cfg_scaleB\x13\n\x11_init_noise_scaleB\x13\n\x11_step_noise_scaleB\x08\n\x06_churnB\x08\n\x06_sigmaB\r\n\x0b_noise_type\"\x8b\x01\n\x15\x43onditionerParameters\x12 \n\x13vector_adjust_prior\x18\x01 \x01(\tH\x00\x88\x01\x01\x12(\n\x0b\x63onditioner\x18\x02 \x01(\x0b\x32\x0e.gooseai.ModelH\x01\x88\x01\x01\x42\x16\n\x14_vector_adjust_priorB\x0e\n\x0c_conditioner\"j\n\x12ScheduleParameters\x12\x12\n\x05start\x18\x01 \x01(\x02H\x00\x88\x01\x01\x12\x10\n\x03\x65nd\x18\x02 \x01(\x02H\x01\x88\x01\x01\x12\x12\n\x05value\x18\x03 \x01(\x02H\x02\x88\x01\x01\x42\x08\n\x06_startB\x06\n\x04_endB\x08\n\x06_value\"\xe4\x01\n\rStepParameter\x12\x13\n\x0bscaled_step\x18\x01 \x01(\x02\x12\x30\n\x07sampler\x18\x02 \x01(\x0b\x32\x1a.gooseai.SamplerParametersH\x00\x88\x01\x01\x12\x32\n\x08schedule\x18\x03 \x01(\x0b\x32\x1b.gooseai.ScheduleParametersH\x01\x88\x01\x01\x12\x32\n\x08guidance\x18\x04 \x01(\x0b\x32\x1b.gooseai.GuidanceParametersH\x02\x88\x01\x01\x42\n\n\x08_samplerB\x0b\n\t_scheduleB\x0b\n\t_guidance\"\x97\x01\n\x05Model\x12\x30\n\x0c\x61rchitecture\x18\x01 \x01(\x0e\x32\x1a.gooseai.ModelArchitecture\x12\x11\n\tpublisher\x18\x02 \x01(\t\x12\x0f\n\x07\x64\x61taset\x18\x03 \x01(\t\x12\x0f\n\x07version\x18\x04 \x01(\x02\x12\x18\n\x10semantic_version\x18\x05 \x01(\t\x12\r\n\x05\x61lias\x18\x06 \x01(\t\"\xbc\x01\n\x10\x43utoutParameters\x12*\n\x07\x63utouts\x18\x01 \x03(\x0b\x32\x19.gooseai.CutoutParameters\x12\x12\n\x05\x63ount\x18\x02 \x01(\rH\x00\x88\x01\x01\x12\x11\n\x04gray\x18\x03 
\x01(\x02H\x01\x88\x01\x01\x12\x11\n\x04\x62lur\x18\x04 \x01(\x02H\x02\x88\x01\x01\x12\x17\n\nsize_power\x18\x05 \x01(\x02H\x03\x88\x01\x01\x42\x08\n\x06_countB\x07\n\x05_grayB\x07\n\x05_blurB\r\n\x0b_size_power\"=\n\x1aGuidanceScheduleParameters\x12\x10\n\x08\x64uration\x18\x01 \x01(\x02\x12\r\n\x05value\x18\x02 \x01(\x02\"\x97\x02\n\x1aGuidanceInstanceParameters\x12\x1e\n\x06models\x18\x02 \x03(\x0b\x32\x0e.gooseai.Model\x12\x1e\n\x11guidance_strength\x18\x03 \x01(\x02H\x00\x88\x01\x01\x12\x35\n\x08schedule\x18\x04 \x03(\x0b\x32#.gooseai.GuidanceScheduleParameters\x12/\n\x07\x63utouts\x18\x05 \x01(\x0b\x32\x19.gooseai.CutoutParametersH\x01\x88\x01\x01\x12$\n\x06prompt\x18\x06 \x01(\x0b\x32\x0f.gooseai.PromptH\x02\x88\x01\x01\x42\x14\n\x12_guidance_strengthB\n\n\x08_cutoutsB\t\n\x07_prompt\"~\n\x12GuidanceParameters\x12\x30\n\x0fguidance_preset\x18\x01 \x01(\x0e\x32\x17.gooseai.GuidancePreset\x12\x36\n\tinstances\x18\x02 \x03(\x0b\x32#.gooseai.GuidanceInstanceParameters\"n\n\rTransformType\x12.\n\tdiffusion\x18\x01 \x01(\x0e\x32\x19.gooseai.DiffusionSamplerH\x00\x12%\n\x08upscaler\x18\x02 \x01(\x0e\x32\x11.gooseai.UpscalerH\x00\x42\x06\n\x04type\"Y\n\x11\x45xtendedParameter\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x05\x66loat\x18\x02 \x01(\x02H\x00\x12\r\n\x03int\x18\x03 \x01(\x04H\x00\x12\r\n\x03str\x18\x04 \x01(\tH\x00\x42\x07\n\x05value\"D\n\x12\x45xtendedParameters\x12.\n\nparameters\x18\x01 \x03(\x0b\x32\x1a.gooseai.ExtendedParameter\"P\n\x12HiresFixParameters\x12\x0e\n\x06\x65nable\x18\x01 \x01(\x08\x12\x19\n\x0coos_fraction\x18\x02 \x01(\x02H\x00\x88\x01\x01\x42\x0f\n\r_oos_fraction\"\xa8\x05\n\x0fImageParameters\x12\x13\n\x06height\x18\x01 \x01(\x04H\x00\x88\x01\x01\x12\x12\n\x05width\x18\x02 \x01(\x04H\x01\x88\x01\x01\x12\x0c\n\x04seed\x18\x03 \x03(\r\x12\x14\n\x07samples\x18\x04 \x01(\x04H\x02\x88\x01\x01\x12\x12\n\x05steps\x18\x05 \x01(\x04H\x03\x88\x01\x01\x12.\n\ttransform\x18\x06 \x01(\x0b\x32\x16.gooseai.TransformTypeH\x04\x88\x01\x01\x12*\n\nparameters\x18\x07 \x03(\x0b\x32\x16.gooseai.StepParameter\x12\x36\n\x10masked_area_init\x18\x08 \x01(\x0e\x32\x17.gooseai.MaskedAreaInitH\x05\x88\x01\x01\x12\x31\n\rweight_method\x18\t \x01(\x0e\x32\x15.gooseai.WeightMethodH\x06\x88\x01\x01\x12\x15\n\x08quantize\x18\n \x01(\x08H\x07\x88\x01\x01\x12\x34\n\textension\x18\xf4\x03 \x01(\x0b\x32\x1b.gooseai.ExtendedParametersH\x08\x88\x01\x01\x12\x30\n\x05hires\x18\xfe\x03 \x01(\x0b\x32\x1b.gooseai.HiresFixParametersH\t\x88\x01\x01\x12\x14\n\x06tiling\x18\x88\x04 \x01(\x08H\n\x88\x01\x01\x12\x16\n\x08tiling_x\x18\x89\x04 \x01(\x08H\x0b\x88\x01\x01\x12\x16\n\x08tiling_y\x18\x8a\x04 \x01(\x08H\x0c\x88\x01\x01\x42\t\n\x07_heightB\x08\n\x06_widthB\n\n\x08_samplesB\x08\n\x06_stepsB\x0c\n\n_transformB\x13\n\x11_masked_area_initB\x10\n\x0e_weight_methodB\x0b\n\t_quantizeB\x0c\n\n_extensionB\x08\n\x06_hiresB\t\n\x07_tilingB\x0b\n\t_tiling_xB\x0b\n\t_tiling_y\"J\n\x11\x43lassifierConcept\x12\x0f\n\x07\x63oncept\x18\x01 \x01(\t\x12\x16\n\tthreshold\x18\x02 \x01(\x02H\x00\x88\x01\x01\x42\x0c\n\n_threshold\"\xf4\x01\n\x12\x43lassifierCategory\x12\x0c\n\x04name\x18\x01 \x01(\t\x12,\n\x08\x63oncepts\x18\x02 \x03(\x0b\x32\x1a.gooseai.ClassifierConcept\x12\x17\n\nadjustment\x18\x03 \x01(\x02H\x00\x88\x01\x01\x12$\n\x06\x61\x63tion\x18\x04 \x01(\x0e\x32\x0f.gooseai.ActionH\x01\x88\x01\x01\x12\x35\n\x0f\x63lassifier_mode\x18\x05 
\x01(\x0e\x32\x17.gooseai.ClassifierModeH\x02\x88\x01\x01\x42\r\n\x0b_adjustmentB\t\n\x07_actionB\x12\n\x10_classifier_mode\"\xb8\x01\n\x14\x43lassifierParameters\x12/\n\ncategories\x18\x01 \x03(\x0b\x32\x1b.gooseai.ClassifierCategory\x12,\n\x07\x65xceeds\x18\x02 \x03(\x0b\x32\x1b.gooseai.ClassifierCategory\x12-\n\x0frealized_action\x18\x03 \x01(\x0e\x32\x0f.gooseai.ActionH\x00\x88\x01\x01\x42\x12\n\x10_realized_action\"k\n\x0f\x41ssetParameters\x12$\n\x06\x61\x63tion\x18\x01 \x01(\x0e\x32\x14.gooseai.AssetAction\x12\x12\n\nproject_id\x18\x02 \x01(\t\x12\x1e\n\x03use\x18\x03 \x01(\x0e\x32\x11.gooseai.AssetUse\"\x94\x01\n\nAnswerMeta\x12\x13\n\x06gpu_id\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x13\n\x06\x63pu_id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x14\n\x07node_id\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x16\n\tengine_id\x18\x04 \x01(\tH\x03\x88\x01\x01\x42\t\n\x07_gpu_idB\t\n\x07_cpu_idB\n\n\x08_node_idB\x0c\n\n_engine_id\"\xa9\x01\n\x06\x41nswer\x12\x11\n\tanswer_id\x18\x01 \x01(\t\x12\x12\n\nrequest_id\x18\x02 \x01(\t\x12\x10\n\x08received\x18\x03 \x01(\x04\x12\x0f\n\x07\x63reated\x18\x04 \x01(\x04\x12&\n\x04meta\x18\x06 \x01(\x0b\x32\x13.gooseai.AnswerMetaH\x00\x88\x01\x01\x12$\n\tartifacts\x18\x07 \x03(\x0b\x32\x11.gooseai.ArtifactB\x07\n\x05_meta\"\xfc\x02\n\x07Request\x12\x11\n\tengine_id\x18\x01 \x01(\t\x12\x12\n\nrequest_id\x18\x02 \x01(\t\x12-\n\x0erequested_type\x18\x03 \x01(\x0e\x32\x15.gooseai.ArtifactType\x12\x1f\n\x06prompt\x18\x04 \x03(\x0b\x32\x0f.gooseai.Prompt\x12)\n\x05image\x18\x05 \x01(\x0b\x32\x18.gooseai.ImageParametersH\x00\x12\x33\n\nclassifier\x18\x07 \x01(\x0b\x32\x1d.gooseai.ClassifierParametersH\x00\x12)\n\x05\x61sset\x18\x08 \x01(\x0b\x32\x18.gooseai.AssetParametersH\x00\x12\x38\n\x0b\x63onditioner\x18\x06 \x01(\x0b\x32\x1e.gooseai.ConditionerParametersH\x01\x88\x01\x01\x12\x0f\n\x06\x61\x63\x63\x65pt\x18\xf4\x03 \x01(\tB\x08\n\x06paramsB\x0e\n\x0c_conditionerJ\x04\x08\t\x10\nJ\x04\x08\n\x10\x0b\"w\n\x08OnStatus\x12%\n\x06reason\x18\x01 \x03(\x0e\x32\x15.gooseai.FinishReason\x12\x13\n\x06target\x18\x02 \x01(\tH\x00\x88\x01\x01\x12$\n\x06\x61\x63tion\x18\x03 \x03(\x0e\x32\x14.gooseai.StageActionB\t\n\x07_target\"\\\n\x05Stage\x12\n\n\x02id\x18\x01 \x01(\t\x12!\n\x07request\x18\x02 \x01(\x0b\x32\x10.gooseai.Request\x12$\n\ton_status\x18\x03 \x03(\x0b\x32\x11.gooseai.OnStatus\"A\n\x0c\x43hainRequest\x12\x12\n\nrequest_id\x18\x01 \x01(\t\x12\x1d\n\x05stage\x18\x02 \x03(\x0b\x32\x0e.gooseai.Stage\",\n\x0b\x41syncStatus\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x0f\n\x07message\x18\x02 \x01(\t\"f\n\x0b\x41syncAnswer\x12\x1f\n\x06\x61nswer\x18\x01 \x03(\x0b\x32\x0f.gooseai.Answer\x12\x10\n\x08\x63omplete\x18\x02 \x01(\x08\x12$\n\x06status\x18\x03 \x01(\x0b\x32\x14.gooseai.AsyncStatus\"7\n\x0b\x41syncHandle\x12\x12\n\nrequest_id\x18\x01 \x01(\t\x12\x14\n\x0c\x61sync_handle\x18\x02 
\x01(\t\"\x13\n\x11\x41syncCancelAnswer*E\n\x0c\x46inishReason\x12\x08\n\x04NULL\x10\x00\x12\n\n\x06LENGTH\x10\x01\x12\x08\n\x04STOP\x10\x02\x12\t\n\x05\x45RROR\x10\x03\x12\n\n\x06\x46ILTER\x10\x04*\xc6\x02\n\x0c\x41rtifactType\x12\x11\n\rARTIFACT_NONE\x10\x00\x12\x12\n\x0e\x41RTIFACT_IMAGE\x10\x01\x12\x12\n\x0e\x41RTIFACT_VIDEO\x10\x02\x12\x11\n\rARTIFACT_TEXT\x10\x03\x12\x13\n\x0f\x41RTIFACT_TOKENS\x10\x04\x12\x16\n\x12\x41RTIFACT_EMBEDDING\x10\x05\x12\x1c\n\x18\x41RTIFACT_CLASSIFICATIONS\x10\x06\x12\x11\n\rARTIFACT_MASK\x10\x07\x12\x13\n\x0f\x41RTIFACT_LATENT\x10\x08\x12\x13\n\x0f\x41RTIFACT_TENSOR\x10\t\x12\x12\n\rARTIFACT_LORA\x10\xf4\x03\x12\x13\n\x0e\x41RTIFACT_DEPTH\x10\xf5\x03\x12\x1d\n\x18\x41RTIFACT_TOKEN_EMBEDDING\x10\xf6\x03\x12\x18\n\x13\x41RTIFACT_HINT_IMAGE\x10\xf7\x03*M\n\x11GaussianDirection\x12\x12\n\x0e\x44IRECTION_NONE\x10\x00\x12\x10\n\x0c\x44IRECTION_UP\x10\x01\x12\x12\n\x0e\x44IRECTION_DOWN\x10\x02*\x83\x01\n\rChannelSource\x12\r\n\tCHANNEL_R\x10\x00\x12\r\n\tCHANNEL_G\x10\x01\x12\r\n\tCHANNEL_B\x10\x02\x12\r\n\tCHANNEL_A\x10\x03\x12\x10\n\x0c\x43HANNEL_ZERO\x10\x04\x12\x0f\n\x0b\x43HANNEL_ONE\x10\x05\x12\x13\n\x0f\x43HANNEL_DISCARD\x10\x06*\x8a\x01\n\x0bRescaleMode\x12\x12\n\x0eRESCALE_STRICT\x10\x00\x12\x11\n\rRESCALE_COVER\x10\x02\x12\x18\n\x14RESCALE_CONTAIN_ZERO\x10\x03\x12\x1d\n\x19RESCALE_CONTAIN_REPLICATE\x10\x04\x12\x1b\n\x17RESCALE_CONTAIN_REFLECT\x10\x05*D\n\x15\x42\x61\x63kgroundRemovalMode\x12\t\n\x05\x41LPHA\x10\x00\x12\t\n\x05SOLID\x10\x01\x12\x08\n\x04\x42LUR\x10\x02\x12\x0b\n\x07NOTHING\x10\x03*u\n\x14MaskPredictBehaviour\x12\x11\n\rMASK_AS_ALPHA\x10\x00\x12\x13\n\x0fMASK_OVER_SOLID\x10\x01\x12\x12\n\x0eMASK_OVER_BLUR\x10\x02\x12\x13\n\x0fMASK_DO_NOTHING\x10\x03\x12\x0c\n\x08MASK_USE\x10\x04*w\n\x0fMaskPredictMode\x12\x11\n\rPREDICT_MATTE\x10\x00\x12\x10\n\x0cPREDICT_MASK\x10\x01\x12\x14\n\x10PREDICT_OVERMASK\x10\x02\x12\x15\n\x11PREDICT_UNDERMASK\x10\x03\x12\x12\n\x0ePREDICT_TRIMAP\x10\x04*t\n\rArtifactStage\x12\x1f\n\x1b\x41RTIFACT_BEFORE_ADJUSTMENTS\x10\x00\x12\x1e\n\x1a\x41RTIFACT_AFTER_ADJUSTMENTS\x10\x01\x12\"\n\x1e\x41RTIFACT_AFTER_POSTADJUSTMENTS\x10\x02*g\n\x0eMaskedAreaInit\x12\x19\n\x15MASKED_AREA_INIT_ZERO\x10\x00\x12\x1b\n\x17MASKED_AREA_INIT_RANDOM\x10\x01\x12\x1d\n\x19MASKED_AREA_INIT_ORIGINAL\x10\x02*5\n\x0cWeightMethod\x12\x10\n\x0cTEXT_ENCODER\x10\x00\x12\x13\n\x0f\x43ROSS_ATTENTION\x10\x01*j\n\x0cHintPriority\x12\x11\n\rHINT_BALANCED\x10\x00\x12\x1a\n\x16HINT_PRIORITISE_PROMPT\x10\x01\x12\x18\n\x14HINT_PRIORITISE_HINT\x10\x02\x12\x11\n\rHINT_ADAPTIVE\x10\x03*;\n\rLOIPointLabel\x12\x14\n\x10POINT_BACKGROUND\x10\x00\x12\x14\n\x10POINT_FOREGROUND\x10\x01*\x9b\x04\n\x10\x44iffusionSampler\x12\x10\n\x0cSAMPLER_DDIM\x10\x00\x12\x10\n\x0cSAMPLER_DDPM\x10\x01\x12\x13\n\x0fSAMPLER_K_EULER\x10\x02\x12\x1d\n\x19SAMPLER_K_EULER_ANCESTRAL\x10\x03\x12\x12\n\x0eSAMPLER_K_HEUN\x10\x04\x12\x13\n\x0fSAMPLER_K_DPM_2\x10\x05\x12\x1d\n\x19SAMPLER_K_DPM_2_ANCESTRAL\x10\x06\x12\x11\n\rSAMPLER_K_LMS\x10\x07\x12 \n\x1cSAMPLER_K_DPMPP_2S_ANCESTRAL\x10\x08\x12\x16\n\x12SAMPLER_K_DPMPP_2M\x10\t\x12\x17\n\x13SAMPLER_K_DPMPP_SDE\x10\n\x12\x1f\n\x1aSAMPLER_DPMSOLVERPP_1ORDER\x10\xf4\x03\x12\x1f\n\x1aSAMPLER_DPMSOLVERPP_2ORDER\x10\xf5\x03\x12\x1f\n\x1aSAMPLER_DPMSOLVERPP_3ORDER\x10\xf6\x03\x12\x15\n\x10SAMPLER_DPM_FAST\x10\xa6\x04\x12\x19\n\x14SAMPLER_DPM_ADAPTIVE\x10\xa7\x04\x12)\n SAMPLER_DPMSOLVERPP_2S_ANCESTRAL\x10\xa8\x04\x1a\x02\x08\x01\x12 
\n\x17SAMPLER_DPMSOLVERPP_SDE\x10\xa9\x04\x1a\x02\x08\x01\x12\x1f\n\x16SAMPLER_DPMSOLVERPP_2M\x10\xaa\x04\x1a\x02\x08\x01*H\n\x10SamplerNoiseType\x12\x18\n\x14SAMPLER_NOISE_NORMAL\x10\x00\x12\x1a\n\x16SAMPLER_NOISE_BROWNIAN\x10\x01*F\n\x08Upscaler\x12\x10\n\x0cUPSCALER_RGB\x10\x00\x12\x13\n\x0fUPSCALER_GFPGAN\x10\x01\x12\x13\n\x0fUPSCALER_ESRGAN\x10\x02*\xd8\x01\n\x0eGuidancePreset\x12\x18\n\x14GUIDANCE_PRESET_NONE\x10\x00\x12\x1a\n\x16GUIDANCE_PRESET_SIMPLE\x10\x01\x12\x1d\n\x19GUIDANCE_PRESET_FAST_BLUE\x10\x02\x12\x1e\n\x1aGUIDANCE_PRESET_FAST_GREEN\x10\x03\x12\x18\n\x14GUIDANCE_PRESET_SLOW\x10\x04\x12\x1a\n\x16GUIDANCE_PRESET_SLOWER\x10\x05\x12\x1b\n\x17GUIDANCE_PRESET_SLOWEST\x10\x06*\x91\x01\n\x11ModelArchitecture\x12\x1b\n\x17MODEL_ARCHITECTURE_NONE\x10\x00\x12\x1f\n\x1bMODEL_ARCHITECTURE_CLIP_VIT\x10\x01\x12\"\n\x1eMODEL_ARCHITECTURE_CLIP_RESNET\x10\x02\x12\x1a\n\x16MODEL_ARCHITECTURE_LDM\x10\x03*\xa2\x01\n\x06\x41\x63tion\x12\x16\n\x12\x41\x43TION_PASSTHROUGH\x10\x00\x12\x1f\n\x1b\x41\x43TION_REGENERATE_DUPLICATE\x10\x01\x12\x15\n\x11\x41\x43TION_REGENERATE\x10\x02\x12\x1e\n\x1a\x41\x43TION_OBFUSCATE_DUPLICATE\x10\x03\x12\x14\n\x10\x41\x43TION_OBFUSCATE\x10\x04\x12\x12\n\x0e\x41\x43TION_DISCARD\x10\x05*D\n\x0e\x43lassifierMode\x12\x17\n\x13\x43LSFR_MODE_ZEROSHOT\x10\x00\x12\x19\n\x15\x43LSFR_MODE_MULTICLASS\x10\x01*=\n\x0b\x41ssetAction\x12\r\n\tASSET_PUT\x10\x00\x12\r\n\tASSET_GET\x10\x01\x12\x10\n\x0c\x41SSET_DELETE\x10\x02*\x81\x01\n\x08\x41ssetUse\x12\x17\n\x13\x41SSET_USE_UNDEFINED\x10\x00\x12\x13\n\x0f\x41SSET_USE_INPUT\x10\x01\x12\x14\n\x10\x41SSET_USE_OUTPUT\x10\x02\x12\x1a\n\x16\x41SSET_USE_INTERMEDIATE\x10\x03\x12\x15\n\x11\x41SSET_USE_PROJECT\x10\x04*W\n\x0bStageAction\x12\x15\n\x11STAGE_ACTION_PASS\x10\x00\x12\x18\n\x14STAGE_ACTION_DISCARD\x10\x01\x12\x17\n\x13STAGE_ACTION_RETURN\x10\x02\x32\xbe\x02\n\x11GenerationService\x12\x31\n\x08Generate\x12\x10.gooseai.Request\x1a\x0f.gooseai.Answer\"\x00\x30\x01\x12;\n\rChainGenerate\x12\x15.gooseai.ChainRequest\x1a\x0f.gooseai.Answer\"\x00\x30\x01\x12\x39\n\rAsyncGenerate\x12\x10.gooseai.Request\x1a\x14.gooseai.AsyncHandle\"\x00\x12;\n\x0b\x41syncResult\x12\x14.gooseai.AsyncHandle\x1a\x14.gooseai.AsyncAnswer\"\x00\x12\x41\n\x0b\x41syncCancel\x12\x14.gooseai.AsyncHandle\x1a\x1a.gooseai.AsyncCancelAnswer\"\x00\x42;Z9github.com/stability-ai/api-interfaces/gooseai/generationb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x10generation.proto\x12\x07gooseai\x1a\rtensors.proto\"/\n\x05Token\x12\x11\n\x04text\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\n\n\x02id\x18\x02 \x01(\rB\x07\n\x05_text\"T\n\x06Tokens\x12\x1e\n\x06tokens\x18\x01 \x03(\x0b\x32\x0e.gooseai.Token\x12\x19\n\x0ctokenizer_id\x18\x02 \x01(\tH\x00\x88\x01\x01\x42\x0f\n\r_tokenizer_id\"X\n\x18ImageAdjustment_Gaussian\x12\r\n\x05sigma\x18\x01 \x01(\x02\x12-\n\tdirection\x18\x02 \x01(\x0e\x32\x1a.gooseai.GaussianDirection\"\x18\n\x16ImageAdjustment_Invert\"h\n\x16ImageAdjustment_Levels\x12\x11\n\tinput_low\x18\x01 \x01(\x02\x12\x12\n\ninput_high\x18\x02 \x01(\x02\x12\x12\n\noutput_low\x18\x03 \x01(\x02\x12\x13\n\x0boutput_high\x18\x04 \x01(\x02\"\xd2\x01\n\x18ImageAdjustment_Channels\x12&\n\x01r\x18\x01 \x01(\x0e\x32\x16.gooseai.ChannelSourceH\x00\x88\x01\x01\x12&\n\x01g\x18\x02 \x01(\x0e\x32\x16.gooseai.ChannelSourceH\x01\x88\x01\x01\x12&\n\x01\x62\x18\x03 \x01(\x0e\x32\x16.gooseai.ChannelSourceH\x02\x88\x01\x01\x12&\n\x01\x61\x18\x04 
\x01(\x0e\x32\x16.gooseai.ChannelSourceH\x03\x88\x01\x01\x42\x04\n\x02_rB\x04\n\x02_gB\x04\n\x02_bB\x04\n\x02_a\"x\n\x17ImageAdjustment_Rescale\x12\x0e\n\x06height\x18\x01 \x01(\x04\x12\r\n\x05width\x18\x02 \x01(\x04\x12\"\n\x04mode\x18\x03 \x01(\x0e\x32\x14.gooseai.RescaleMode\x12\x1a\n\x0e\x61lgorithm_hint\x18\x04 \x03(\tB\x02\x18\x01\"}\n\x19ImageAdjustment_Autoscale\x12\x13\n\x06height\x18\x01 \x01(\x04H\x00\x88\x01\x01\x12\x12\n\x05width\x18\x02 \x01(\x04H\x01\x88\x01\x01\x12\"\n\x04mode\x18\x03 \x01(\x0e\x32\x14.gooseai.RescaleModeB\t\n\x07_heightB\x08\n\x06_width\"P\n\x14ImageAdjustment_Crop\x12\x0b\n\x03top\x18\x01 \x01(\x04\x12\x0c\n\x04left\x18\x02 \x01(\x04\x12\r\n\x05width\x18\x03 \x01(\x04\x12\x0e\n\x06height\x18\x04 \x01(\x04\"2\n\x15ImageAdjustment_Depth\x12\x19\n\x11\x64\x65pth_engine_hint\x18\x01 \x03(\t\"J\n\x19ImageAdjustment_CannyEdge\x12\x15\n\rlow_threshold\x18\x01 \x01(\x02\x12\x16\n\x0ehigh_threshold\x18\x02 \x01(\x02\"\x1f\n\x1dImageAdjustment_EdgeDetection\"\x1e\n\x1cImageAdjustment_Segmentation\"\x19\n\x17ImageAdjustment_Keypose\"\x1a\n\x18ImageAdjustment_Openpose\"\xc0\x01\n\x16ImageAdjustment_Normal\x12!\n\x14\x62\x61\x63kground_threshold\x18\x01 \x01(\x02H\x00\x88\x01\x01\x12\x14\n\x07preblur\x18\x02 \x01(\rH\x01\x88\x01\x01\x12\x15\n\x08postblur\x18\x03 \x01(\rH\x02\x88\x01\x01\x12\x16\n\tsmoothing\x18\x04 \x01(\x02H\x03\x88\x01\x01\x42\x17\n\x15_background_thresholdB\n\n\x08_preblurB\x0b\n\t_postblurB\x0c\n\n_smoothing\"p\n!ImageAdjustment_BackgroundRemoval\x12\x31\n\x04mode\x18\x01 \x01(\x0e\x32\x1e.gooseai.BackgroundRemovalModeH\x00\x88\x01\x01\x12\x0f\n\x07reapply\x18\x02 \x01(\x08\x42\x07\n\x05_mode\"=\n\x19ImageAdjustment_Palletize\x12\x14\n\x07\x63olours\x18\x01 \x01(\rH\x00\x88\x01\x01\x42\n\n\x08_colours\"-\n\x18ImageAdjustment_Quantize\x12\x11\n\tthreshold\x18\x01 \x03(\x02\"\x19\n\x17ImageAdjustment_Shuffle\"\xd6\x01\n\x1bImageAdjustment_MaskPredict\x12\x30\n\tbehaviour\x18\x01 \x01(\x0e\x32\x1d.gooseai.MaskPredictBehaviour\x12&\n\x04mode\x18\x02 \x01(\x0e\x32\x18.gooseai.MaskPredictMode\x12\x1f\n\x06prompt\x18\n \x03(\x0b\x32\x0f.gooseai.Prompt\x12\x12\n\x05\x65rode\x18\x32 \x01(\x04H\x00\x88\x01\x01\x12\x13\n\x06\x64ilate\x18\x33 \x01(\x04H\x01\x88\x01\x01\x42\x08\n\x06_erodeB\t\n\x07_dilate\"M\n\x19ImageAdjustment_MaskReuse\x12\x30\n\tbehaviour\x18\x01 \x01(\x0e\x32\x1d.gooseai.MaskPredictBehaviour\">\n\x1eImageAdjustment_MaskSoftDilate\x12\x12\n\x05sigma\x18\x01 \x01(\x04H\x00\x88\x01\x01\x42\x08\n\x06_sigma\"\xea\t\n\x0fImageAdjustment\x12\x31\n\x04\x62lur\x18\x01 \x01(\x0b\x32!.gooseai.ImageAdjustment_GaussianH\x00\x12\x31\n\x06invert\x18\x02 \x01(\x0b\x32\x1f.gooseai.ImageAdjustment_InvertH\x00\x12\x31\n\x06levels\x18\x03 \x01(\x0b\x32\x1f.gooseai.ImageAdjustment_LevelsH\x00\x12\x35\n\x08\x63hannels\x18\x04 \x01(\x0b\x32!.gooseai.ImageAdjustment_ChannelsH\x00\x12\x33\n\x07rescale\x18\x05 \x01(\x0b\x32 .gooseai.ImageAdjustment_RescaleH\x00\x12-\n\x04\x63rop\x18\x06 \x01(\x0b\x32\x1d.gooseai.ImageAdjustment_CropH\x00\x12/\n\x05\x64\x65pth\x18\x07 \x01(\x0b\x32\x1e.gooseai.ImageAdjustment_DepthH\x00\x12\x38\n\ncanny_edge\x18\x08 \x01(\x0b\x32\".gooseai.ImageAdjustment_CannyEdgeH\x00\x12@\n\x0e\x65\x64ge_detection\x18\t \x01(\x0b\x32&.gooseai.ImageAdjustment_EdgeDetectionH\x00\x12=\n\x0csegmentation\x18\n \x01(\x0b\x32%.gooseai.ImageAdjustment_SegmentationH\x00\x12\x33\n\x07keypose\x18\x0b \x01(\x0b\x32 .gooseai.ImageAdjustment_KeyposeH\x00\x12\x35\n\x08openpose\x18\x0c 
\x01(\x0b\x32!.gooseai.ImageAdjustment_OpenposeH\x00\x12\x31\n\x06normal\x18\r \x01(\x0b\x32\x1f.gooseai.ImageAdjustment_NormalH\x00\x12H\n\x12\x62\x61\x63kground_removal\x18\x0e \x01(\x0b\x32*.gooseai.ImageAdjustment_BackgroundRemovalH\x00\x12\x37\n\tautoscale\x18\x0f \x01(\x0b\x32\".gooseai.ImageAdjustment_AutoscaleH\x00\x12\x37\n\tpalletize\x18\x10 \x01(\x0b\x32\".gooseai.ImageAdjustment_PalletizeH\x00\x12\x35\n\x08quantize\x18\x11 \x01(\x0b\x32!.gooseai.ImageAdjustment_QuantizeH\x00\x12\x33\n\x07shuffle\x18\x12 \x01(\x0b\x32 .gooseai.ImageAdjustment_ShuffleH\x00\x12<\n\x0cmask_predict\x18\x13 \x01(\x0b\x32$.gooseai.ImageAdjustment_MaskPredictH\x00\x12\x38\n\nmask_reuse\x18\x14 \x01(\x0b\x32\".gooseai.ImageAdjustment_MaskReuseH\x00\x12\x43\n\x10mask_soft_dilate\x18\x15 \x01(\x0b\x32\'.gooseai.ImageAdjustment_MaskSoftDilateH\x00\x12\x17\n\tengine_id\x18\xc8\x01 \x01(\tH\x01\x88\x01\x01\x42\x0c\n\nadjustmentB\x0c\n\n_engine_id\"-\n\x0fSafetensorsMeta\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"A\n\x11SafetensorsTensor\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1f\n\x06tensor\x18\x02 \x01(\x0b\x32\x0f.tensors.Tensor\"f\n\x0bSafetensors\x12*\n\x08metadata\x18\x01 \x03(\x0b\x32\x18.gooseai.SafetensorsMeta\x12+\n\x07tensors\x18\x02 \x03(\x0b\x32\x1a.gooseai.SafetensorsTensor\"0\n\nLoraWeight\x12\x12\n\nmodel_name\x18\x01 \x01(\t\x12\x0e\n\x06weight\x18\x02 \x01(\x02\"T\n\x04Lora\x12\"\n\x04lora\x18\x01 \x01(\x0b\x32\x14.gooseai.Safetensors\x12(\n\x07weights\x18\x02 \x03(\x0b\x32\x13.gooseai.LoraWeightB\x02\x18\x01\"e\n\x11\x41rtifactReference\x12\x0c\n\x02id\x18\x01 \x01(\x04H\x00\x12\x0e\n\x04uuid\x18\x02 \x01(\tH\x00\x12%\n\x05stage\x18\x03 \x01(\x0e\x32\x16.gooseai.ArtifactStageB\x0b\n\treference\"?\n\x0eTokenEmbedding\x12\x0c\n\x04text\x18\x01 \x01(\t\x12\x1f\n\x06tensor\x18\x02 \x01(\x0b\x32\x0f.tensors.Tensor\"X\n\x0c\x43\x61\x63heControl\x12\x10\n\x08\x63\x61\x63he_id\x18\x01 \x01(\t\x12\x0f\n\x07max_age\x18\x02 \x01(\r\x12%\n\x05stage\x18\x03 \x01(\x0e\x32\x16.gooseai.ArtifactStage\"\xac\x06\n\x08\x41rtifact\x12\n\n\x02id\x18\x01 \x01(\x04\x12#\n\x04type\x18\x02 \x01(\x0e\x32\x15.gooseai.ArtifactType\x12\x0c\n\x04mime\x18\x03 \x01(\t\x12\x12\n\x05magic\x18\x04 \x01(\tH\x01\x88\x01\x01\x12\x10\n\x06\x62inary\x18\x05 \x01(\x0cH\x00\x12\x0e\n\x04text\x18\x06 \x01(\tH\x00\x12!\n\x06tokens\x18\x07 \x01(\x0b\x32\x0f.gooseai.TokensH\x00\x12\x33\n\nclassifier\x18\x0b \x01(\x0b\x32\x1d.gooseai.ClassifierParametersH\x00\x12!\n\x06tensor\x18\x0e \x01(\x0b\x32\x0f.tensors.TensorH\x00\x12*\n\x03ref\x18\xff\x03 \x01(\x0b\x32\x1a.gooseai.ArtifactReferenceH\x00\x12\x0e\n\x03url\x18\x81\x04 \x01(\tH\x00\x12,\n\x0bsafetensors\x18\x82\x04 \x01(\x0b\x32\x14.gooseai.SafetensorsH\x00\x12\x13\n\x08\x63\x61\x63he_id\x18\xa6\x04 \x01(\tH\x00\x12\"\n\x04lora\x18\xfe\x03 \x01(\x0b\x32\r.gooseai.LoraB\x02\x18\x01H\x00\x12\x37\n\x0ftoken_embedding\x18\x80\x04 \x01(\x0b\x32\x17.gooseai.TokenEmbeddingB\x02\x18\x01H\x00\x12\r\n\x05index\x18\x08 \x01(\r\x12,\n\rfinish_reason\x18\t \x01(\x0e\x32\x15.gooseai.FinishReason\x12\x0c\n\x04seed\x18\n \x01(\r\x12\x0c\n\x04uuid\x18\x0c \x01(\t\x12\x0c\n\x04size\x18\r \x01(\x04\x12.\n\x0b\x61\x64justments\x18\xf4\x03 \x03(\x0b\x32\x18.gooseai.ImageAdjustment\x12\x32\n\x0fpostAdjustments\x18\xf5\x03 \x03(\x0b\x32\x18.gooseai.ImageAdjustment\x12\x1d\n\x0fhint_image_type\x18\x88\x04 \x01(\tH\x02\x88\x01\x01\x12\x32\n\rcache_control\x18\xa7\x04 
\x01(\x0b\x32\x15.gooseai.CacheControlH\x03\x88\x01\x01\x42\x06\n\x04\x64\x61taB\x08\n\x06_magicB\x12\n\x10_hint_image_typeB\x10\n\x0e_cache_control\"+\n\x0bNamedWeight\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06weight\x18\x02 \x01(\x02\"N\n\rTokenOverride\x12\r\n\x05token\x18\x01 \x01(\t\x12\x1b\n\x0eoriginal_token\x18\x02 \x01(\tH\x00\x88\x01\x01\x42\x11\n\x0f_original_token\"V\n\x08LOIPoint\x12\t\n\x01x\x18\x01 \x01(\x04\x12\t\n\x01y\x18\x02 \x01(\x04\x12*\n\x05label\x18\x03 \x01(\x0e\x32\x16.gooseai.LOIPointLabelH\x00\x88\x01\x01\x42\x08\n\x06_label\"H\n\x0cLOIRectangle\x12\x0b\n\x03top\x18\x01 \x01(\x04\x12\x0c\n\x04left\x18\x02 \x01(\x04\x12\x0e\n\x06\x62ottom\x18\x03 \x01(\x04\x12\r\n\x05right\x18\x04 \x01(\x04\"c\n\x13LocationsOfInterest\x12!\n\x06points\x18\x01 \x03(\x0b\x32\x11.gooseai.LOIPoint\x12)\n\nrectangles\x18\x02 \x03(\x0b\x32\x15.gooseai.LOIRectangle\"S\n\x11InpaintParameters\x12\x30\n\tfill_mode\x18\x01 \x01(\x0e\x32\x18.gooseai.InpaintFillModeH\x00\x88\x01\x01\x42\x0c\n\n_fill_mode\"\xf2\x02\n\x10PromptParameters\x12\x11\n\x04init\x18\x01 \x01(\x08H\x00\x88\x01\x01\x12\x13\n\x06weight\x18\x02 \x01(\x02H\x01\x88\x01\x01\x12,\n\rnamed_weights\x18\xf4\x03 \x03(\x0b\x32\x14.gooseai.NamedWeight\x12\x30\n\x0ftoken_overrides\x18\xf5\x03 \x03(\x0b\x32\x16.gooseai.TokenOverride\x12\x18\n\nclip_layer\x18\xf6\x03 \x01(\rH\x02\x88\x01\x01\x12\x32\n\rhint_priority\x18\xf7\x03 \x01(\x0e\x32\x15.gooseai.HintPriorityH\x03\x88\x01\x01\x12<\n\x12inpaint_parameters\x18\xf8\x03 \x01(\x0b\x32\x1a.gooseai.InpaintParametersH\x04\x88\x01\x01\x42\x07\n\x05_initB\t\n\x07_weightB\r\n\x0b_clip_layerB\x10\n\x0e_hint_priorityB\x15\n\x13_inpaint_parameters\"\xf1\x01\n\x06Prompt\x12\x32\n\nparameters\x18\x01 \x01(\x0b\x32\x19.gooseai.PromptParametersH\x01\x88\x01\x01\x12\x0e\n\x04text\x18\x02 \x01(\tH\x00\x12!\n\x06tokens\x18\x03 \x01(\x0b\x32\x0f.gooseai.TokensH\x00\x12%\n\x08\x61rtifact\x18\x04 \x01(\x0b\x32\x11.gooseai.ArtifactH\x00\x12,\n\x03loi\x18\xf5\x03 \x01(\x0b\x32\x1c.gooseai.LocationsOfInterestH\x00\x12\x12\n\techo_back\x18\xf4\x03 \x01(\x08\x42\x08\n\x06promptB\r\n\x0b_parameters\"\x85\x01\n\x0fSigmaParameters\x12\x16\n\tsigma_min\x18\x01 \x01(\x02H\x00\x88\x01\x01\x12\x16\n\tsigma_max\x18\x02 \x01(\x02H\x01\x88\x01\x01\x12\x17\n\nkarras_rho\x18\n \x01(\x02H\x02\x88\x01\x01\x42\x0c\n\n_sigma_minB\x0c\n\n_sigma_maxB\r\n\x0b_karras_rho\"n\n\rChurnSettings\x12\r\n\x05\x63hurn\x18\x01 \x01(\x02\x12\x17\n\nchurn_tmin\x18\x02 \x01(\x02H\x00\x88\x01\x01\x12\x17\n\nchurn_tmax\x18\x03 \x01(\x02H\x01\x88\x01\x01\x42\r\n\x0b_churn_tminB\r\n\x0b_churn_tmax\"\x8b\x04\n\x11SamplerParameters\x12\x10\n\x03\x65ta\x18\x01 \x01(\x02H\x00\x88\x01\x01\x12\x1b\n\x0esampling_steps\x18\x02 \x01(\x04H\x01\x88\x01\x01\x12\x1c\n\x0flatent_channels\x18\x03 \x01(\x04H\x02\x88\x01\x01\x12 \n\x13\x64ownsampling_factor\x18\x04 \x01(\x04H\x03\x88\x01\x01\x12\x16\n\tcfg_scale\x18\x05 \x01(\x02H\x04\x88\x01\x01\x12\x1d\n\x10init_noise_scale\x18\x06 \x01(\x02H\x05\x88\x01\x01\x12\x1d\n\x10step_noise_scale\x18\x07 \x01(\x02H\x06\x88\x01\x01\x12+\n\x05\x63hurn\x18\xf4\x03 \x01(\x0b\x32\x16.gooseai.ChurnSettingsH\x07\x88\x01\x01\x12-\n\x05sigma\x18\xf5\x03 \x01(\x0b\x32\x18.gooseai.SigmaParametersH\x08\x88\x01\x01\x12\x33\n\nnoise_type\x18\xf6\x03 
\x01(\x0e\x32\x19.gooseai.SamplerNoiseTypeH\t\x88\x01\x01\x42\x06\n\x04_etaB\x11\n\x0f_sampling_stepsB\x12\n\x10_latent_channelsB\x16\n\x14_downsampling_factorB\x0c\n\n_cfg_scaleB\x13\n\x11_init_noise_scaleB\x13\n\x11_step_noise_scaleB\x08\n\x06_churnB\x08\n\x06_sigmaB\r\n\x0b_noise_type\"\x8b\x01\n\x15\x43onditionerParameters\x12 \n\x13vector_adjust_prior\x18\x01 \x01(\tH\x00\x88\x01\x01\x12(\n\x0b\x63onditioner\x18\x02 \x01(\x0b\x32\x0e.gooseai.ModelH\x01\x88\x01\x01\x42\x16\n\x14_vector_adjust_priorB\x0e\n\x0c_conditioner\"j\n\x12ScheduleParameters\x12\x12\n\x05start\x18\x01 \x01(\x02H\x00\x88\x01\x01\x12\x10\n\x03\x65nd\x18\x02 \x01(\x02H\x01\x88\x01\x01\x12\x12\n\x05value\x18\x03 \x01(\x02H\x02\x88\x01\x01\x42\x08\n\x06_startB\x06\n\x04_endB\x08\n\x06_value\"\xe4\x01\n\rStepParameter\x12\x13\n\x0bscaled_step\x18\x01 \x01(\x02\x12\x30\n\x07sampler\x18\x02 \x01(\x0b\x32\x1a.gooseai.SamplerParametersH\x00\x88\x01\x01\x12\x32\n\x08schedule\x18\x03 \x01(\x0b\x32\x1b.gooseai.ScheduleParametersH\x01\x88\x01\x01\x12\x32\n\x08guidance\x18\x04 \x01(\x0b\x32\x1b.gooseai.GuidanceParametersH\x02\x88\x01\x01\x42\n\n\x08_samplerB\x0b\n\t_scheduleB\x0b\n\t_guidance\"\x97\x01\n\x05Model\x12\x30\n\x0c\x61rchitecture\x18\x01 \x01(\x0e\x32\x1a.gooseai.ModelArchitecture\x12\x11\n\tpublisher\x18\x02 \x01(\t\x12\x0f\n\x07\x64\x61taset\x18\x03 \x01(\t\x12\x0f\n\x07version\x18\x04 \x01(\x02\x12\x18\n\x10semantic_version\x18\x05 \x01(\t\x12\r\n\x05\x61lias\x18\x06 \x01(\t\"\xbc\x01\n\x10\x43utoutParameters\x12*\n\x07\x63utouts\x18\x01 \x03(\x0b\x32\x19.gooseai.CutoutParameters\x12\x12\n\x05\x63ount\x18\x02 \x01(\rH\x00\x88\x01\x01\x12\x11\n\x04gray\x18\x03 \x01(\x02H\x01\x88\x01\x01\x12\x11\n\x04\x62lur\x18\x04 \x01(\x02H\x02\x88\x01\x01\x12\x17\n\nsize_power\x18\x05 \x01(\x02H\x03\x88\x01\x01\x42\x08\n\x06_countB\x07\n\x05_grayB\x07\n\x05_blurB\r\n\x0b_size_power\"=\n\x1aGuidanceScheduleParameters\x12\x10\n\x08\x64uration\x18\x01 \x01(\x02\x12\r\n\x05value\x18\x02 \x01(\x02\"\x97\x02\n\x1aGuidanceInstanceParameters\x12\x1e\n\x06models\x18\x02 \x03(\x0b\x32\x0e.gooseai.Model\x12\x1e\n\x11guidance_strength\x18\x03 \x01(\x02H\x00\x88\x01\x01\x12\x35\n\x08schedule\x18\x04 \x03(\x0b\x32#.gooseai.GuidanceScheduleParameters\x12/\n\x07\x63utouts\x18\x05 \x01(\x0b\x32\x19.gooseai.CutoutParametersH\x01\x88\x01\x01\x12$\n\x06prompt\x18\x06 \x01(\x0b\x32\x0f.gooseai.PromptH\x02\x88\x01\x01\x42\x14\n\x12_guidance_strengthB\n\n\x08_cutoutsB\t\n\x07_prompt\"~\n\x12GuidanceParameters\x12\x30\n\x0fguidance_preset\x18\x01 \x01(\x0e\x32\x17.gooseai.GuidancePreset\x12\x36\n\tinstances\x18\x02 \x03(\x0b\x32#.gooseai.GuidanceInstanceParameters\"n\n\rTransformType\x12.\n\tdiffusion\x18\x01 \x01(\x0e\x32\x19.gooseai.DiffusionSamplerH\x00\x12%\n\x08upscaler\x18\x02 \x01(\x0e\x32\x11.gooseai.UpscalerH\x00\x42\x06\n\x04type\"Y\n\x11\x45xtendedParameter\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x05\x66loat\x18\x02 \x01(\x02H\x00\x12\r\n\x03int\x18\x03 \x01(\x04H\x00\x12\r\n\x03str\x18\x04 \x01(\tH\x00\x42\x07\n\x05value\"D\n\x12\x45xtendedParameters\x12.\n\nparameters\x18\x01 \x03(\x0b\x32\x1a.gooseai.ExtendedParameter\"P\n\x12HiresFixParameters\x12\x0e\n\x06\x65nable\x18\x01 \x01(\x08\x12\x19\n\x0coos_fraction\x18\x02 \x01(\x02H\x00\x88\x01\x01\x42\x0f\n\r_oos_fraction\"\xa8\x05\n\x0fImageParameters\x12\x13\n\x06height\x18\x01 \x01(\x04H\x00\x88\x01\x01\x12\x12\n\x05width\x18\x02 \x01(\x04H\x01\x88\x01\x01\x12\x0c\n\x04seed\x18\x03 \x03(\r\x12\x14\n\x07samples\x18\x04 \x01(\x04H\x02\x88\x01\x01\x12\x12\n\x05steps\x18\x05 
\x01(\x04H\x03\x88\x01\x01\x12.\n\ttransform\x18\x06 \x01(\x0b\x32\x16.gooseai.TransformTypeH\x04\x88\x01\x01\x12*\n\nparameters\x18\x07 \x03(\x0b\x32\x16.gooseai.StepParameter\x12\x36\n\x10masked_area_init\x18\x08 \x01(\x0e\x32\x17.gooseai.MaskedAreaInitH\x05\x88\x01\x01\x12\x31\n\rweight_method\x18\t \x01(\x0e\x32\x15.gooseai.WeightMethodH\x06\x88\x01\x01\x12\x15\n\x08quantize\x18\n \x01(\x08H\x07\x88\x01\x01\x12\x34\n\textension\x18\xf4\x03 \x01(\x0b\x32\x1b.gooseai.ExtendedParametersH\x08\x88\x01\x01\x12\x30\n\x05hires\x18\xfe\x03 \x01(\x0b\x32\x1b.gooseai.HiresFixParametersH\t\x88\x01\x01\x12\x14\n\x06tiling\x18\x88\x04 \x01(\x08H\n\x88\x01\x01\x12\x16\n\x08tiling_x\x18\x89\x04 \x01(\x08H\x0b\x88\x01\x01\x12\x16\n\x08tiling_y\x18\x8a\x04 \x01(\x08H\x0c\x88\x01\x01\x42\t\n\x07_heightB\x08\n\x06_widthB\n\n\x08_samplesB\x08\n\x06_stepsB\x0c\n\n_transformB\x13\n\x11_masked_area_initB\x10\n\x0e_weight_methodB\x0b\n\t_quantizeB\x0c\n\n_extensionB\x08\n\x06_hiresB\t\n\x07_tilingB\x0b\n\t_tiling_xB\x0b\n\t_tiling_y\"J\n\x11\x43lassifierConcept\x12\x0f\n\x07\x63oncept\x18\x01 \x01(\t\x12\x16\n\tthreshold\x18\x02 \x01(\x02H\x00\x88\x01\x01\x42\x0c\n\n_threshold\"\xf4\x01\n\x12\x43lassifierCategory\x12\x0c\n\x04name\x18\x01 \x01(\t\x12,\n\x08\x63oncepts\x18\x02 \x03(\x0b\x32\x1a.gooseai.ClassifierConcept\x12\x17\n\nadjustment\x18\x03 \x01(\x02H\x00\x88\x01\x01\x12$\n\x06\x61\x63tion\x18\x04 \x01(\x0e\x32\x0f.gooseai.ActionH\x01\x88\x01\x01\x12\x35\n\x0f\x63lassifier_mode\x18\x05 \x01(\x0e\x32\x17.gooseai.ClassifierModeH\x02\x88\x01\x01\x42\r\n\x0b_adjustmentB\t\n\x07_actionB\x12\n\x10_classifier_mode\"\xb8\x01\n\x14\x43lassifierParameters\x12/\n\ncategories\x18\x01 \x03(\x0b\x32\x1b.gooseai.ClassifierCategory\x12,\n\x07\x65xceeds\x18\x02 \x03(\x0b\x32\x1b.gooseai.ClassifierCategory\x12-\n\x0frealized_action\x18\x03 \x01(\x0e\x32\x0f.gooseai.ActionH\x00\x88\x01\x01\x42\x12\n\x10_realized_action\"k\n\x0f\x41ssetParameters\x12$\n\x06\x61\x63tion\x18\x01 \x01(\x0e\x32\x14.gooseai.AssetAction\x12\x12\n\nproject_id\x18\x02 \x01(\t\x12\x1e\n\x03use\x18\x03 \x01(\x0e\x32\x11.gooseai.AssetUse\"\x94\x01\n\nAnswerMeta\x12\x13\n\x06gpu_id\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x13\n\x06\x63pu_id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x14\n\x07node_id\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x16\n\tengine_id\x18\x04 \x01(\tH\x03\x88\x01\x01\x42\t\n\x07_gpu_idB\t\n\x07_cpu_idB\n\n\x08_node_idB\x0c\n\n_engine_id\"\xa9\x01\n\x06\x41nswer\x12\x11\n\tanswer_id\x18\x01 \x01(\t\x12\x12\n\nrequest_id\x18\x02 \x01(\t\x12\x10\n\x08received\x18\x03 \x01(\x04\x12\x0f\n\x07\x63reated\x18\x04 \x01(\x04\x12&\n\x04meta\x18\x06 \x01(\x0b\x32\x13.gooseai.AnswerMetaH\x00\x88\x01\x01\x12$\n\tartifacts\x18\x07 \x03(\x0b\x32\x11.gooseai.ArtifactB\x07\n\x05_meta\"\xfc\x02\n\x07Request\x12\x11\n\tengine_id\x18\x01 \x01(\t\x12\x12\n\nrequest_id\x18\x02 \x01(\t\x12-\n\x0erequested_type\x18\x03 \x01(\x0e\x32\x15.gooseai.ArtifactType\x12\x1f\n\x06prompt\x18\x04 \x03(\x0b\x32\x0f.gooseai.Prompt\x12)\n\x05image\x18\x05 \x01(\x0b\x32\x18.gooseai.ImageParametersH\x00\x12\x33\n\nclassifier\x18\x07 \x01(\x0b\x32\x1d.gooseai.ClassifierParametersH\x00\x12)\n\x05\x61sset\x18\x08 \x01(\x0b\x32\x18.gooseai.AssetParametersH\x00\x12\x38\n\x0b\x63onditioner\x18\x06 \x01(\x0b\x32\x1e.gooseai.ConditionerParametersH\x01\x88\x01\x01\x12\x0f\n\x06\x61\x63\x63\x65pt\x18\xf4\x03 \x01(\tB\x08\n\x06paramsB\x0e\n\x0c_conditionerJ\x04\x08\t\x10\nJ\x04\x08\n\x10\x0b\"w\n\x08OnStatus\x12%\n\x06reason\x18\x01 
\x03(\x0e\x32\x15.gooseai.FinishReason\x12\x13\n\x06target\x18\x02 \x01(\tH\x00\x88\x01\x01\x12$\n\x06\x61\x63tion\x18\x03 \x03(\x0e\x32\x14.gooseai.StageActionB\t\n\x07_target\"\\\n\x05Stage\x12\n\n\x02id\x18\x01 \x01(\t\x12!\n\x07request\x18\x02 \x01(\x0b\x32\x10.gooseai.Request\x12$\n\ton_status\x18\x03 \x03(\x0b\x32\x11.gooseai.OnStatus\"A\n\x0c\x43hainRequest\x12\x12\n\nrequest_id\x18\x01 \x01(\t\x12\x1d\n\x05stage\x18\x02 \x03(\x0b\x32\x0e.gooseai.Stage\",\n\x0b\x41syncStatus\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x0f\n\x07message\x18\x02 \x01(\t\"f\n\x0b\x41syncAnswer\x12\x1f\n\x06\x61nswer\x18\x01 \x03(\x0b\x32\x0f.gooseai.Answer\x12\x10\n\x08\x63omplete\x18\x02 \x01(\x08\x12$\n\x06status\x18\x03 \x01(\x0b\x32\x14.gooseai.AsyncStatus\"7\n\x0b\x41syncHandle\x12\x12\n\nrequest_id\x18\x01 \x01(\t\x12\x14\n\x0c\x61sync_handle\x18\x02 \x01(\t\"\x13\n\x11\x41syncCancelAnswer*E\n\x0c\x46inishReason\x12\x08\n\x04NULL\x10\x00\x12\n\n\x06LENGTH\x10\x01\x12\x08\n\x04STOP\x10\x02\x12\t\n\x05\x45RROR\x10\x03\x12\n\n\x06\x46ILTER\x10\x04*\xc6\x02\n\x0c\x41rtifactType\x12\x11\n\rARTIFACT_NONE\x10\x00\x12\x12\n\x0e\x41RTIFACT_IMAGE\x10\x01\x12\x12\n\x0e\x41RTIFACT_VIDEO\x10\x02\x12\x11\n\rARTIFACT_TEXT\x10\x03\x12\x13\n\x0f\x41RTIFACT_TOKENS\x10\x04\x12\x16\n\x12\x41RTIFACT_EMBEDDING\x10\x05\x12\x1c\n\x18\x41RTIFACT_CLASSIFICATIONS\x10\x06\x12\x11\n\rARTIFACT_MASK\x10\x07\x12\x13\n\x0f\x41RTIFACT_LATENT\x10\x08\x12\x13\n\x0f\x41RTIFACT_TENSOR\x10\t\x12\x12\n\rARTIFACT_LORA\x10\xf4\x03\x12\x13\n\x0e\x41RTIFACT_DEPTH\x10\xf5\x03\x12\x1d\n\x18\x41RTIFACT_TOKEN_EMBEDDING\x10\xf6\x03\x12\x18\n\x13\x41RTIFACT_HINT_IMAGE\x10\xf7\x03*M\n\x11GaussianDirection\x12\x12\n\x0e\x44IRECTION_NONE\x10\x00\x12\x10\n\x0c\x44IRECTION_UP\x10\x01\x12\x12\n\x0e\x44IRECTION_DOWN\x10\x02*\x83\x01\n\rChannelSource\x12\r\n\tCHANNEL_R\x10\x00\x12\r\n\tCHANNEL_G\x10\x01\x12\r\n\tCHANNEL_B\x10\x02\x12\r\n\tCHANNEL_A\x10\x03\x12\x10\n\x0c\x43HANNEL_ZERO\x10\x04\x12\x0f\n\x0b\x43HANNEL_ONE\x10\x05\x12\x13\n\x0f\x43HANNEL_DISCARD\x10\x06*\x8a\x01\n\x0bRescaleMode\x12\x12\n\x0eRESCALE_STRICT\x10\x00\x12\x11\n\rRESCALE_COVER\x10\x02\x12\x18\n\x14RESCALE_CONTAIN_ZERO\x10\x03\x12\x1d\n\x19RESCALE_CONTAIN_REPLICATE\x10\x04\x12\x1b\n\x17RESCALE_CONTAIN_REFLECT\x10\x05*D\n\x15\x42\x61\x63kgroundRemovalMode\x12\t\n\x05\x41LPHA\x10\x00\x12\t\n\x05SOLID\x10\x01\x12\x08\n\x04\x42LUR\x10\x02\x12\x0b\n\x07NOTHING\x10\x03*u\n\x14MaskPredictBehaviour\x12\x11\n\rMASK_AS_ALPHA\x10\x00\x12\x13\n\x0fMASK_OVER_SOLID\x10\x01\x12\x12\n\x0eMASK_OVER_BLUR\x10\x02\x12\x13\n\x0fMASK_DO_NOTHING\x10\x03\x12\x0c\n\x08MASK_USE\x10\x04*w\n\x0fMaskPredictMode\x12\x11\n\rPREDICT_MATTE\x10\x00\x12\x10\n\x0cPREDICT_MASK\x10\x01\x12\x14\n\x10PREDICT_OVERMASK\x10\x02\x12\x15\n\x11PREDICT_UNDERMASK\x10\x03\x12\x12\n\x0ePREDICT_TRIMAP\x10\x04*t\n\rArtifactStage\x12\x1f\n\x1b\x41RTIFACT_BEFORE_ADJUSTMENTS\x10\x00\x12\x1e\n\x1a\x41RTIFACT_AFTER_ADJUSTMENTS\x10\x01\x12\"\n\x1e\x41RTIFACT_AFTER_POSTADJUSTMENTS\x10\x02*g\n\x0eMaskedAreaInit\x12\x19\n\x15MASKED_AREA_INIT_ZERO\x10\x00\x12\x1b\n\x17MASKED_AREA_INIT_RANDOM\x10\x01\x12\x1d\n\x19MASKED_AREA_INIT_ORIGINAL\x10\x02*5\n\x0cWeightMethod\x12\x10\n\x0cTEXT_ENCODER\x10\x00\x12\x13\n\x0f\x43ROSS_ATTENTION\x10\x01*j\n\x0cHintPriority\x12\x11\n\rHINT_BALANCED\x10\x00\x12\x1a\n\x16HINT_PRIORITISE_PROMPT\x10\x01\x12\x18\n\x14HINT_PRIORITISE_HINT\x10\x02\x12\x11\n\rHINT_ADAPTIVE\x10\x03*;\n\rLOIPointLabel\x12\x14\n\x10POINT_BACKGROUND\x10\x00\x12\x14\n\x10POINT_FOREGROUND\x10\x01*\x9f\x01\n\x0fInpaintFillMode\x12\x15\n\x11
INPAINT_FILL_AUTO\x10\x00\x12\x15\n\x11INPAINT_FILL_NONE\x10\x01\x12\x18\n\x14INPAINT_FILL_SHUFFLE\x10\x02\x12\x17\n\x13INPAINT_FILL_REPEAT\x10\x03\x12\x13\n\x0fINPAINT_FILL_AI\x10\x04\x12\x16\n\x12INPAINT_FILL_NOISE\x10\x05*\x9b\x04\n\x10\x44iffusionSampler\x12\x10\n\x0cSAMPLER_DDIM\x10\x00\x12\x10\n\x0cSAMPLER_DDPM\x10\x01\x12\x13\n\x0fSAMPLER_K_EULER\x10\x02\x12\x1d\n\x19SAMPLER_K_EULER_ANCESTRAL\x10\x03\x12\x12\n\x0eSAMPLER_K_HEUN\x10\x04\x12\x13\n\x0fSAMPLER_K_DPM_2\x10\x05\x12\x1d\n\x19SAMPLER_K_DPM_2_ANCESTRAL\x10\x06\x12\x11\n\rSAMPLER_K_LMS\x10\x07\x12 \n\x1cSAMPLER_K_DPMPP_2S_ANCESTRAL\x10\x08\x12\x16\n\x12SAMPLER_K_DPMPP_2M\x10\t\x12\x17\n\x13SAMPLER_K_DPMPP_SDE\x10\n\x12\x1f\n\x1aSAMPLER_DPMSOLVERPP_1ORDER\x10\xf4\x03\x12\x1f\n\x1aSAMPLER_DPMSOLVERPP_2ORDER\x10\xf5\x03\x12\x1f\n\x1aSAMPLER_DPMSOLVERPP_3ORDER\x10\xf6\x03\x12\x15\n\x10SAMPLER_DPM_FAST\x10\xa6\x04\x12\x19\n\x14SAMPLER_DPM_ADAPTIVE\x10\xa7\x04\x12)\n SAMPLER_DPMSOLVERPP_2S_ANCESTRAL\x10\xa8\x04\x1a\x02\x08\x01\x12 \n\x17SAMPLER_DPMSOLVERPP_SDE\x10\xa9\x04\x1a\x02\x08\x01\x12\x1f\n\x16SAMPLER_DPMSOLVERPP_2M\x10\xaa\x04\x1a\x02\x08\x01*H\n\x10SamplerNoiseType\x12\x18\n\x14SAMPLER_NOISE_NORMAL\x10\x00\x12\x1a\n\x16SAMPLER_NOISE_BROWNIAN\x10\x01*F\n\x08Upscaler\x12\x10\n\x0cUPSCALER_RGB\x10\x00\x12\x13\n\x0fUPSCALER_GFPGAN\x10\x01\x12\x13\n\x0fUPSCALER_ESRGAN\x10\x02*\xd8\x01\n\x0eGuidancePreset\x12\x18\n\x14GUIDANCE_PRESET_NONE\x10\x00\x12\x1a\n\x16GUIDANCE_PRESET_SIMPLE\x10\x01\x12\x1d\n\x19GUIDANCE_PRESET_FAST_BLUE\x10\x02\x12\x1e\n\x1aGUIDANCE_PRESET_FAST_GREEN\x10\x03\x12\x18\n\x14GUIDANCE_PRESET_SLOW\x10\x04\x12\x1a\n\x16GUIDANCE_PRESET_SLOWER\x10\x05\x12\x1b\n\x17GUIDANCE_PRESET_SLOWEST\x10\x06*\x91\x01\n\x11ModelArchitecture\x12\x1b\n\x17MODEL_ARCHITECTURE_NONE\x10\x00\x12\x1f\n\x1bMODEL_ARCHITECTURE_CLIP_VIT\x10\x01\x12\"\n\x1eMODEL_ARCHITECTURE_CLIP_RESNET\x10\x02\x12\x1a\n\x16MODEL_ARCHITECTURE_LDM\x10\x03*\xa2\x01\n\x06\x41\x63tion\x12\x16\n\x12\x41\x43TION_PASSTHROUGH\x10\x00\x12\x1f\n\x1b\x41\x43TION_REGENERATE_DUPLICATE\x10\x01\x12\x15\n\x11\x41\x43TION_REGENERATE\x10\x02\x12\x1e\n\x1a\x41\x43TION_OBFUSCATE_DUPLICATE\x10\x03\x12\x14\n\x10\x41\x43TION_OBFUSCATE\x10\x04\x12\x12\n\x0e\x41\x43TION_DISCARD\x10\x05*D\n\x0e\x43lassifierMode\x12\x17\n\x13\x43LSFR_MODE_ZEROSHOT\x10\x00\x12\x19\n\x15\x43LSFR_MODE_MULTICLASS\x10\x01*=\n\x0b\x41ssetAction\x12\r\n\tASSET_PUT\x10\x00\x12\r\n\tASSET_GET\x10\x01\x12\x10\n\x0c\x41SSET_DELETE\x10\x02*\x81\x01\n\x08\x41ssetUse\x12\x17\n\x13\x41SSET_USE_UNDEFINED\x10\x00\x12\x13\n\x0f\x41SSET_USE_INPUT\x10\x01\x12\x14\n\x10\x41SSET_USE_OUTPUT\x10\x02\x12\x1a\n\x16\x41SSET_USE_INTERMEDIATE\x10\x03\x12\x15\n\x11\x41SSET_USE_PROJECT\x10\x04*W\n\x0bStageAction\x12\x15\n\x11STAGE_ACTION_PASS\x10\x00\x12\x18\n\x14STAGE_ACTION_DISCARD\x10\x01\x12\x17\n\x13STAGE_ACTION_RETURN\x10\x02\x32\xbe\x02\n\x11GenerationService\x12\x31\n\x08Generate\x12\x10.gooseai.Request\x1a\x0f.gooseai.Answer\"\x00\x30\x01\x12;\n\rChainGenerate\x12\x15.gooseai.ChainRequest\x1a\x0f.gooseai.Answer\"\x00\x30\x01\x12\x39\n\rAsyncGenerate\x12\x10.gooseai.Request\x1a\x14.gooseai.AsyncHandle\"\x00\x12;\n\x0b\x41syncResult\x12\x14.gooseai.AsyncHandle\x1a\x14.gooseai.AsyncAnswer\"\x00\x12\x41\n\x0b\x41syncCancel\x12\x14.gooseai.AsyncHandle\x1a\x1a.gooseai.AsyncCancelAnswer\"\x00\x42;Z9github.com/stability-ai/api-interfaces/gooseai/generationb\x06proto3') _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'generation_pb2', globals()) @@ -36,52 +36,54 
@@ _ARTIFACT.fields_by_name['lora']._serialized_options = b'\030\001' _ARTIFACT.fields_by_name['token_embedding']._options = None _ARTIFACT.fields_by_name['token_embedding']._serialized_options = b'\030\001' - _FINISHREASON._serialized_start=10420 - _FINISHREASON._serialized_end=10489 - _ARTIFACTTYPE._serialized_start=10492 - _ARTIFACTTYPE._serialized_end=10818 - _GAUSSIANDIRECTION._serialized_start=10820 - _GAUSSIANDIRECTION._serialized_end=10897 - _CHANNELSOURCE._serialized_start=10900 - _CHANNELSOURCE._serialized_end=11031 - _RESCALEMODE._serialized_start=11034 - _RESCALEMODE._serialized_end=11172 - _BACKGROUNDREMOVALMODE._serialized_start=11174 - _BACKGROUNDREMOVALMODE._serialized_end=11242 - _MASKPREDICTBEHAVIOUR._serialized_start=11244 - _MASKPREDICTBEHAVIOUR._serialized_end=11361 - _MASKPREDICTMODE._serialized_start=11363 - _MASKPREDICTMODE._serialized_end=11482 - _ARTIFACTSTAGE._serialized_start=11484 - _ARTIFACTSTAGE._serialized_end=11600 - _MASKEDAREAINIT._serialized_start=11602 - _MASKEDAREAINIT._serialized_end=11705 - _WEIGHTMETHOD._serialized_start=11707 - _WEIGHTMETHOD._serialized_end=11760 - _HINTPRIORITY._serialized_start=11762 - _HINTPRIORITY._serialized_end=11868 - _LOIPOINTLABEL._serialized_start=11870 - _LOIPOINTLABEL._serialized_end=11929 - _DIFFUSIONSAMPLER._serialized_start=11932 - _DIFFUSIONSAMPLER._serialized_end=12471 - _SAMPLERNOISETYPE._serialized_start=12473 - _SAMPLERNOISETYPE._serialized_end=12545 - _UPSCALER._serialized_start=12547 - _UPSCALER._serialized_end=12617 - _GUIDANCEPRESET._serialized_start=12620 - _GUIDANCEPRESET._serialized_end=12836 - _MODELARCHITECTURE._serialized_start=12839 - _MODELARCHITECTURE._serialized_end=12984 - _ACTION._serialized_start=12987 - _ACTION._serialized_end=13149 - _CLASSIFIERMODE._serialized_start=13151 - _CLASSIFIERMODE._serialized_end=13219 - _ASSETACTION._serialized_start=13221 - _ASSETACTION._serialized_end=13282 - _ASSETUSE._serialized_start=13285 - _ASSETUSE._serialized_end=13414 - _STAGEACTION._serialized_start=13416 - _STAGEACTION._serialized_end=13503 + _FINISHREASON._serialized_start=10723 + _FINISHREASON._serialized_end=10792 + _ARTIFACTTYPE._serialized_start=10795 + _ARTIFACTTYPE._serialized_end=11121 + _GAUSSIANDIRECTION._serialized_start=11123 + _GAUSSIANDIRECTION._serialized_end=11200 + _CHANNELSOURCE._serialized_start=11203 + _CHANNELSOURCE._serialized_end=11334 + _RESCALEMODE._serialized_start=11337 + _RESCALEMODE._serialized_end=11475 + _BACKGROUNDREMOVALMODE._serialized_start=11477 + _BACKGROUNDREMOVALMODE._serialized_end=11545 + _MASKPREDICTBEHAVIOUR._serialized_start=11547 + _MASKPREDICTBEHAVIOUR._serialized_end=11664 + _MASKPREDICTMODE._serialized_start=11666 + _MASKPREDICTMODE._serialized_end=11785 + _ARTIFACTSTAGE._serialized_start=11787 + _ARTIFACTSTAGE._serialized_end=11903 + _MASKEDAREAINIT._serialized_start=11905 + _MASKEDAREAINIT._serialized_end=12008 + _WEIGHTMETHOD._serialized_start=12010 + _WEIGHTMETHOD._serialized_end=12063 + _HINTPRIORITY._serialized_start=12065 + _HINTPRIORITY._serialized_end=12171 + _LOIPOINTLABEL._serialized_start=12173 + _LOIPOINTLABEL._serialized_end=12232 + _INPAINTFILLMODE._serialized_start=12235 + _INPAINTFILLMODE._serialized_end=12394 + _DIFFUSIONSAMPLER._serialized_start=12397 + _DIFFUSIONSAMPLER._serialized_end=12936 + _SAMPLERNOISETYPE._serialized_start=12938 + _SAMPLERNOISETYPE._serialized_end=13010 + _UPSCALER._serialized_start=13012 + _UPSCALER._serialized_end=13082 + _GUIDANCEPRESET._serialized_start=13085 + _GUIDANCEPRESET._serialized_end=13301 + 
_MODELARCHITECTURE._serialized_start=13304 + _MODELARCHITECTURE._serialized_end=13449 + _ACTION._serialized_start=13452 + _ACTION._serialized_end=13614 + _CLASSIFIERMODE._serialized_start=13616 + _CLASSIFIERMODE._serialized_end=13684 + _ASSETACTION._serialized_start=13686 + _ASSETACTION._serialized_end=13747 + _ASSETUSE._serialized_start=13750 + _ASSETUSE._serialized_end=13879 + _STAGEACTION._serialized_start=13881 + _STAGEACTION._serialized_end=13968 _TOKEN._serialized_start=44 _TOKEN._serialized_end=91 _TOKENS._serialized_start=93 @@ -126,100 +128,104 @@ _IMAGEADJUSTMENT_MASKPREDICT._serialized_end=1854 _IMAGEADJUSTMENT_MASKREUSE._serialized_start=1856 _IMAGEADJUSTMENT_MASKREUSE._serialized_end=1933 - _IMAGEADJUSTMENT._serialized_start=1936 - _IMAGEADJUSTMENT._serialized_end=3125 - _SAFETENSORSMETA._serialized_start=3127 - _SAFETENSORSMETA._serialized_end=3172 - _SAFETENSORSTENSOR._serialized_start=3174 - _SAFETENSORSTENSOR._serialized_end=3239 - _SAFETENSORS._serialized_start=3241 - _SAFETENSORS._serialized_end=3343 - _LORAWEIGHT._serialized_start=3345 - _LORAWEIGHT._serialized_end=3393 - _LORA._serialized_start=3395 - _LORA._serialized_end=3479 - _ARTIFACTREFERENCE._serialized_start=3481 - _ARTIFACTREFERENCE._serialized_end=3582 - _TOKENEMBEDDING._serialized_start=3584 - _TOKENEMBEDDING._serialized_end=3647 - _CACHECONTROL._serialized_start=3649 - _CACHECONTROL._serialized_end=3737 - _ARTIFACT._serialized_start=3740 - _ARTIFACT._serialized_end=4552 - _NAMEDWEIGHT._serialized_start=4554 - _NAMEDWEIGHT._serialized_end=4597 - _TOKENOVERRIDE._serialized_start=4599 - _TOKENOVERRIDE._serialized_end=4677 - _LOIPOINT._serialized_start=4679 - _LOIPOINT._serialized_end=4765 - _LOIRECTANGLE._serialized_start=4767 - _LOIRECTANGLE._serialized_end=4839 - _LOCATIONSOFINTEREST._serialized_start=4841 - _LOCATIONSOFINTEREST._serialized_end=4940 - _PROMPTPARAMETERS._serialized_start=4943 - _PROMPTPARAMETERS._serialized_end=5228 - _PROMPT._serialized_start=5231 - _PROMPT._serialized_end=5472 - _SIGMAPARAMETERS._serialized_start=5475 - _SIGMAPARAMETERS._serialized_end=5608 - _CHURNSETTINGS._serialized_start=5610 - _CHURNSETTINGS._serialized_end=5720 - _SAMPLERPARAMETERS._serialized_start=5723 - _SAMPLERPARAMETERS._serialized_end=6246 - _CONDITIONERPARAMETERS._serialized_start=6249 - _CONDITIONERPARAMETERS._serialized_end=6388 - _SCHEDULEPARAMETERS._serialized_start=6390 - _SCHEDULEPARAMETERS._serialized_end=6496 - _STEPPARAMETER._serialized_start=6499 - _STEPPARAMETER._serialized_end=6727 - _MODEL._serialized_start=6730 - _MODEL._serialized_end=6881 - _CUTOUTPARAMETERS._serialized_start=6884 - _CUTOUTPARAMETERS._serialized_end=7072 - _GUIDANCESCHEDULEPARAMETERS._serialized_start=7074 - _GUIDANCESCHEDULEPARAMETERS._serialized_end=7135 - _GUIDANCEINSTANCEPARAMETERS._serialized_start=7138 - _GUIDANCEINSTANCEPARAMETERS._serialized_end=7417 - _GUIDANCEPARAMETERS._serialized_start=7419 - _GUIDANCEPARAMETERS._serialized_end=7545 - _TRANSFORMTYPE._serialized_start=7547 - _TRANSFORMTYPE._serialized_end=7657 - _EXTENDEDPARAMETER._serialized_start=7659 - _EXTENDEDPARAMETER._serialized_end=7748 - _EXTENDEDPARAMETERS._serialized_start=7750 - _EXTENDEDPARAMETERS._serialized_end=7818 - _HIRESFIXPARAMETERS._serialized_start=7820 - _HIRESFIXPARAMETERS._serialized_end=7900 - _IMAGEPARAMETERS._serialized_start=7903 - _IMAGEPARAMETERS._serialized_end=8583 - _CLASSIFIERCONCEPT._serialized_start=8585 - _CLASSIFIERCONCEPT._serialized_end=8659 - _CLASSIFIERCATEGORY._serialized_start=8662 - _CLASSIFIERCATEGORY._serialized_end=8906 
- _CLASSIFIERPARAMETERS._serialized_start=8909 - _CLASSIFIERPARAMETERS._serialized_end=9093 - _ASSETPARAMETERS._serialized_start=9095 - _ASSETPARAMETERS._serialized_end=9202 - _ANSWERMETA._serialized_start=9205 - _ANSWERMETA._serialized_end=9353 - _ANSWER._serialized_start=9356 - _ANSWER._serialized_end=9525 - _REQUEST._serialized_start=9528 - _REQUEST._serialized_end=9908 - _ONSTATUS._serialized_start=9910 - _ONSTATUS._serialized_end=10029 - _STAGE._serialized_start=10031 - _STAGE._serialized_end=10123 - _CHAINREQUEST._serialized_start=10125 - _CHAINREQUEST._serialized_end=10190 - _ASYNCSTATUS._serialized_start=10192 - _ASYNCSTATUS._serialized_end=10236 - _ASYNCANSWER._serialized_start=10238 - _ASYNCANSWER._serialized_end=10340 - _ASYNCHANDLE._serialized_start=10342 - _ASYNCHANDLE._serialized_end=10397 - _ASYNCCANCELANSWER._serialized_start=10399 - _ASYNCCANCELANSWER._serialized_end=10418 - _GENERATIONSERVICE._serialized_start=13506 - _GENERATIONSERVICE._serialized_end=13824 + _IMAGEADJUSTMENT_MASKSOFTDILATE._serialized_start=1935 + _IMAGEADJUSTMENT_MASKSOFTDILATE._serialized_end=1997 + _IMAGEADJUSTMENT._serialized_start=2000 + _IMAGEADJUSTMENT._serialized_end=3258 + _SAFETENSORSMETA._serialized_start=3260 + _SAFETENSORSMETA._serialized_end=3305 + _SAFETENSORSTENSOR._serialized_start=3307 + _SAFETENSORSTENSOR._serialized_end=3372 + _SAFETENSORS._serialized_start=3374 + _SAFETENSORS._serialized_end=3476 + _LORAWEIGHT._serialized_start=3478 + _LORAWEIGHT._serialized_end=3526 + _LORA._serialized_start=3528 + _LORA._serialized_end=3612 + _ARTIFACTREFERENCE._serialized_start=3614 + _ARTIFACTREFERENCE._serialized_end=3715 + _TOKENEMBEDDING._serialized_start=3717 + _TOKENEMBEDDING._serialized_end=3780 + _CACHECONTROL._serialized_start=3782 + _CACHECONTROL._serialized_end=3870 + _ARTIFACT._serialized_start=3873 + _ARTIFACT._serialized_end=4685 + _NAMEDWEIGHT._serialized_start=4687 + _NAMEDWEIGHT._serialized_end=4730 + _TOKENOVERRIDE._serialized_start=4732 + _TOKENOVERRIDE._serialized_end=4810 + _LOIPOINT._serialized_start=4812 + _LOIPOINT._serialized_end=4898 + _LOIRECTANGLE._serialized_start=4900 + _LOIRECTANGLE._serialized_end=4972 + _LOCATIONSOFINTEREST._serialized_start=4974 + _LOCATIONSOFINTEREST._serialized_end=5073 + _INPAINTPARAMETERS._serialized_start=5075 + _INPAINTPARAMETERS._serialized_end=5158 + _PROMPTPARAMETERS._serialized_start=5161 + _PROMPTPARAMETERS._serialized_end=5531 + _PROMPT._serialized_start=5534 + _PROMPT._serialized_end=5775 + _SIGMAPARAMETERS._serialized_start=5778 + _SIGMAPARAMETERS._serialized_end=5911 + _CHURNSETTINGS._serialized_start=5913 + _CHURNSETTINGS._serialized_end=6023 + _SAMPLERPARAMETERS._serialized_start=6026 + _SAMPLERPARAMETERS._serialized_end=6549 + _CONDITIONERPARAMETERS._serialized_start=6552 + _CONDITIONERPARAMETERS._serialized_end=6691 + _SCHEDULEPARAMETERS._serialized_start=6693 + _SCHEDULEPARAMETERS._serialized_end=6799 + _STEPPARAMETER._serialized_start=6802 + _STEPPARAMETER._serialized_end=7030 + _MODEL._serialized_start=7033 + _MODEL._serialized_end=7184 + _CUTOUTPARAMETERS._serialized_start=7187 + _CUTOUTPARAMETERS._serialized_end=7375 + _GUIDANCESCHEDULEPARAMETERS._serialized_start=7377 + _GUIDANCESCHEDULEPARAMETERS._serialized_end=7438 + _GUIDANCEINSTANCEPARAMETERS._serialized_start=7441 + _GUIDANCEINSTANCEPARAMETERS._serialized_end=7720 + _GUIDANCEPARAMETERS._serialized_start=7722 + _GUIDANCEPARAMETERS._serialized_end=7848 + _TRANSFORMTYPE._serialized_start=7850 + _TRANSFORMTYPE._serialized_end=7960 + 
_EXTENDEDPARAMETER._serialized_start=7962 + _EXTENDEDPARAMETER._serialized_end=8051 + _EXTENDEDPARAMETERS._serialized_start=8053 + _EXTENDEDPARAMETERS._serialized_end=8121 + _HIRESFIXPARAMETERS._serialized_start=8123 + _HIRESFIXPARAMETERS._serialized_end=8203 + _IMAGEPARAMETERS._serialized_start=8206 + _IMAGEPARAMETERS._serialized_end=8886 + _CLASSIFIERCONCEPT._serialized_start=8888 + _CLASSIFIERCONCEPT._serialized_end=8962 + _CLASSIFIERCATEGORY._serialized_start=8965 + _CLASSIFIERCATEGORY._serialized_end=9209 + _CLASSIFIERPARAMETERS._serialized_start=9212 + _CLASSIFIERPARAMETERS._serialized_end=9396 + _ASSETPARAMETERS._serialized_start=9398 + _ASSETPARAMETERS._serialized_end=9505 + _ANSWERMETA._serialized_start=9508 + _ANSWERMETA._serialized_end=9656 + _ANSWER._serialized_start=9659 + _ANSWER._serialized_end=9828 + _REQUEST._serialized_start=9831 + _REQUEST._serialized_end=10211 + _ONSTATUS._serialized_start=10213 + _ONSTATUS._serialized_end=10332 + _STAGE._serialized_start=10334 + _STAGE._serialized_end=10426 + _CHAINREQUEST._serialized_start=10428 + _CHAINREQUEST._serialized_end=10493 + _ASYNCSTATUS._serialized_start=10495 + _ASYNCSTATUS._serialized_end=10539 + _ASYNCANSWER._serialized_start=10541 + _ASYNCANSWER._serialized_end=10643 + _ASYNCHANDLE._serialized_start=10645 + _ASYNCHANDLE._serialized_end=10700 + _ASYNCCANCELANSWER._serialized_start=10702 + _ASYNCCANCELANSWER._serialized_end=10721 + _GENERATIONSERVICE._serialized_start=13971 + _GENERATIONSERVICE._serialized_end=14289 # @@protoc_insertion_point(module_scope) diff --git a/gyre/generated/generation_pb2.pyi b/gyre/generated/generation_pb2.pyi index 8da17e8..af5dc63 100644 --- a/gyre/generated/generation_pb2.pyi +++ b/gyre/generated/generation_pb2.pyi @@ -326,6 +326,29 @@ POINT_BACKGROUND: LOIPointLabel.ValueType # 0 POINT_FOREGROUND: LOIPointLabel.ValueType # 1 global___LOIPointLabel = LOIPointLabel +class _InpaintFillMode: + ValueType = typing.NewType("ValueType", builtins.int) + V: typing_extensions.TypeAlias = ValueType + +class _InpaintFillModeEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[_InpaintFillMode.ValueType], builtins.type): + DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor + INPAINT_FILL_AUTO: _InpaintFillMode.ValueType # 0 + INPAINT_FILL_NONE: _InpaintFillMode.ValueType # 1 + INPAINT_FILL_SHUFFLE: _InpaintFillMode.ValueType # 2 + INPAINT_FILL_REPEAT: _InpaintFillMode.ValueType # 3 + INPAINT_FILL_AI: _InpaintFillMode.ValueType # 4 + INPAINT_FILL_NOISE: _InpaintFillMode.ValueType # 5 + +class InpaintFillMode(_InpaintFillMode, metaclass=_InpaintFillModeEnumTypeWrapper): ... + +INPAINT_FILL_AUTO: InpaintFillMode.ValueType # 0 +INPAINT_FILL_NONE: InpaintFillMode.ValueType # 1 +INPAINT_FILL_SHUFFLE: InpaintFillMode.ValueType # 2 +INPAINT_FILL_REPEAT: InpaintFillMode.ValueType # 3 +INPAINT_FILL_AI: InpaintFillMode.ValueType # 4 +INPAINT_FILL_NOISE: InpaintFillMode.ValueType # 5 +global___InpaintFillMode = InpaintFillMode + class _DiffusionSampler: ValueType = typing.NewType("ValueType", builtins.int) V: typing_extensions.TypeAlias = ValueType @@ -1032,6 +1055,23 @@ class ImageAdjustment_MaskReuse(google.protobuf.message.Message): global___ImageAdjustment_MaskReuse = ImageAdjustment_MaskReuse +@typing_extensions.final +class ImageAdjustment_MaskSoftDilate(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + SIGMA_FIELD_NUMBER: builtins.int + sigma: builtins.int + def __init__( + self, + *, + sigma: builtins.int | None = ..., + ) -> None: ... 
+ def HasField(self, field_name: typing_extensions.Literal["_sigma", b"_sigma", "sigma", b"sigma"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["_sigma", b"_sigma", "sigma", b"sigma"]) -> None: ... + def WhichOneof(self, oneof_group: typing_extensions.Literal["_sigma", b"_sigma"]) -> typing_extensions.Literal["sigma"] | None: ... + +global___ImageAdjustment_MaskSoftDilate = ImageAdjustment_MaskSoftDilate + @typing_extensions.final class ImageAdjustment(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor @@ -1056,6 +1096,7 @@ class ImageAdjustment(google.protobuf.message.Message): SHUFFLE_FIELD_NUMBER: builtins.int MASK_PREDICT_FIELD_NUMBER: builtins.int MASK_REUSE_FIELD_NUMBER: builtins.int + MASK_SOFT_DILATE_FIELD_NUMBER: builtins.int ENGINE_ID_FIELD_NUMBER: builtins.int @property def blur(self) -> global___ImageAdjustment_Gaussian: ... @@ -1097,6 +1138,8 @@ class ImageAdjustment(google.protobuf.message.Message): def mask_predict(self) -> global___ImageAdjustment_MaskPredict: ... @property def mask_reuse(self) -> global___ImageAdjustment_MaskReuse: ... + @property + def mask_soft_dilate(self) -> global___ImageAdjustment_MaskSoftDilate: ... engine_id: builtins.str def __init__( self, @@ -1121,14 +1164,15 @@ class ImageAdjustment(google.protobuf.message.Message): shuffle: global___ImageAdjustment_Shuffle | None = ..., mask_predict: global___ImageAdjustment_MaskPredict | None = ..., mask_reuse: global___ImageAdjustment_MaskReuse | None = ..., + mask_soft_dilate: global___ImageAdjustment_MaskSoftDilate | None = ..., engine_id: builtins.str | None = ..., ) -> None: ... - def HasField(self, field_name: typing_extensions.Literal["_engine_id", b"_engine_id", "adjustment", b"adjustment", "autoscale", b"autoscale", "background_removal", b"background_removal", "blur", b"blur", "canny_edge", b"canny_edge", "channels", b"channels", "crop", b"crop", "depth", b"depth", "edge_detection", b"edge_detection", "engine_id", b"engine_id", "invert", b"invert", "keypose", b"keypose", "levels", b"levels", "mask_predict", b"mask_predict", "mask_reuse", b"mask_reuse", "normal", b"normal", "openpose", b"openpose", "palletize", b"palletize", "quantize", b"quantize", "rescale", b"rescale", "segmentation", b"segmentation", "shuffle", b"shuffle"]) -> builtins.bool: ... - def ClearField(self, field_name: typing_extensions.Literal["_engine_id", b"_engine_id", "adjustment", b"adjustment", "autoscale", b"autoscale", "background_removal", b"background_removal", "blur", b"blur", "canny_edge", b"canny_edge", "channels", b"channels", "crop", b"crop", "depth", b"depth", "edge_detection", b"edge_detection", "engine_id", b"engine_id", "invert", b"invert", "keypose", b"keypose", "levels", b"levels", "mask_predict", b"mask_predict", "mask_reuse", b"mask_reuse", "normal", b"normal", "openpose", b"openpose", "palletize", b"palletize", "quantize", b"quantize", "rescale", b"rescale", "segmentation", b"segmentation", "shuffle", b"shuffle"]) -> None: ... 
+ def HasField(self, field_name: typing_extensions.Literal["_engine_id", b"_engine_id", "adjustment", b"adjustment", "autoscale", b"autoscale", "background_removal", b"background_removal", "blur", b"blur", "canny_edge", b"canny_edge", "channels", b"channels", "crop", b"crop", "depth", b"depth", "edge_detection", b"edge_detection", "engine_id", b"engine_id", "invert", b"invert", "keypose", b"keypose", "levels", b"levels", "mask_predict", b"mask_predict", "mask_reuse", b"mask_reuse", "mask_soft_dilate", b"mask_soft_dilate", "normal", b"normal", "openpose", b"openpose", "palletize", b"palletize", "quantize", b"quantize", "rescale", b"rescale", "segmentation", b"segmentation", "shuffle", b"shuffle"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["_engine_id", b"_engine_id", "adjustment", b"adjustment", "autoscale", b"autoscale", "background_removal", b"background_removal", "blur", b"blur", "canny_edge", b"canny_edge", "channels", b"channels", "crop", b"crop", "depth", b"depth", "edge_detection", b"edge_detection", "engine_id", b"engine_id", "invert", b"invert", "keypose", b"keypose", "levels", b"levels", "mask_predict", b"mask_predict", "mask_reuse", b"mask_reuse", "mask_soft_dilate", b"mask_soft_dilate", "normal", b"normal", "openpose", b"openpose", "palletize", b"palletize", "quantize", b"quantize", "rescale", b"rescale", "segmentation", b"segmentation", "shuffle", b"shuffle"]) -> None: ... @typing.overload def WhichOneof(self, oneof_group: typing_extensions.Literal["_engine_id", b"_engine_id"]) -> typing_extensions.Literal["engine_id"] | None: ... @typing.overload - def WhichOneof(self, oneof_group: typing_extensions.Literal["adjustment", b"adjustment"]) -> typing_extensions.Literal["blur", "invert", "levels", "channels", "rescale", "crop", "depth", "canny_edge", "edge_detection", "segmentation", "keypose", "openpose", "normal", "background_removal", "autoscale", "palletize", "quantize", "shuffle", "mask_predict", "mask_reuse"] | None: ... + def WhichOneof(self, oneof_group: typing_extensions.Literal["adjustment", b"adjustment"]) -> typing_extensions.Literal["blur", "invert", "levels", "channels", "rescale", "crop", "depth", "canny_edge", "edge_detection", "segmentation", "keypose", "openpose", "normal", "background_removal", "autoscale", "palletize", "quantize", "shuffle", "mask_predict", "mask_reuse", "mask_soft_dilate"] | None: ... global___ImageAdjustment = ImageAdjustment @@ -1536,6 +1580,23 @@ class LocationsOfInterest(google.protobuf.message.Message): global___LocationsOfInterest = LocationsOfInterest +@typing_extensions.final +class InpaintParameters(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + FILL_MODE_FIELD_NUMBER: builtins.int + fill_mode: global___InpaintFillMode.ValueType + def __init__( + self, + *, + fill_mode: global___InpaintFillMode.ValueType | None = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["_fill_mode", b"_fill_mode", "fill_mode", b"fill_mode"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["_fill_mode", b"_fill_mode", "fill_mode", b"fill_mode"]) -> None: ... + def WhichOneof(self, oneof_group: typing_extensions.Literal["_fill_mode", b"_fill_mode"]) -> typing_extensions.Literal["fill_mode"] | None: ... 
+ +global___InpaintParameters = InpaintParameters + @typing_extensions.final class PromptParameters(google.protobuf.message.Message): """A set of parameters for each individual Prompt.""" @@ -1548,6 +1609,7 @@ class PromptParameters(google.protobuf.message.Message): TOKEN_OVERRIDES_FIELD_NUMBER: builtins.int CLIP_LAYER_FIELD_NUMBER: builtins.int HINT_PRIORITY_FIELD_NUMBER: builtins.int + INPAINT_PARAMETERS_FIELD_NUMBER: builtins.int init: builtins.bool weight: builtins.float @property @@ -1561,7 +1623,10 @@ class PromptParameters(google.protobuf.message.Message): 0 _or_ 1 == final, 2 = penultimate, 3 = next """ hint_priority: global___HintPriority.ValueType - """Soecify the application mode for hints""" + """Specify the application mode for hints""" + @property + def inpaint_parameters(self) -> global___InpaintParameters: + """Specify the inpaint controls for inpainting""" def __init__( self, *, @@ -1571,9 +1636,10 @@ class PromptParameters(google.protobuf.message.Message): token_overrides: collections.abc.Iterable[global___TokenOverride] | None = ..., clip_layer: builtins.int | None = ..., hint_priority: global___HintPriority.ValueType | None = ..., + inpaint_parameters: global___InpaintParameters | None = ..., ) -> None: ... - def HasField(self, field_name: typing_extensions.Literal["_clip_layer", b"_clip_layer", "_hint_priority", b"_hint_priority", "_init", b"_init", "_weight", b"_weight", "clip_layer", b"clip_layer", "hint_priority", b"hint_priority", "init", b"init", "weight", b"weight"]) -> builtins.bool: ... - def ClearField(self, field_name: typing_extensions.Literal["_clip_layer", b"_clip_layer", "_hint_priority", b"_hint_priority", "_init", b"_init", "_weight", b"_weight", "clip_layer", b"clip_layer", "hint_priority", b"hint_priority", "init", b"init", "named_weights", b"named_weights", "token_overrides", b"token_overrides", "weight", b"weight"]) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["_clip_layer", b"_clip_layer", "_hint_priority", b"_hint_priority", "_init", b"_init", "_inpaint_parameters", b"_inpaint_parameters", "_weight", b"_weight", "clip_layer", b"clip_layer", "hint_priority", b"hint_priority", "init", b"init", "inpaint_parameters", b"inpaint_parameters", "weight", b"weight"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["_clip_layer", b"_clip_layer", "_hint_priority", b"_hint_priority", "_init", b"_init", "_inpaint_parameters", b"_inpaint_parameters", "_weight", b"_weight", "clip_layer", b"clip_layer", "hint_priority", b"hint_priority", "init", b"init", "inpaint_parameters", b"inpaint_parameters", "named_weights", b"named_weights", "token_overrides", b"token_overrides", "weight", b"weight"]) -> None: ... @typing.overload def WhichOneof(self, oneof_group: typing_extensions.Literal["_clip_layer", b"_clip_layer"]) -> typing_extensions.Literal["clip_layer"] | None: ... @typing.overload @@ -1581,6 +1647,8 @@ class PromptParameters(google.protobuf.message.Message): @typing.overload def WhichOneof(self, oneof_group: typing_extensions.Literal["_init", b"_init"]) -> typing_extensions.Literal["init"] | None: ... @typing.overload + def WhichOneof(self, oneof_group: typing_extensions.Literal["_inpaint_parameters", b"_inpaint_parameters"]) -> typing_extensions.Literal["inpaint_parameters"] | None: ... + @typing.overload def WhichOneof(self, oneof_group: typing_extensions.Literal["_weight", b"_weight"]) -> typing_extensions.Literal["weight"] | None: ... 
global___PromptParameters = PromptParameters diff --git a/gyre/generated/stablecabal.openapi.json b/gyre/generated/stablecabal.openapi.json index f9ee51b..768675c 100644 --- a/gyre/generated/stablecabal.openapi.json +++ b/gyre/generated/stablecabal.openapi.json @@ -1807,6 +1807,9 @@ "maskReuse": { "$ref": "#/components/schemas/ImageAdjustment_MaskReuse" }, + "maskSoftDilate": { + "$ref": "#/components/schemas/ImageAdjustment_MaskSoftDilate" + }, "engineId": { "type": "string" } @@ -1983,6 +1986,15 @@ }, "title": "Reuse the most recently predicted mask" }, + "ImageAdjustment_MaskSoftDilate": { + "type": "object", + "properties": { + "sigma": { + "type": "string", + "format": "uint64" + } + } + }, "ImageAdjustment_Normal": { "type": "object", "properties": { @@ -2128,6 +2140,26 @@ } } }, + "InpaintFillMode": { + "type": "string", + "enum": [ + "INPAINT_FILL_AUTO", + "INPAINT_FILL_NONE", + "INPAINT_FILL_SHUFFLE", + "INPAINT_FILL_REPEAT", + "INPAINT_FILL_AI", + "INPAINT_FILL_NOISE" + ], + "default": "INPAINT_FILL_AUTO" + }, + "InpaintParameters": { + "type": "object", + "properties": { + "fillMode": { + "$ref": "#/components/schemas/InpaintFillMode" + } + } + }, "LOIPoint": { "type": "object", "properties": { @@ -2371,6 +2403,9 @@ }, "hintPriority": { "$ref": "#/components/schemas/HintPriority" + }, + "inpaintParameters": { + "$ref": "#/components/schemas/InpaintParameters" } }, "description": "A set of parameters for each individual Prompt." diff --git a/gyre/generated/stablecabal.swagger.json b/gyre/generated/stablecabal.swagger.json index 4191321..c069aeb 100644 --- a/gyre/generated/stablecabal.swagger.json +++ b/gyre/generated/stablecabal.swagger.json @@ -1655,6 +1655,9 @@ "maskReuse": { "$ref": "#/definitions/ImageAdjustment_MaskReuse" }, + "maskSoftDilate": { + "$ref": "#/definitions/ImageAdjustment_MaskSoftDilate" + }, "engineId": { "type": "string" } @@ -1831,6 +1834,15 @@ }, "title": "Reuse the most recently predicted mask" }, + "ImageAdjustment_MaskSoftDilate": { + "type": "object", + "properties": { + "sigma": { + "type": "string", + "format": "uint64" + } + } + }, "ImageAdjustment_Normal": { "type": "object", "properties": { @@ -1978,6 +1990,26 @@ } } }, + "InpaintFillMode": { + "type": "string", + "enum": [ + "INPAINT_FILL_AUTO", + "INPAINT_FILL_NONE", + "INPAINT_FILL_SHUFFLE", + "INPAINT_FILL_REPEAT", + "INPAINT_FILL_AI", + "INPAINT_FILL_NOISE" + ], + "default": "INPAINT_FILL_AUTO" + }, + "InpaintParameters": { + "type": "object", + "properties": { + "fillMode": { + "$ref": "#/definitions/InpaintFillMode" + } + } + }, "LOIPoint": { "type": "object", "properties": { @@ -2221,7 +2253,11 @@ }, "hintPriority": { "$ref": "#/definitions/HintPriority", - "title": "Soecify the application mode for hints" + "title": "Specify the application mode for hints" + }, + "inpaintParameters": { + "$ref": "#/definitions/InpaintParameters", + "title": "Specify the inpaint controls for inpainting" } }, "description": "A set of parameters for each individual Prompt." 
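The generated stubs and schemas above all trace back to one api-interfaces change: PromptParameters gains an optional InpaintParameters message whose fill_mode selects how masked-out pixels are pre-filled before diffusion. As a rough client-side sketch (assuming the regenerated generation_pb2 stubs are on the import path; the surrounding request plumbing is elided):

    import generation_pb2 as generation

    # Ask for shuffle fill rather than the INPAINT_FILL_AUTO default when
    # this image prompt is used as the inpainting init image.
    image_prompt = generation.Prompt(
        artifact=generation.Artifact(type=generation.ARTIFACT_IMAGE),
        parameters=generation.PromptParameters(
            inpaint_parameters=generation.InpaintParameters(
                fill_mode=generation.INPAINT_FILL_SHUFFLE,
            ),
        ),
    )

The artifact payload itself (binary data, mime type and so on) is omitted here; only the new parameters block matters for fill-mode selection.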
diff --git a/gyre/manager.py b/gyre/manager.py index 77f5e5b..57107a3 100644 --- a/gyre/manager.py +++ b/gyre/manager.py @@ -2131,6 +2131,8 @@ def _return_pipeline_to_pool(self, slot): pipeline = slot.pipeline # Deactivate and remove it from the slot + slot.pipeline.subslot.deactivate() + slot.pipeline.subslot = None slot.pipeline.deactivate() slot.pipeline = None @@ -2154,6 +2156,7 @@ def _get_pipeline_from_pool(self, slot, id): # Assign the pipeline to the slot and activate slot.pipeline = pipeline + slot.pipeline.subslot = SubSlot(self, slot) slot.pipeline.activate(slot.device) return pipeline @@ -2204,6 +2207,7 @@ def with_engine(self, id=None, task=None): if not slot.pipeline: existing = False slot.pipeline = self._build_pipeline_for_engine(spec) + slot.pipeline.subslot = SubSlot(self, slot) slot.pipeline.activate(slot.device) if self._ram_monitor: @@ -2219,3 +2223,57 @@ def with_engine(self, id=None, task=None): self._device_queue.put(slot) # All done + + +class SubSlot: + def __init__(self, manager, slot): + self.manager = manager + self.superslot = slot + self.device = slot.device + self.pipeline = None + + def deactivate(self): + if self.pipeline: + self.manager._return_pipeline_to_pool(self) + + @contextmanager + def __call__(self, id=None, task=None): + # TODO: This is all duplicated from with_engine + + if id is None: + id = self.manager._defaults[task if task else "generate"] + + if id is None: + raise EngineNotFoundError("No engine ID provided and no default is set.") + + # Get the engine spec + spec = self.manager._find_spec(id=id) + if not spec or not spec.enabled: + raise EngineNotFoundError(f"Engine ID {id} doesn't exist or isn't enabled.") + + if task is not None and task != spec.task: + raise ValueError(f"Engine ID {id} is for task '{spec.task}' not '{task}'") + + try: + # Get pipeline (create if all pipelines for the id are busy) + + # If a pipeline is already active on this device slot, check if it's the right + # one. 
If not, deactivate it and clear it + if self.pipeline and self.pipeline.id != id: + self.manager._return_pipeline_to_pool(self) + + # If there's no pipeline on this device slot yet, find it (creating it + # if all the existing pipelines are busy) + if not self.pipeline: + self.manager._get_pipeline_from_pool(self, id) + + if not self.pipeline: + self.pipeline = self.manager._build_pipeline_for_engine(spec) + self.pipeline.with_subengine = SubSlot(self.manager, self) + self.pipeline.subslot = SubSlot(self.manager, self) + self.pipeline.activate(self.device) + + # Do the work + yield self.pipeline + finally: + pass diff --git a/gyre/pipeline/inpainting/zitspp_pipeline.py b/gyre/pipeline/inpainting/zitspp_pipeline.py index f29fe68..2644086 100644 --- a/gyre/pipeline/inpainting/zitspp_pipeline.py +++ b/gyre/pipeline/inpainting/zitspp_pipeline.py @@ -11,6 +11,7 @@ from .zitspp_nms import get_nms as get_np_nms # This is a pure pytorch version logger = logging.getLogger(__name__) +logger.setLevel(logging.INFO) import cv2 import numpy as np @@ -180,27 +181,36 @@ def __call__(self, image, mask, obj_removal=False): mask = images.normalise_tensor(mask, 1) # Harden mask - # mask[mask >= 0.999] = 1 - # mask[mask < 1] = 0 + mask[mask >= 0.999] = 1 + mask[mask < 1] = 0 - # image = image * (1 - mask) + # Calculate 256x256 versions of image and mask + image_256 = images.rescale(image, 256, 256, "strict", sharpness=0) + mask_256 = images.rescale(mask, 256, 256, "strict", sharpness=0) + mask_256[mask_256 >= 0.001] = 1 + mask_256[mask_256 < 1] = 0 + # Perform line detection line_256 = self.lsm_hasp_inference(image, mask, mask_th=0.85) + # Convert to CV img = images.toCV(image)[0] - img = img[:, :, ::-1] # ZitsPP works in RGB mode + img = img[:, :, ::-1].copy() # ZitsPP works in RGB mode + + img_256 = images.toCV(image_256)[0] + img_256 = img_256[:, :, ::-1].copy() # ZitsPP works in RGB mode + mask = images.toCV(mask)[0] + mask = (mask > 0).astype(np.uint8) * 255 + + mask_256 = images.toCV(mask_256)[0] + mask_256 = (mask_256 > 0).astype(np.uint8) * 255 # resize/crop if needed imgh, imgw, _ = img.shape - img_256 = resize(img, 256, 256) - - # load mask - mask = cv2.resize(mask, (imgw, imgh), interpolation=cv2.INTER_NEAREST) - mask = (mask > 127).astype(np.uint8) * 255 - mask_256 = cv2.resize(mask, (256, 256), interpolation=cv2.INTER_AREA) - mask_256[mask_256 > 0] = 255 + mask = mask[:, :, 0] + mask_256 = mask_256[:, :, 0] # load gradient img_gray = rgb2gray(img_256) * 255 @@ -220,7 +230,7 @@ def __call__(self, image, mask, obj_removal=False): ) batch = dict() - batch["image"] = to_tensor(img.copy(), norm=True) + batch["image"] = to_tensor(img, norm=True) batch["img_256"] = to_tensor(img_256, norm=True) batch["mask"] = to_tensor(mask) batch["mask_256"] = to_tensor(mask_256) diff --git a/gyre/pipeline/pipeline_wrapper.py b/gyre/pipeline/pipeline_wrapper.py index ba02570..8a9f0e5 100644 --- a/gyre/pipeline/pipeline_wrapper.py +++ b/gyre/pipeline/pipeline_wrapper.py @@ -11,7 +11,12 @@ from gyre.pipeline import pipeline_meta from gyre.pipeline.model_utils import GPUExclusionSet, clone_model -from gyre.pipeline.prompt_types import HintImage, ImageLike, PromptBatchLike +from gyre.pipeline.prompt_types import ( + HintImage, + ImageLike, + InpaintControl, + PromptBatchLike, +) from gyre.pipeline.samplers import build_sampler_set from gyre.pipeline.unified_pipeline import SCHEDULER_NOISE_TYPE from gyre.pipeline.xformers_utils import xformers_mea_available @@ -141,6 +146,8 @@ def activate(self, device, exclusion_set=None): 
) ) + self._pipeline.subslot = self.subslot + def deactivate(self): if self._previous is None: raise Exception("Deactivate called without previous activate") @@ -318,6 +325,7 @@ def __call__( outmask_image: ImageLike | None = None, depth_map: ImageLike | None = None, hint_images: list[HintImage] | None = None, + inpaint_control: InpaintControl | None = None, # The strength of the img2img or inpaint process, if image is provided strength: float = None, # Lora @@ -373,6 +381,7 @@ def __call__( outmask_image=outmask_image, depth_map=depth_map, hint_images=hint_images, + inpaint_control=inpaint_control, strength=strength, lora=lora, token_embeddings=token_embeddings, diff --git a/gyre/pipeline/prompt_types.py b/gyre/pipeline/prompt_types.py index abad5c2..939874e 100644 --- a/gyre/pipeline/prompt_types.py +++ b/gyre/pipeline/prompt_types.py @@ -147,3 +147,14 @@ class LOIRectangle: class LocationsOfInterest: points: list[LOIPoint] rectangles: list[LOIRectangle] + + +InpaintFill = Literal["auto", "none", "shuffle", "repeat", "ai", "noise"] + + +@dataclass +class InpaintControl: + hint_type: str = "" + weight: float = 1.0 + priority: HintPriority = "balanced" + fill_mode: InpaintFill = "auto" diff --git a/gyre/pipeline/unet/hires_fix.py b/gyre/pipeline/unet/hires_fix.py index 95477b9..7181a82 100644 --- a/gyre/pipeline/unet/hires_fix.py +++ b/gyre/pipeline/unet/hires_fix.py @@ -2,8 +2,10 @@ import torch import torchvision.transforms as T +import logging -from gyre import resize_right +from gyre import resize_right, images +from gyre.logging import VisualRecord as vr from gyre.pipeline.easing import Easing from gyre.pipeline.randtools import batched_rand from gyre.pipeline.unet.types import ( @@ -14,6 +16,9 @@ XtTensor, ) +logger = logging.getLogger(__name__) # pylint: disable=invalid-name + + # Indexes into a shape for the height and width dimensions # Negative indexed to work for any number of dimensions Hi, Wi = -2, -1 @@ -46,13 +51,7 @@ def scale_into(latents, scale, target=None, target_shape=None, mode="lanczos"): if mode == "nearest": latents = resize_nearest(latents, scale) else: - latents = resize_right.resize( - latents, - scale_factors=scale, - interp_method=resize_right.interp_methods.lanczos2, - pad_mode="replicate", - antialiasing=False, - ) + latents = images.resize(latents, scale, sharpness=-1, clamp=False) if target is not None and target_shape is not None: raise ValueError("Only provide one of target or target_shape") @@ -194,13 +193,26 @@ def __call__(self, latents: XtTensor, __step, u: float) -> PX0Tensor | XtTensor: hi_merged = torch.where(randmap >= p, lo_upscaled, hi) - # Expand lo back to full tensor size by wrapping with 0 - lo_expanded = torch.zeros_like(hi_merged) + # Expand lo back to full tensor size by filling with the mean + lo_expanded = torch.ones_like(hi_merged) * lo_merged.mean( + dim=[2, 3], keepdim=True + ) lo_expanded[:, :, offseth : offseth + th, offsetw : offsetw + tw] = lo_merged self.latent_debugger.log( "hires", int(u * 1000), torch.cat([lo_expanded, hi_merged], dim=3)[0:1] ) + if False: + logger.debug( + vr( + "{hi} {lo} {hi_merged} {lo_expanded}", + hi=hi, + lo=lo, + lo_expanded=lo_expanded, + hi_merged=hi_merged, + ) + ) + res = torch.concat([lo_expanded, hi_merged]) return cast(type(hi), res) @@ -210,11 +222,39 @@ def image_to_natural( natural_size: int, image: torch.Tensor, oos_fraction: float, - fill=torch.zeros, + fill=None, + fill_blend=None, ): + target = None target_shape = [natural_size, natural_size] scale_factor = down_scale_factor(image.shape, target_shape, oos_fraction) - return scale_into(image, scale_factor,
target_shape=target_shape) + + fill_base = None + if fill is not None: + fill_base = fill([*image.shape[:-2], natural_size, natural_size]) + + if fill is not None and fill_blend is None: + target, target_shape = fill_base, None + + result = scale_into( + image, scale_factor, target=target, target_shape=target_shape + ).clamp(0, 1) + + if fill_blend is not None: + assert fill_base is not None + + logger.debug("image_to_natural: %s %s %s", image.shape, scale_factor, natural_size) + + blend_map = torch.zeros([*image.shape[:-2], natural_size, natural_size]) + scale_into(torch.ones_like(image), scale_factor, target=blend_map) + blend_map = images.directionalblur(blend_map, fill_blend, "up") + + blend_map = blend_map.to(result) + fill_base = fill_base.to(result) + + result = result * blend_map + fill_base * (1 - blend_map) + + return result @classmethod def merge_initial_latents(cls, left, right): diff --git a/gyre/pipeline/unified_pipeline.py b/gyre/pipeline/unified_pipeline.py index 6289ba2..e74c5f8 100644 --- a/gyre/pipeline/unified_pipeline.py +++ b/gyre/pipeline/unified_pipeline.py @@ -1,4 +1,5 @@ import contextlib +import functools import inspect import logging import math @@ -64,12 +65,13 @@ HintImage, HintPriority, ImageLike, + InpaintControl, MismatchedClipLayer, PromptBatch, PromptBatchLike, normalise_clip_layer, ) -from gyre.pipeline.randtools import TorchRandOverride, batched_randn +from gyre.pipeline.randtools import TorchRandOverride, batched_rand, batched_randn from gyre.pipeline.text_embedding import BasicTextEmbedding from gyre.pipeline.text_embedding.lpw_text_embedding import LPWTextEmbedding from gyre.pipeline.text_embedding.text_encoder_alt_layer import TextEncoderAltLayer @@ -265,20 +267,7 @@ def __init__( self.latents_dtype = latents_dtype self.batch_total = batch_total - self.image = self.preprocess_tensor(image) - - def preprocess_tensor(self, tensor): - # Make sure it's BCHW not just CHW - if tensor.ndim == 3: - tensor = tensor[None, ...] - # Strip any alpha - tensor = tensor[:, [0, 1, 2]] - # Adjust to -1 .. 1 - tensor = 2.0 * tensor - 1.0 - # TODO: resize & crop if it doesn't match width & height - - # Done - return tensor + self.image = images.normalise_tensor(image, 3) def _convertToLatents(self, image, mask=None): """ @@ -302,6 +291,8 @@ def _convertToLatents(self, image, mask=None): "This is probably a mistake" ) + image = image * 2 - 1 + image = image.to(device=self.device, dtype=self.pipeline.vae_dtype) if mask is not None: image = image * (mask > 0.5) @@ -397,7 +388,13 @@ def round_mask_low(self, mask): class EnhancedInpaintMode(Img2imgMode, MaskProcessorMixin): def __init__( - self, mask_image, num_inference_steps, strength, latent_debugger, **kwargs + self, + mask_image, + num_inference_steps, + strength, + latent_debugger, + filler, + **kwargs, ): # Check strength if strength < 0 or strength > 2: raise ValueError( f"The value of strength should in [0.0, 2.0] but is {strength}" ) - # When strength > 1, we start allowing the protected area to change too.
Remember that and then set strength - # to 1 for parent class - self.fill_with_shaped_noise = strength >= 1.0 - - self.shaped_noise_strength = min(2 - strength, 1) - self.mask_scale = 1 + # Remember the filler for use later + self.filler = filler + self.filler_takes_latents = "latents" in inspect.signature(filler).parameters strength = min(strength, 1) @@ -425,17 +419,17 @@ def __init__( self.mask = self.mask.to(device=self.device, dtype=self.latents_dtype) # Remove any excluded pixels (0) - high_mask = self.round_mask_high(self.mask) - self.init_latents_orig = self._convertToLatents(self.image, high_mask) + self.high_mask = self.round_mask_high(self.mask) + self.init_latents_orig = self._convertToLatents(self.image, self.high_mask) # low_mask = self.round_mask_low(self.mask) - # blend_mask = self.mask * self.mask_scale + # blend_mask = self.mask self.latent_mask = self.mask_to_latent_mask(self.mask) self.latent_mask = torch.cat([self.latent_mask] * self.batch_total) self.latent_high_mask = self.round_mask_high(self.latent_mask) self.latent_low_mask = self.round_mask_low(self.latent_mask) - self.latent_blend_mask = self.latent_mask * self.mask_scale + self.latent_blend_mask = self.latent_mask self.latent_debugger = latent_debugger @@ -463,157 +457,21 @@ def _matchNorm(self, tensor, like, cf=1): norm_min = like.min() * cf return tensor * norm_range + norm_min - def _fillWithShapedNoise(self, init_latents, noise_mode=5): - """ - noise_mode sets the noise distribution prior to convolution with the latent - - 0: normal, matched to latent, 1: cauchy, matched to latent, 2: log_normal, - 3: standard normal (mean=0, std=1), 4: normal to scheduler SD - 5: random shuffle (does not convolve afterwards) - """ - - # HERE ARE ALL THE THINGS THAT GIVE BETTER OR WORSE RESULTS DEPENDING ON THE IMAGE: - noise_mask_factor = 1 # (1) How much to reduce noise during mask transition - lmask_mode = 3 # 3 (high_mask) seems consistently good. Options are 0 = none, 1 = low mask, 2 = mask as passed, 3 = high mask - nmask_mode = 0 # 1 or 3 seem good, 3 gives good blends slightly more often - fft_norm_mode = "ortho" # forward, backward or ortho. Doesn't seem to affect results too much - - # 0 == to sampler requested std deviation, 1 == to original image distribution - match_mode = 2 - - def latent_mask_for_mode(mode): - if mode == 1: - return self.latent_low_mask - elif mode == 2: - return self.latent_mask - else: - return self.latent_high_mask - - # Current theory: if we can match the noise to the image latents, we get a nice well scaled color blend between the two. - # The nmask mostly adjusts for incorrect scale. 
With correct scale, nmask hurts more than it helps - - # noise_mode = 0 matches well with nmask_mode = 0 - # nmask_mode = 1 or 3 matches well with noise_mode = 1 or 3 - - # Only consider the portion of the init image that aren't completely masked - masked_latents = init_latents - - latent_mask = None - - if lmask_mode > 0: - latent_mask = latent_mask_for_mode(lmask_mode) - masked_latents = masked_latents * latent_mask - - batch_noise = [] - - for generator, split_latents in zip(self.generators, masked_latents.split(1)): - # Generate some noise - noise = torch.zeros_like(split_latents) - if noise_mode == 0 and noise_mode < 1: - noise = noise.normal_( - generator=generator, - mean=split_latents.mean(), - std=split_latents.std(), - ) - elif noise_mode == 1 and noise_mode < 2: - noise = noise.cauchy_( - generator=generator, - median=split_latents.median(), - sigma=split_latents.std(), - ) - elif noise_mode == 2: - noise = noise.log_normal_(generator=generator) - noise = noise - noise.mean() - elif noise_mode == 3: - noise = noise.normal_(generator=generator) - elif noise_mode == 4: - targetSD = self.scheduler.scheduler.init_noise_sigma - noise = noise.normal_(generator=generator, mean=0, std=targetSD) - elif noise_mode == 5: - assert latent_mask is not None - # Seed the numpy RNG from the batch generator, so it's consistent - npseed = torch.randint( - low=0, - high=torch.iinfo(torch.int32).max, - size=[1], - generator=generator, - device=generator.device, - dtype=torch.int32, - ).cpu() - npgen = np.random.default_rng(npseed.numpy()) - # Fill each channel with random pixels selected from the good portion - # of the channel. I wish there was a way to do this in PyTorch :shrug: - channels = [] - for channel in split_latents.split(1, dim=1): - good_pixels = channel.masked_select(latent_mask[[0], [0]].ge(0.5)) - np_mixed = npgen.choice(good_pixels.cpu().numpy(), channel.shape) - channels.append( - torch.from_numpy(np_mixed).to(noise.device).to(noise.dtype) - ) - - # In noise mode 5 we don't convolve. The pixel shuffled noise is already extremely similar to the original in tone. 
- # We allow the user to request some portion is uncolored noise to allow outpaints that differ greatly from original tone - # (with an increasing risk of image discontinuity) - noise = ( - noise.to(generator.device) - .normal_(generator=generator) - .to(noise.device) - ) - noise = ( - noise * (1 - self.shaped_noise_strength) - + torch.cat(channels, dim=1) * self.shaped_noise_strength - ) - - batch_noise.append(noise) - continue - - elif noise_mode == 6: - noise = torch.ones_like(split_latents) - - # Make the noise less of a component of the convolution compared to the latent in the unmasked portion - if nmask_mode > 0: - noise_mask = latent_mask_for_mode(nmask_mode) - noise = noise.mul(1 - (noise_mask * noise_mask_factor)) - - # Color the noise by the latent - noise_fft = torch.fft.fftn(noise.to(torch.float32), norm=fft_norm_mode) - latent_fft = torch.fft.fftn( - split_latents.to(torch.float32), norm=fft_norm_mode - ) - convolve = noise_fft.mul(latent_fft) - noise = torch.fft.ifftn(convolve, norm=fft_norm_mode).real.to( - self.latents_dtype - ) - - # Stretch colored noise to match the image latent - if match_mode == 0: - noise = self._matchToSamplerSD(noise) - elif match_mode == 1: - noise = self._matchNorm(noise, split_latents, cf=1) - elif match_mode == 2: - noise = self._matchToSD(noise, 1) - - batch_noise.append(noise) - - noise = torch.cat(batch_noise, dim=0) - - # And mix resulting noise into the black areas of the mask - return (init_latents * self.latent_mask) + (noise * (1 - self.latent_mask)) - def generateLatents(self): - # Build initial latents from image the same as for img2img - init_latents = self._buildInitialLatents() - # If strength was >=1, filled exposed areas in mask with new, shaped noise - if self.fill_with_shaped_noise: - init_latents = self._fillWithShapedNoise(init_latents) + image = self.image - self.latent_debugger.log("shapednoise", 0, init_latents) + # Fill if using a filler that can't fill latents + if not self.filler_takes_latents: + image = self.filler(image=image, mask=self.high_mask) + # Build initial latents from image the same as for img2img + init_latents = self._convertToLatents(image) + # Fill if using a filler that does take latents + if self.filler_takes_latents: + init_latents = self.filler(latents=init_latents, mask=self.latent_high_mask) # Add the initial noise init_latents = self._addInitialNoise(init_latents) - self.latent_debugger.log("initnoise", 0, init_latents) - # And return return init_latents @@ -662,9 +520,6 @@ def __init__(self, do_classifier_free_guidance, *args, **kwargs): self.inpaint_mask_cache = {} self.masked_lantents_cache = {} - def _fillWithShapedNoise(self, init_latents): - return super()._fillWithShapedNoise(init_latents, noise_mode=5) - def wrap_unet(self, unet: NoisePredictionUNet) -> NoisePredictionUNet: def wrapped_unet(latents: XtTensor, t) -> EpsTensor: if latents.shape[0] == self.inpaint_mask.shape[0]: @@ -728,10 +583,10 @@ def for_model( if isinstance(model, t2i_adapter.T2iAdapter): return UnifiedPipelineHint_T2i( model, - image, clip_model, feature_extractor, fuser, + image, mask, weight, soft_injection, @@ -860,10 +715,10 @@ def style_setup(self): def __init__( self, model, - image, clip_model, feature_extractor, fuser, + image, mask, weight, soft_injection, @@ -1017,7 +872,7 @@ def __call__( if self.is_inpaint: # Adjust image to "inpaint protocol" (-1 for area to be inpainted) - mask = (mask < 0.5).float() + mask = (mask < 0.001).float() condition = (condition * (1 - mask) - mask).to(condition) if self.cfg_only:
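With the shaped-noise path removed, generateLatents above hands pre-filling to self.filler, and filler_takes_latents (derived from the filler's signature) decides where that happens: a filler declaring a latents parameter runs after VAE encoding, anything else runs on the 0..1 image first. A hedged sketch of a custom filler that would fit this convention (the mean-colour fill is illustrative only, not part of this patch):

    def _fill_mean(self, image=None, mask=None):
        # No `latents` parameter, so filler_takes_latents is False and this
        # runs on the raw image before it is encoded to latents.
        mask = (mask > 0.001).to(image)  # 1 = keep, 0 = fill, matching the _fill_* helpers below
        kept = mask.sum(dim=[2, 3], keepdim=True).clamp(min=1)
        mean = (image * mask).sum(dim=[2, 3], keepdim=True) / kept
        # Keep unmasked pixels, fill masked-out pixels with the mean colour
        return image * mask + mean * (1 - mask)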
@@ -1742,6 +1597,53 @@ def text_encoder_for_unet(self, unet): else: return self.text_encoder + def _fill_none(self, image=None, latents=None, mask=None): + return image if image is not None else latents + + def _fill_auto(self, generators, strength=1.0, image=None, mask=None): + if strength < 1.0: + return image + + result = self._fill_ai(image=image, mask=mask) + + if strength > 1.0: + noise_weighting = strength - 1 + noise = self._fill_noise(generators=generators, image=image, mask=mask) + result = result * (1 - noise_weighting) + noise * noise_weighting + + return result + + def _fill_noise(self, generators, image=None, latents=None, mask=None): + # Harden mask + mask = (mask > 0.001).to(image) + # Latents get normal noise, images get linear noise + randfunc = batched_rand if image is not None else batched_randn + source = image if image is not None else latents + assert source is not None + # Get noise and return + noise = randfunc(source.shape, generators, source.device, source.dtype) + return source * mask + noise * (1 - mask) + + def _fill_ai(self, image=None, mask=None): + with self.subslot(task="inpaint") as inpainter: + mask = (mask > 0.001).to(image) + return inpainter( + image=image * mask + torch.ones_like(image) * 0.5 * (1 - mask), + mask=1 - mask, + ) + + def _fill_shuffle(self, image=None, latents=None, mask=None): + input = image if image is not None else latents + return images.infill( + input, 1 - mask, "shuffle", scale=1 if latents is not None else 0.25 + ) + + def _fill_repeat(self, image=None, latents=None, mask=None): + if image is not None: + return images.infill_fast(image, 1 - mask) + else: + return images.infill(latents, 1 - mask, "extend", scale=1) + @torch.no_grad() def __call__( self, @@ -1753,6 +1655,7 @@ def __call__( outmask_image: ImageLike | None = None, depth_map: ImageLike | None = None, hint_images: list[HintImage] | None = None, + inpaint_control: InpaintControl | None = None, strength: float | None = None, num_inference_steps: int = 50, guidance_scale: float = 7.5, @@ -1997,19 +1900,40 @@ def __call__( if outmask_image is not None and image is None: raise ValueError("Can't pass a outmask without an image") - depth_map = None - can_use_depth_unet = False - if self.depth_unet is not None and mask_image is None: - can_use_depth_unet = True + if image is not None: + if isinstance(image, PILImage): + image = images.fromPIL(image) + image = images.normalise_tensor(image, 3) - if image is not None and isinstance(image, PILImage): - image = images.fromPIL(image) + if mask_image is not None: + if isinstance(mask_image, PILImage): + mask_image = images.fromPIL(mask_image) + mask_image = images.normalise_tensor(mask_image, 1) + + if outmask_image is not None: + if isinstance(outmask_image, PILImage): + outmask_image = images.fromPIL(outmask_image) + outmask_image = images.normalise_tensor(outmask_image, 1) + + if inpaint_control is None: + inpaint_control = InpaintControl() + + filler = getattr(self, "_fill_" + inpaint_control.fill_mode) + filler_kwargs = {} - if mask_image is not None and isinstance(mask_image, PILImage): - mask_image = images.fromPIL(mask_image) + if "strength" in inspect.signature(filler).parameters: + filler_kwargs["strength"] = strength + if "generators" in inspect.signature(filler).parameters: + filler_kwargs["generators"] = generators - if outmask_image is not None and isinstance(outmask_image, PILImage): - outmask_image = images.fromPIL(outmask_image) + if filler_kwargs: + filler = functools.partial(filler, **filler_kwargs) + + 
inpaint_mode = None + can_use_inpaint_unet = self.inpaint_unet is not None + + depth_map = None + can_use_depth_unet = self.depth_unet is not None leaf_args: dict[str, Any] = dict( hints=[], @@ -2017,7 +1941,60 @@ def __call__( unet=self.unet, ) + # Figure out the base inpaint mode + + if mask_image is not None: + hint_type = inpaint_control.hint_type or "inpaint" + + if hint_type == "inpaint" and can_use_inpaint_unet: + hint_type = "inpaint/unet" + + if hint_type == "inpaint/unet": + if not can_use_inpaint_unet: + raise ValueError( + "Hint type of inpaint/unet, but an inpaint unet is not available" + ) + + inpaint_mode = "unet" + can_use_inpaint_unet = can_use_depth_unet = False + + else: + handler_models = None + if self.hintset_manager: + handler_models = self.hintset_manager.for_type(hint_type, None) + + if handler_models: + weight, priority = inpaint_control.weight, inpaint_control.priority + + inpaint_mode = "hint" + leaf_args["hints"].append( + UnifiedPipelineHint.for_model( + handler_models, + image, + mask=1 - mask_image, + weight=weight, + soft_injection=priority in {"prompt", "hint"}, + cfg_only=priority == "hint", + batch_total=batch_total, + _meta={"priority": priority}, + ) + ) + + # Final fallback, if no hint handler for inpaint and hint_type was not more explicit + elif hint_type == "inpaint": + inpaint_mode = "legacy" + + else: + raise EnvironmentError( + f"Pipeline doesn't know how to handle hint image of type {hint_type}" + ) + if hint_images is not None: + # Peek into hint images to see if any explicitly require depth/unet + has_du_hint = any( + (hint_image.hint_type == "depth/unet" for hint_image in hint_images) + ) + for hint_image in hint_images: hint_tensor = hint_image.image hint_type = hint_image.hint_type @@ -2028,8 +2005,15 @@ def __call__( if isinstance(hint_tensor, PILImage): hint_tensor = images.fromPIL(hint_tensor) - # If this model has a depth_unet, use it for preference - if hint_type == "depth" and can_use_depth_unet: + if hint_type == "depth" and can_use_depth_unet and not has_du_hint: + hint_type = "depth/unet" + + if hint_type == "depth/unet": + if not can_use_depth_unet: + raise ValueError( + "Hint type of depth/unet, but a depth unet is not available" + ) + logger.debug("Using unet for depth") depth_map = images.normalise_tensor(hint_tensor, 1) @@ -2038,6 +2022,8 @@ def __call__( leaf_args["depth_map"] = 2.0 * depth_map - 1.0 leaf_args["unet"] = self.depth_unet + can_use_inpaint_unet = can_use_depth_unet = False + else: handler_models = None @@ -2074,11 +2060,6 @@ def __call__( f"Pipeline doesn't know how to handle hint image of type {hint_type}" ) - # Find the first hint (if any) that is an inpaint hint - inpaint_hint = next( - (hint for hint in leaf_args["hints"] if hint.is_inpaint), None - ) - # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
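Note how the filler is resolved by name in the hunk above: getattr(self, "_fill_" + inpaint_control.fill_mode) means every InpaintFill value needs a matching _fill_* method, and strength/generators are bound with functools.partial only when the chosen filler declares them. The same signature-sniffing idiom in isolation (a sketch; bind_filler is illustrative, not an API in this patch):

    import functools
    import inspect

    def bind_filler(obj, fill_mode, **available):
        filler = getattr(obj, "_fill_" + fill_mode)
        params = inspect.signature(filler).parameters
        # Pass only the keyword arguments the target filler actually declares
        kwargs = {k: v for k, v in available.items() if k in params}
        return functools.partial(filler, **kwargs) if kwargs else filler

So bind_filler(pipeline, "auto", strength=strength, generators=generators) pre-binds both arguments for _fill_auto, while the same call with fill_mode="shuffle" binds nothing extra, because _fill_shuffle declares neither parameter.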
@@ -2088,18 +2069,13 @@ def __call__( mode_tree = ModeTreeRoot() - if mask_image is not None: - if self.inpaint_unet is not None: - mode_class = EnhancedRunwayInpaintMode - leaf_args["unet"] = self.inpaint_unet - else: - mode_class = EnhancedInpaintMode + if inpaint_mode == "unet": + mode_class = EnhancedRunwayInpaintMode + leaf_args["unet"] = self.inpaint_unet + elif inpaint_mode == "hint" or inpaint_mode == "legacy": + mode_class = EnhancedInpaintMode elif image is not None: mode_class = Img2imgMode - elif inpaint_hint: - mode_class = EnhancedInpaintMode - image, mask_image = inpaint_hint.image, (1 - inpaint_hint.mask) - outmask_image = mask_image else: mode_class = Txt2imgMode @@ -2159,7 +2135,8 @@ def get_natural_opts(child_opts): def image_to_natural( image: torch.Tensor | None, - fill: Callable = torch.zeros, + fill: Callable | None = None, + fill_blend: float | None = None, size: int = unet_pixel_size, ): return ( @@ -2170,6 +2147,7 @@ def image_to_natural( image, oos_fraction=hires_oos_fraction, fill=fill, + fill_blend=fill_blend, ) ) @@ -2186,7 +2164,7 @@ def image_to_natural( hint.extend( lambda image, mask, **_: { "image": image_to_natural(image), - "mask": image_to_natural(mask) + "mask": image_to_natural(mask, torch.zeros) if mask is not None else mask, } @@ -2404,6 +2382,7 @@ def set_adaptive_soft_injection(child_opts): height=height, image=image, mask_image=mask_image, + filler=filler, depth_map=depth_map, latents_dtype=latents_dtype, batch_total=batch_total, @@ -2536,11 +2515,9 @@ def set_adaptive_soft_injection(child_opts): if image is not None and outmask_image is not None: outmask = torch.cat([outmask_image] * batch_total) - outmask = outmask[:, [0]] outmask = outmask.to(result_image) source = torch.cat([image] * batch_total) - source = source[:, [0, 1, 2]] source = source.to(result_image) # We copy the result over the replacement are of the source to build a histogram reference diff --git a/gyre/services/generate.py b/gyre/services/generate.py index 38726fe..376940e 100644 --- a/gyre/services/generate.py +++ b/gyre/services/generate.py @@ -28,6 +28,7 @@ from gyre.pipeline.prompt_types import ( HintImage, HintPriority, + InpaintControl, LOIPoint, LOIRectangle, LocationsOfInterest, @@ -372,7 +373,7 @@ def apply_image_adjustment( if mode == BRM.ALPHA: tensor = torch.cat([tensor, bgmask], dim=1) elif mode == BRM.BLUR: - bg = images.infill(tensor, bgmask, 26) + bg = images.infill_fast(tensor, bgmask, 26) bg = images.gaussianblur(bg, 13) tensor = tensor * bgmask + bg * (1 - bgmask) elif mode == BRM.SOLID: @@ -449,7 +450,7 @@ def apply_image_adjustment( if behaviour == generation_pb2.MASK_AS_ALPHA: tensor = torch.cat([tensor, bgmask], dim=1) elif behaviour == generation_pb2.MASK_OVER_BLUR: - bg = images.infill(tensor, bgmask, 26) + bg = images.infill_fast(tensor, bgmask, 26) bg = images.gaussianblur(bg, 13) tensor = tensor * bgmask + bg * (1 - bgmask) elif behaviour == generation_pb2.MASK_OVER_SOLID: @@ -457,6 +458,14 @@ def apply_image_adjustment( else: raise ValueError("Unknown background removal mode") + elif which == "mask_soft_dilate": + sigma = 32 + if adjustment.mask_soft_dilate.HasField("sigma"): + sigma = adjustment.mask_soft_dilate.sigma + + tensor = images.levels(tensor, 0, 0.001, 0, 1) + tensor = images.directionalblur(tensor, sigma, "up") + else: raise ValueError(f"Unkown image adjustment {which}") @@ -866,6 +875,55 @@ def image(self): self._image_from_artifact(prompt.artifact), ) + def _fill_mode_from_prompt(self, prompt, default="auto"): + if ( + 
prompt.HasField("parameters") + and prompt.parameters.HasField("inpaint_parameters") + and prompt.parameters.inpaint_parameters.HasField("fill_mode") + ): + mode = prompt.parameters.inpaint_parameters.fill_mode + return ( + generation_pb2.InpaintFillMode.Name(mode) + .lower() + .replace("inpaint_fill_", "") + ) + + return default + + def _hint_parameters_from_prompt(self, prompt, default_hint_type=None): + hint_type = prompt.artifact.hint_image_type or default_hint_type + weight = 1.0 + priority = "balanced" + + if prompt.HasField("parameters"): + if prompt.parameters.HasField("weight"): + weight = prompt.parameters.weight + + if prompt.parameters.HasField("hint_priority"): + priority_table: dict[Any, HintPriority] = { + generation_pb2.HINT_BALANCED: "balanced", + generation_pb2.HINT_PRIORITISE_HINT: "hint", + generation_pb2.HINT_PRIORITISE_PROMPT: "prompt", + generation_pb2.HINT_ADAPTIVE: "adaptive", + } + + priority = priority_table[prompt.parameters.hint_priority] + + return SN(weight=weight, hint_type=hint_type, priority=priority) + + def inpaint_control(self): + for prompt in self._prompt_of_type("artifact"): + if prompt.artifact.type == generation_pb2.ARTIFACT_IMAGE: + fill_mode = self._fill_mode_from_prompt(prompt) + hint_params = self._hint_parameters_from_prompt(prompt, "inpaint") + + return InpaintControl( + fill_mode=fill_mode, + hint_type=hint_params.hint_type, + weight=hint_params.weight, + priority=hint_params.priority, + ) + def mask_image(self): for prompt in self._prompt_of_type("artifact"): if prompt.artifact.type == generation_pb2.ARTIFACT_MASK: @@ -888,42 +946,26 @@ def hint_images(self): generation_pb2.ARTIFACT_HINT_IMAGE, generation_pb2.ARTIFACT_DEPTH, }: - # Calculate weight - weight = 1.0 - if prompt.HasField("parameters"): - if prompt.parameters.HasField("weight"): - weight = prompt.parameters.weight - - # Build the actual image - hint_image = self._add_to_echo( - prompt, - self._image_from_artifact(prompt.artifact), - ) - - # Find the hint type (to handle deprecated ARTIFACT_DEPTH type) if prompt.artifact.type == generation_pb2.ARTIFACT_DEPTH: - hint_type = "depth" + default_type = "depth" else: - hint_type = prompt.artifact.hint_image_type + default_type = "" + + # Get the hint parameters + hint_params = self._hint_parameters_from_prompt(prompt, default_type) - priority = "balanced" - if prompt.parameters.HasField("hint_priority"): - priority_table: dict[Any, HintPriority] = { - generation_pb2.HINT_BALANCED: "balanced", - generation_pb2.HINT_PRIORITISE_HINT: "hint", - generation_pb2.HINT_PRIORITISE_PROMPT: "prompt", - generation_pb2.HINT_ADAPTIVE: "adaptive", - } + # Build the actual image + hint_image = self._image_from_artifact(prompt.artifact) - priority = priority_table[prompt.parameters.hint_priority] + self._add_to_echo(prompt, hint_image) # And append the details hint_images.append( HintImage( image=hint_image, - hint_type=hint_type, - weight=weight, - priority=priority, + hint_type=hint_params.hint_type, + weight=hint_params.weight, + priority=hint_params.priority, clip_layer=self._clip_layer_from_prompt(prompt), ) )
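For reference, the fill_mode round trip implemented by _fill_mode_from_prompt above lowercases the wire enum name and strips its prefix, which lines it up exactly with the pipeline's _fill_* methods. A quick interactive check, assuming the regenerated stubs (protobuf enum wrappers expose Name()):

    >>> import generation_pb2 as generation
    >>> mode = generation.INPAINT_FILL_SHUFFLE
    >>> generation.InpaintFillMode.Name(mode).lower().replace("inpaint_fill_", "")
    'shuffle'

which the pipeline then resolves as getattr(self, "_fill_shuffle").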