diff --git a/0.5.1/404.html b/0.5.1/404.html index 5c1fd6c..b4a037e 100644 --- a/0.5.1/404.html +++ b/0.5.1/404.html @@ -306,6 +306,26 @@ +
  • + + + + + Schema considerations + + + + +
  • + + + + + + + + +
  • @@ -333,6 +353,8 @@ + + @@ -340,10 +362,10 @@ - + - diff --git a/0.5.1/api/arrow/index.html b/0.5.1/api/arrow/index.html index 5e8dfdf..17bb1c8 100644 --- a/0.5.1/api/arrow/index.html +++ b/0.5.1/api/arrow/index.html @@ -343,6 +343,26 @@ +
  • + + + + + Schema considerations + + + + +
  • + + + + + + + + +
  • @@ -372,6 +392,8 @@ + + @@ -379,10 +401,10 @@ - + - @@ -849,7 +912,7 @@

    parse_stac_items_to_arrow(
    -    items: Iterable[dict[str, Any]],
    +    items: Iterable[Item | dict[str, Any]],
         *,
         chunk_size: int = 8192,
         schema: Schema | InferredSchema | None = None
    @@ -869,7 +932,7 @@ 

  • items - (Iterable[dict[str, Any]]) + (Iterable[Item | dict[str, Any]]) –

    the STAC Items to convert

    diff --git a/0.5.1/api/legacy/index.html b/0.5.1/api/legacy/index.html index a973909..639bdd6 100644 --- a/0.5.1/api/legacy/index.html +++ b/0.5.1/api/legacy/index.html @@ -16,6 +16,8 @@ + + @@ -341,6 +343,26 @@ +
  • + + + + + Schema considerations + + + + +
  • + + + + + + + + +
  • @@ -370,6 +392,8 @@ + + @@ -377,10 +401,10 @@ - + -
  • @@ -587,6 +652,20 @@

    Direct GeoPandas conversion (Legacy)

    The API listed here was the initial non-Arrow-based STAC-GeoParquet implementation, converting between JSON and GeoPandas directly. For large collections of STAC items, using the new Arrow-based functionality (under the stac_geoparquet.arrow namespace) will be more performant.

    +

    Note that stac_geoparquet lifts the keys in the item properties up to the top level of the DataFrame, similar to geopandas.GeoDataFrame.from_features.

    +
    >>> import requests
    +>>> import stac_geoparquet.arrow
    +>>> import pyarrow.parquet
    +>>> import pyarrow as pa
    +
    +>>> items = requests.get(
    +...     "https://planetarycomputer.microsoft.com/api/stac/v1/collections/sentinel-2-l2a/items"
    +... ).json()["features"]
    +>>> table = pa.Table.from_batches(stac_geoparquet.arrow.parse_stac_items_to_arrow(items))
    +>>> stac_geoparquet.arrow.to_parquet(table, "items.parquet")
    +>>> table2 = pyarrow.parquet.read_table("items.parquet")
    +>>> items2 = list(stac_geoparquet.arrow.stac_table_to_items(table2))
    +
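For the legacy path itself, here is a minimal sketch of the direct GeoPandas conversion, using the to_geodataframe and to_item_collection functions documented on this page (and assuming items is the list of Item dicts fetched above):

    >>> import stac_geoparquet

    >>> # properties are lifted to top-level columns of the GeoDataFrame
    >>> df = stac_geoparquet.to_geodataframe(items)
    >>> item_collection = stac_geoparquet.to_item_collection(df)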
    diff --git a/0.5.1/api/pgstac/index.html b/0.5.1/api/pgstac/index.html new file mode 100644 index 0000000..012d1f6 --- /dev/null +++ b/0.5.1/api/pgstac/index.html @@ -0,0 +1,1367 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + pgstac integration - stac-geoparquet + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + + + + Skip to content + + +
    +
    + +
    + + + + + + + + +
    + + +
    + +
    + + + + + + +
    +
    + + + +
    +
    +
    + + + + + +
    +
    +
    + + + + + + + +
    +
    + + + + + + + +

    pgstac integration

    +

stac_geoparquet.pgstac_reader has some helpers for working with items coming from a pgstac.items table; a usage sketch follows the list below. It takes care of

    +
      +
    • Rehydrating the dehydrated items
    • +
    • Partitioning by time
    • +
    • Injecting dynamic links and assets from a STAC API
    • +
    + + +
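As a minimal sketch (not a definitive recipe), a CollectionConfig can drive an export using the constructor and export_collection signatures documented below; the connection string, output location, and partition frequency here are placeholders:

    from stac_geoparquet.pgstac_reader import CollectionConfig

    # Placeholder configuration; point these at your own pgstac database and storage.
    config = CollectionConfig(
        collection_id="sentinel-2-l2a",
        partition_frequency="MS",  # assumed to be a pandas-style offset alias (monthly)
    )
    config.export_collection(
        conninfo="postgresql://user:password@localhost:5432/pgstac",
        output_protocol="file",
        output_path="/tmp/sentinel-2-l2a",
        storage_options={},
    )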
    + + + +

    + stac_geoparquet.pgstac_reader.CollectionConfig + + + + dataclass + + +

    + + +
    + + +

    Additional collection-based configuration to inject, matching the +dynamic properties from the API.

    + + + + +
    + + + + + + + +
    + + + +

    + collection + + + + property + + +

    +
    collection: Collection
    +
    + +
    +
    + +
    + +
    + + + +

    + collection_id + + + + instance-attribute + + +

    +
    collection_id: str
    +
    + +
    +
    + +
    + +
    + + + +

    + partition_frequency + + + + class-attribute + instance-attribute + + +

    +
    partition_frequency: str | None = None
    +
    + +
    +
    + +
    + +
    + + + +

    + render_config + + + + class-attribute + instance-attribute + + +

    +
    render_config: str | None = None
    +
    + +
    +
    + +
    + +
    + + + +

    + should_inject_dynamic_properties + + + + class-attribute + instance-attribute + + +

    +
    should_inject_dynamic_properties: bool = True
    +
    + +
    +
    + +
    + +
    + + + +

    + stac_api + + + + class-attribute + instance-attribute + + +

    +
    stac_api: str = 'https://planetarycomputer.microsoft.com/api/stac/v1'
    +
    + +
    +
    + +
    + + + +
    + + +

    + __init__ + + +

    +
    __init__(
    +    collection_id: str,
    +    partition_frequency: str | None = None,
    +    stac_api: str = "https://planetarycomputer.microsoft.com/api/stac/v1",
    +    should_inject_dynamic_properties: bool = True,
    +    render_config: str | None = None,
    +) -> None
    +
    + +
    + +
    + +
    + +
    + + +

    + __post_init__ + + +

    +
    __post_init__() -> None
    +
    + +
    + +
    + +
    + +
    + + +

    + export_collection + + +

    +
    export_collection(
    +    conninfo: str,
    +    output_protocol: str,
    +    output_path: str,
    +    storage_options: dict[str, Any],
    +    rewrite: bool = False,
    +    skip_empty_partitions: bool = False,
    +) -> list[str | None]
    +
    + +
    + +
    + +
    + +
    + + +

    + export_partition + + +

    +
    export_partition(
    +    conninfo: str,
    +    query: str,
    +    output_protocol: str,
    +    output_path: str,
    +    storage_options: dict[str, Any] | None = None,
    +    rewrite: bool = False,
    +    skip_empty_partitions: bool = False,
    +) -> str | None
    +
    + +
    + +
    + +
    + +
    + + +

    + export_partition_for_endpoints + + +

    +
    export_partition_for_endpoints(
    +    endpoints: tuple[datetime, datetime],
    +    conninfo: str,
    +    output_protocol: str,
    +    output_path: str,
    +    storage_options: dict[str, Any],
    +    part_number: int | None = None,
    +    total: int | None = None,
    +    rewrite: bool = False,
    +    skip_empty_partitions: bool = False,
    +) -> str | None
    +
    + +
    + +

    Export results for a pair of endpoints.

    + +
    + +
    + +
    + + +

    + generate_endpoints + + +

    +
    generate_endpoints(
    +    since: datetime | None = None,
    +) -> list[tuple[datetime, datetime]]
    +
    + +
    + +
    + +
    + +
    + + +

    + inject_assets + + +

    +
    inject_assets(item: dict[str, Any]) -> None
    +
    + +
    + +
    + +
    + +
    + + + +
    inject_links(item: dict[str, Any]) -> None
    +
    + +
    + +
    + +
    + +
    + + +

    + make_pgstac_items + + +

    +
    make_pgstac_items(
    +    records: list[tuple[str, str, str, datetime, datetime, dict[str, Any]]],
    +    base_item: dict[str, Any],
    +) -> list[dict[str, Any]]
    +
    + +
    + +

    Make STAC items out of pgstac records.

    + + +

    Parameters:

    +
      +
    • + records + (list[tuple[str, str, str, datetime, datetime, dict[str, Any]]]) + – +
      +

      list[tuple] +The dehydrated records from pgstac.items table.

      +
      +
    • +
    • + base_item + (dict[str, Any]) + – +
      +

      dict[str, Any] +The base item from the collection_base_item pgstac function for this +collection. Used for rehydration

      +
      +
    • +
    + +
    + +
    + + + +
    + +
    + +
    + + + + + + + + + + + + + + + + +
    +
    + + + +
    + +
    + + + +
    +
    +
    +
    + + + + + + + + + + \ No newline at end of file diff --git a/0.5.1/assets/images/social/api/pgstac.png b/0.5.1/assets/images/social/api/pgstac.png new file mode 100644 index 0000000..d5c806e Binary files /dev/null and b/0.5.1/assets/images/social/api/pgstac.png differ diff --git a/0.5.1/assets/images/social/drawbacks.png b/0.5.1/assets/images/social/drawbacks.png new file mode 100644 index 0000000..6cc6213 Binary files /dev/null and b/0.5.1/assets/images/social/drawbacks.png differ diff --git a/0.5.1/assets/images/social/schema.png b/0.5.1/assets/images/social/schema.png new file mode 100644 index 0000000..8282f93 Binary files /dev/null and b/0.5.1/assets/images/social/schema.png differ diff --git a/0.5.1/drawbacks/index.html b/0.5.1/drawbacks/index.html new file mode 100644 index 0000000..8d01da9 --- /dev/null +++ b/0.5.1/drawbacks/index.html @@ -0,0 +1,756 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Drawbacks - stac-geoparquet + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + + + + Skip to content + + +
    +
    + +
    + + + + + + + + +
    + + +
    + +
    + + + + + + +
    +
    + + + +
    +
    +
    + + + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    + + + + + + + +

    Drawbacks

    +

    Trying to represent STAC data in GeoParquet has some drawbacks.

    +

    Unable to represent undefined values

    +

    Parquet is unable to represent the difference between undefined and null, and so is unable to perfectly round-trip STAC data with undefined values.

    +

    In JSON a value can have one of three states: defined, undefined, or null. The "b" key in the next three examples illustrates this:

    +

    Defined:

    +
    {
    +  "a": 1,
    +  "b": "foo"
    +}
    +
    +

    Undefined:

    +
    {
    +  "a": 2
    +}
    +
    +

    Null:

    +
    {
    +  "a": 3,
    +  "b": null
    +}
    +
    +

    Because Parquet is a columnar format, it is only able to represent undefined at the column level. So if those three JSON items above were converted to Parquet, the column "b" would exist because it exists in the first and third item, and the second item would have "b" inferred as null:

    + + + + + + + + + + + + + + + + + + + + + +
    ab
    1"foo"
    2null
    3null
    +

    Then when the second item is converted back to JSON, it will be returned as

    +
    {
    +  "a": 2
    +  "b": null
    +}
    +
    +

    which is not strictly equal to the input.
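A small pyarrow sketch (not STAC-specific) shows the same effect: the key that was undefined in the second object comes back as an explicit null after a round trip through a columnar table:

    import pyarrow as pa

    items = [{"a": 1, "b": "foo"}, {"a": 2}, {"a": 3, "b": None}]
    table = pa.Table.from_pylist(items)

    # The second record now carries "b" explicitly, with a null value.
    print(table.to_pylist()[1])  # {'a': 2, 'b': None}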

    +

    Schema difficulties

    +

    JSON is schemaless while Parquet requires a strict schema, and it can be very difficult to unite these two systems. This is such an important consideration that we have a documentation page just to discuss this point.

    + + + + + + + + + + + + + + + + +
    +
    + + + +
    + +
    + + + +
    +
    +
    +
    + + + + + + + + + + \ No newline at end of file diff --git a/0.5.1/index.html b/0.5.1/index.html index 8a397df..ffea1cb 100644 --- a/0.5.1/index.html +++ b/0.5.1/index.html @@ -357,18 +357,9 @@
  • - + - Usage - - - -
  • - -
  • - - - pgstac integration + Documentation @@ -408,6 +399,26 @@ +
  • + + + + + Schema considerations + + + + +
  • + + + + + + + + +
  • @@ -435,6 +446,8 @@ + + @@ -442,10 +455,10 @@ - + - @@ -548,18 +602,9 @@
  • - - - Usage - - - -
  • - -
  • - + - pgstac integration + Documentation @@ -590,32 +635,8 @@

Purpose

GeoParquet can be a good complement to JSON for many bulk-access and analytic use cases. While STAC Items are commonly distributed as individual JSON files on object storage or through a STAC API, STAC GeoParquet allows users to access a large number of STAC items in bulk without making repeated HTTP requests.

    For analytic questions like "find the items in the Sentinel-2 collection in June 2024 over New York City with cloud cover of less than 20%" it can be much, much faster to find the relevant data from a GeoParquet source than from JSON, because GeoParquet needs to load only the relevant columns for that query, not the full data.
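As a rough sketch of such a query, assuming a STAC GeoParquet file whose lifted property columns include eo:cloud_cover and datetime, pyarrow can read only the needed columns and push the filter down:

    import pyarrow.parquet as pq

    # "sentinel-2-l2a.parquet" and the column names are illustrative; they depend
    # on which properties your items actually carry.
    table = pq.read_table(
        "sentinel-2-l2a.parquet",
        columns=["id", "datetime", "eo:cloud_cover", "geometry"],
        filters=[("eo:cloud_cover", "<", 20)],
    )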

    See the STAC-GeoParquet specification for details on the exact schema of the written Parquet files.

    -

    Usage

    -

    Use stac_geoparquet.to_arrow.stac_items_to_arrow and -stac_geoparquet.from_arrow.stac_table_to_items to convert between STAC items -and Arrow tables. Arrow Tables of STAC items can be written to parquet with -stac_geoparquet.to_parquet.to_parquet.

    -

    Note that stac_geoparquet lifts the keys in the item properties up to the top level of the DataFrame, similar to geopandas.GeoDataFrame.from_features.

    -
    >>> import requests
    ->>> import stac_geoparquet.arrow
    ->>> import pyarrow.parquet
    ->>> import pyarrow as pa
    -
    ->>> items = requests.get(
    -...     "https://planetarycomputer.microsoft.com/api/stac/v1/collections/sentinel-2-l2a/items"
    -... ).json()["features"]
    ->>> table = pa.Table.from_batches(stac_geoparquet.arrow.parse_stac_items_to_arrow(items))
    ->>> stac_geoparquet.arrow.to_parquet(table, "items.parquet")
    ->>> table2 = pyarrow.parquet.read_table("items.parquet")
    ->>> items2 = list(stac_geoparquet.arrow.stac_table_to_items(table2))
    -
    -

    pgstac integration

    -

    stac_geoparquet.pgstac_reader has some helpers for working with items coming from a pgstac.items table. It takes care of

    - +

    Documentation

    +

    Documentation website

    diff --git a/0.5.1/objects.inv b/0.5.1/objects.inv index 2e48718..0fc96ba 100644 Binary files a/0.5.1/objects.inv and b/0.5.1/objects.inv differ diff --git a/0.5.1/schema/index.html b/0.5.1/schema/index.html new file mode 100644 index 0000000..067a9e8 --- /dev/null +++ b/0.5.1/schema/index.html @@ -0,0 +1,794 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Schema considerations - stac-geoparquet + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + + + + Skip to content + + +
    +
    + +
    + + + + + + + + +
    + + +
    + +
    + + + + + + +
    +
    + + + +
    +
    +
    + + + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    + + + + + + + +

    Schema considerations

    +

    A STAC Item is a JSON object to describe an external geospatial dataset. The STAC specification defines a common core, plus a variety of extensions. Additionally, STAC Items may include custom extensions outside the common ones. Crucially, the majority of the specified fields in the core spec and extensions define optional keys. Those keys often differ across STAC collections and may even differ within a single collection across items.

    +

    STAC's flexibility is a blessing and a curse. The flexibility of schemaless JSON allows for very easy writing as each object can be dumped separately to JSON. Every item is allowed to have a different schema. And newer items are free to have a different schema than older items in the same collection. But this write-time flexibility makes it harder to read as there are no guarantees (outside STAC's few required fields) about what fields exist.

    +

    Parquet is the complete opposite of JSON. Parquet has a strict schema that must be known before writing can start. This puts the burden of work onto the writer instead of the reader. Reading Parquet is very efficient because the file's metadata defines the exact schema of every record. This also enables use cases like reading specific columns that would not be possible without a strict schema.

    +

    This conversion from schemaless to strict-schema is the difficult part of converting STAC from JSON to GeoParquet, especially for large input datasets like STAC that are often larger than memory.

    +

    Full scan over input data

    +

    The most foolproof way to convert STAC JSON to GeoParquet is to perform a full scan over input data. This is done automatically by parse_stac_ndjson_to_arrow when a schema is not provided.

    +

    This is time consuming as it requires two full passes over the input data: once to infer a common schema and again to actually write to Parquet (though items are never fully held in memory, allowing this process to scale).
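For example, here is a sketch of relying on that automatic inference (the file name is a placeholder):

    import pyarrow as pa
    import stac_geoparquet.arrow

    # The schema is inferred with a first pass over the file; batches are then
    # yielded with that common schema on the second pass.
    batches = stac_geoparquet.arrow.parse_stac_ndjson_to_arrow("items.ndjson")
    table = pa.Table.from_batches(batches)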

    +

    User-provided schema

    +

Alternatively, the user can pass in an Arrow schema themselves using the schema parameter of parse_stac_ndjson_to_arrow. This schema must match the on-disk schema of the STAC JSON data.
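A sketch of the call shape only; the field list below is purely illustrative, and a real schema must mirror the on-disk layout of your STAC JSON (geometry, bbox, properties, links, assets, and so on):

    import pyarrow as pa
    import stac_geoparquet.arrow

    # Illustrative, incomplete schema; it must match the JSON as written on disk.
    json_schema = pa.schema(
        [
            ("type", pa.string()),
            ("stac_version", pa.string()),
            ("id", pa.string()),
            # ... geometry, bbox, properties, links, assets, etc.
        ]
    )
    batches = stac_geoparquet.arrow.parse_stac_ndjson_to_arrow(
        "items.ndjson", schema=json_schema
    )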

    +

    Multiple schemas per collection

    +

    It is also possible to write multiple Parquet files with STAC data where each Parquet file may have a different schema. This simplifies the conversion and writing process but makes reading and using the Parquet data harder.

    +

    Merging data with schema mismatch

    +

If you've created STAC GeoParquet data where the schema has changed, you can use pyarrow.concat_tables with promote_options="permissive" to combine multiple STAC GeoParquet files.

    +
    import pyarrow as pa
    +import pyarrow.parquet as pq
    +
    +table_1 = pq.read_table("stac1.parquet")
    +table_2 = pq.read_table("stac2.parquet")
+combined_table = pa.concat_tables([table_1, table_2], promote_options="permissive")
    +
    +

    Future work

    +

Schema operations are an area where future work can improve the reliability and ease of use of STAC GeoParquet.

    +

    It's possible that in the future we could automatically infer an Arrow schema from the STAC specification's published JSON Schema files. If you're interested in this, open an issue and discuss.

    + + + + + + + + + + + + + + + + +
    +
    + + + +
    + +
    + + + +
    +
    +
    +
    + + + + + + + + + + \ No newline at end of file diff --git a/0.5.1/search/search_index.json b/0.5.1/search/search_index.json index de3d393..582ab72 100644 --- a/0.5.1/search/search_index.json +++ b/0.5.1/search/search_index.json @@ -1 +1 @@ -{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"STAC-geoparquet","text":"

    Convert STAC items between JSON, GeoParquet, pgstac, and Delta Lake.

    "},{"location":"#purpose","title":"Purpose","text":"

    The STAC spec defines a JSON-based schema. But it can be hard to manage and search through many millions of STAC items in JSON format. For one, JSON is very large on disk. And you need to parse the entire JSON data into memory to extract just a small piece of information, say the datetime and one asset of an Item.

    GeoParquet can be a good complement to JSON for many bulk-access and analytic use cases. While STAC Items are commonly distributed as individual JSON files on object storage or through a STAC API, STAC GeoParquet allows users to access a large number of STAC items in bulk without making repeated HTTP requests.

    For analytic questions like \"find the items in the Sentinel-2 collection in June 2024 over New York City with cloud cover of less than 20%\" it can be much, much faster to find the relevant data from a GeoParquet source than from JSON, because GeoParquet needs to load only the relevant columns for that query, not the full data.

    See the STAC-GeoParquet specification for details on the exact schema of the written Parquet files.

    "},{"location":"#usage","title":"Usage","text":"

    Use stac_geoparquet.to_arrow.stac_items_to_arrow and stac_geoparquet.from_arrow.stac_table_to_items to convert between STAC items and Arrow tables. Arrow Tables of STAC items can be written to parquet with stac_geoparquet.to_parquet.to_parquet.

    Note that stac_geoparquet lifts the keys in the item properties up to the top level of the DataFrame, similar to geopandas.GeoDataFrame.from_features.

    >>> import requests\n>>> import stac_geoparquet.arrow\n>>> import pyarrow.parquet\n>>> import pyarrow as pa\n\n>>> items = requests.get(\n...     \"https://planetarycomputer.microsoft.com/api/stac/v1/collections/sentinel-2-l2a/items\"\n... ).json()[\"features\"]\n>>> table = pa.Table.from_batches(stac_geoparquet.arrow.parse_stac_items_to_arrow(items))\n>>> stac_geoparquet.arrow.to_parquet(table, \"items.parquet\")\n>>> table2 = pyarrow.parquet.read_table(\"items.parquet\")\n>>> items2 = list(stac_geoparquet.arrow.stac_table_to_items(table2))\n
    "},{"location":"#pgstac-integration","title":"pgstac integration","text":"

    stac_geoparquet.pgstac_reader has some helpers for working with items coming from a pgstac.items table. It takes care of

    "},{"location":"usage/","title":"Usage","text":""},{"location":"api/arrow/","title":"stac_geoparquet.arrow","text":"

    Arrow-based format conversions.

    "},{"location":"api/arrow/#stac_geoparquet.arrow","title":"stac_geoparquet.arrow","text":""},{"location":"api/arrow/#stac_geoparquet.arrow.DEFAULT_JSON_CHUNK_SIZE","title":"DEFAULT_JSON_CHUNK_SIZE module-attribute","text":"
    DEFAULT_JSON_CHUNK_SIZE = 65536\n

    The default chunk size to use for reading JSON into memory.

    "},{"location":"api/arrow/#stac_geoparquet.arrow.DEFAULT_PARQUET_SCHEMA_VERSION","title":"DEFAULT_PARQUET_SCHEMA_VERSION module-attribute","text":"
    DEFAULT_PARQUET_SCHEMA_VERSION: SUPPORTED_PARQUET_SCHEMA_VERSIONS = '1.1.0'\n

    The default GeoParquet schema version written to file.

    "},{"location":"api/arrow/#stac_geoparquet.arrow.SUPPORTED_PARQUET_SCHEMA_VERSIONS","title":"SUPPORTED_PARQUET_SCHEMA_VERSIONS module-attribute","text":"
    SUPPORTED_PARQUET_SCHEMA_VERSIONS = Literal['1.0.0', '1.1.0']\n

    A Literal type with the supported GeoParquet schema versions.

    "},{"location":"api/arrow/#stac_geoparquet.arrow.parse_stac_items_to_arrow","title":"parse_stac_items_to_arrow","text":"
    parse_stac_items_to_arrow(\n    items: Iterable[dict[str, Any]],\n    *,\n    chunk_size: int = 8192,\n    schema: Schema | InferredSchema | None = None\n) -> Iterable[RecordBatch]\n

    Parse a collection of STAC Items to an iterable of pyarrow.RecordBatch.

    The objects under properties are moved up to the top-level of the Table, similar to geopandas.GeoDataFrame.from_features.

    Parameters:

    Returns:

    "},{"location":"api/arrow/#stac_geoparquet.arrow.parse_stac_ndjson_to_arrow","title":"parse_stac_ndjson_to_arrow","text":"
    parse_stac_ndjson_to_arrow(\n    path: str | Path | Iterable[str | Path],\n    *,\n    chunk_size: int = DEFAULT_JSON_CHUNK_SIZE,\n    schema: Schema | None = None,\n    limit: int | None = None\n) -> Iterator[RecordBatch]\n

    Convert one or more newline-delimited JSON STAC files to a generator of Arrow RecordBatches.

    Each RecordBatch in the returned iterator is guaranteed to have an identical schema, and can be used to write to one or more Parquet files.

    Parameters:

    Other Parameters:

    Yields:

    "},{"location":"api/arrow/#stac_geoparquet.arrow.parse_stac_ndjson_to_delta_lake","title":"parse_stac_ndjson_to_delta_lake","text":"
    parse_stac_ndjson_to_delta_lake(\n    input_path: str | Path | Iterable[str | Path],\n    table_or_uri: str | Path | DeltaTable,\n    *,\n    chunk_size: int = DEFAULT_JSON_CHUNK_SIZE,\n    schema: Schema | None = None,\n    limit: int | None = None,\n    schema_version: SUPPORTED_PARQUET_SCHEMA_VERSIONS = DEFAULT_PARQUET_SCHEMA_VERSION,\n    **kwargs: Any\n) -> None\n

    Convert one or more newline-delimited JSON STAC files to Delta Lake

    Parameters:

    Parameters:

    "},{"location":"api/arrow/#stac_geoparquet.arrow.parse_stac_ndjson_to_parquet","title":"parse_stac_ndjson_to_parquet","text":"
    parse_stac_ndjson_to_parquet(\n    input_path: str | Path | Iterable[str | Path],\n    output_path: str | Path,\n    *,\n    chunk_size: int = DEFAULT_JSON_CHUNK_SIZE,\n    schema: Schema | InferredSchema | None = None,\n    limit: int | None = None,\n    schema_version: SUPPORTED_PARQUET_SCHEMA_VERSIONS = DEFAULT_PARQUET_SCHEMA_VERSION,\n    **kwargs: Any\n) -> None\n

    Convert one or more newline-delimited JSON STAC files to GeoParquet

    Parameters:

    Other Parameters:

    "},{"location":"api/arrow/#stac_geoparquet.arrow.stac_table_to_items","title":"stac_table_to_items","text":"
    stac_table_to_items(table: Table) -> Iterable[dict]\n

    Convert a STAC Table to a generator of STAC Item dicts

    "},{"location":"api/arrow/#stac_geoparquet.arrow.stac_table_to_ndjson","title":"stac_table_to_ndjson","text":"
    stac_table_to_ndjson(table: Table, dest: str | Path | PathLike[bytes]) -> None\n

    Write a STAC Table to a newline-delimited JSON file.

    "},{"location":"api/arrow/#stac_geoparquet.arrow.to_parquet","title":"to_parquet","text":"
    to_parquet(\n    table: Table,\n    where: Any,\n    *,\n    schema_version: SUPPORTED_PARQUET_SCHEMA_VERSIONS = DEFAULT_PARQUET_SCHEMA_VERSION,\n    **kwargs: Any\n) -> None\n

    Write an Arrow table with STAC data to GeoParquet

    This writes metadata compliant with either GeoParquet 1.0 or 1.1.

    Parameters:

    Other Parameters:

    "},{"location":"api/legacy/","title":"Direct GeoPandas conversion (Legacy)","text":"

    The API listed here was the initial non-Arrow-based STAC-GeoParquet implementation, converting between JSON and GeoPandas directly. For large collections of STAC items, using the new Arrow-based functionality (under the stac_geoparquet.arrow namespace) will be more performant.

    "},{"location":"api/legacy/#stac_geoparquet.to_geodataframe","title":"stac_geoparquet.to_geodataframe","text":"
    to_geodataframe(\n    items: Sequence[dict[str, Any]],\n    add_self_link: bool = False,\n    dtype_backend: DTYPE_BACKEND | None = None,\n    datetime_precision: str = \"ns\",\n) -> GeoDataFrame\n

    Convert a sequence of STAC items to a geopandas.GeoDataFrame.

    The objects under properties are moved up to the top-level of the DataFrame, similar to geopandas.GeoDataFrame.from_features.

    Parameters:

    Returns:

    "},{"location":"api/legacy/#stac_geoparquet.to_item_collection","title":"stac_geoparquet.to_item_collection","text":"
    to_item_collection(df: GeoDataFrame) -> ItemCollection\n

    Convert a GeoDataFrame of STAC items to a pystac.ItemCollection.

    Parameters:

    Returns:

    "},{"location":"api/legacy/#stac_geoparquet.to_dict","title":"stac_geoparquet.to_dict","text":"
    to_dict(record: dict) -> dict\n

    Create a dictionary representing a STAC item from a row of the GeoDataFrame.

    Parameters:

    "},{"location":"spec/stac-geoparquet-spec/","title":"STAC GeoParquet Specification","text":""},{"location":"spec/stac-geoparquet-spec/#overview","title":"Overview","text":"

    This document specifies how to map a set of STAC Items into GeoParquet. It is directly inspired by the STAC GeoParquet library, but aims to provide guidance for anyone putting STAC data into GeoParquet.

    "},{"location":"spec/stac-geoparquet-spec/#use-cases","title":"Use cases","text":""},{"location":"spec/stac-geoparquet-spec/#guidelines","title":"Guidelines","text":"

    Each row in the Parquet Dataset represents a single STAC item. Most all the fields in a STAC Item should be mapped to a column in GeoParquet. We embrace Parquet structures where possible, mapping from JSON into nested structures. We do pull the properties to the top level, so that it is easier to query and use them. The names of most of the fields should be the same in STAC and in GeoParquet.

    Field GeoParquet Type Required Details type String Optional This is just needed for GeoJSON, so it is optional and not recommended to include in GeoParquet stac_extensions List of Strings Required This column is required, but can be empty if no STAC extensions were used id String Required Required, should be unique within each collection geometry Binary (WKB) Required For GeoParquet 1.0 this must be well-known Binary bbox Struct of Floats Required Can be a 4 or 6 value struct, depending on dimension of the data. It must conform to the \"Bounding Box Columns\" definition of GeoParquet 1.1. links List of Link structs Required See Link Struct for more info assets An Assets struct Required See Asset Struct for more info collection String Optional The ID of the collection this Item is a part of. See notes below on 'Collection' and 'Collection JSON' in the Parquet metadata property columns varies - Each property should use the relevant Parquet type, and be pulled out of the properties object to be a top-level Parquet field "},{"location":"spec/stac-geoparquet-spec/#link-struct","title":"Link Struct","text":"

    The GeoParquet dataset can contain zero or more Link Structs. Each Link Struct has 2 required fields and 2 optional ones:

    Field Name Type Description href string REQUIRED. The actual link in the format of an URL. Relative and absolute links are both allowed. rel string REQUIRED. Relationship between the current document and the linked document. See chapter \"Relation types\" for more information. type string Media type of the referenced entity. title string A human readable title to be used in rendered displays of the link.

    See Link Object for more.

    "},{"location":"spec/stac-geoparquet-spec/#asset-struct","title":"Asset Struct","text":"

    The GeoParquet dataset can contain zero or more Asset Structs. Each Asset Struct can have the following fields:

    Field Name Type Description href string REQUIRED. URI to the asset object. Relative and absolute URI are both allowed. title string The displayed title for clients and users. description string A description of the Asset providing additional details, such as how it was processed or created. CommonMark 0.29 syntax MAY be used for rich text representation. type string Media type of the asset. See the common media types in the best practice doc for commonly used asset types. roles [string] The semantic roles of the asset, similar to the use of rel in links.

    Each struct has each full asset key and object as a sub-struct, it's a direct mapping from the JSON to Parquet

    To take advantage of Parquet's columnar nature and compression, the assets should be uniform so they can be represented by a simple schema, which in turn means every item should probably come from the same STAC collection.

    See Asset Object for more.

    "},{"location":"spec/stac-geoparquet-spec/#including-a-stac-collection-json-in-a-stac-geoparquet-collection","title":"Including a STAC Collection JSON in a STAC Geoparquet Collection","text":"

    To make a stac-geoparquet file a fully self-contained representation, you can include the Collection JSON in the Parquet metadata. If present in the Parquet file metadata, the key must be stac:collection and the value must be a JSON string with the Collection JSON.

    "},{"location":"spec/stac-geoparquet-spec/#referencing-a-stac-geoparquet-collections-in-a-stac-collection-json","title":"Referencing a STAC Geoparquet Collections in a STAC Collection JSON","text":"

    A common use case of stac-geoparquet is to create a mirror of a STAC collection. To refer to this mirror in the original collection, use an Asset Object at the collection level of the STAC JSON that includes the application/vnd.apache.parquet Media type and collection-mirror Role type to describe the function of the Geoparquet STAC Collection Asset.

    For example:

    Field Name Type Value href string s3://example/uri/to/file.parquet title string An example STAC GeoParquet. description string Example description. type string application/vnd.apache.parquet roles [string] [collection-mirror]*

    *Note the IANA has not approved the new Media type application/vnd.apache.parquet yet, it's been submitted for approval.

    The description should ideally include details about the spatial partitioning method.

    "},{"location":"spec/stac-geoparquet-spec/#mapping-to-other-geospatial-data-formats","title":"Mapping to other geospatial data formats","text":"

    The principles here can likely be used to map into other geospatial data formats (GeoPackage, FlatGeobuf, etc), but we embrace Parquet's nested 'structs' for some of the mappings, so other formats will need to do something different. The obvious thing to do is to dump JSON into those fields, but that's outside the scope of this document, and we recommend creating a general document for that.

    "}]} \ No newline at end of file +{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"STAC-geoparquet","text":"

    Convert STAC items between JSON, GeoParquet, pgstac, and Delta Lake.

    "},{"location":"#purpose","title":"Purpose","text":"

    The STAC spec defines a JSON-based schema. But it can be hard to manage and search through many millions of STAC items in JSON format. For one, JSON is very large on disk. And you need to parse the entire JSON data into memory to extract just a small piece of information, say the datetime and one asset of an Item.

    GeoParquet can be a good complement to JSON for many bulk-access and analytic use cases. While STAC Items are commonly distributed as individual JSON files on object storage or through a STAC API, STAC GeoParquet allows users to access a large number of STAC items in bulk without making repeated HTTP requests.

    For analytic questions like \"find the items in the Sentinel-2 collection in June 2024 over New York City with cloud cover of less than 20%\" it can be much, much faster to find the relevant data from a GeoParquet source than from JSON, because GeoParquet needs to load only the relevant columns for that query, not the full data.

    See the STAC-GeoParquet specification for details on the exact schema of the written Parquet files.

    "},{"location":"#documentation","title":"Documentation","text":"

    Documentation website

    "},{"location":"drawbacks/","title":"Drawbacks","text":"

    Trying to represent STAC data in GeoParquet has some drawbacks.

    "},{"location":"drawbacks/#unable-to-represent-undefined-values","title":"Unable to represent undefined values","text":"

    Parquet is unable to represent the difference between undefined and null, and so is unable to perfectly round-trip STAC data with undefined values.

    In JSON a value can have one of three states: defined, undefined, or null. The \"b\" key in the next three examples illustrates this:

    Defined:

    {\n  \"a\": 1,\n  \"b\": \"foo\"\n}\n

    Undefined:

    {\n  \"a\": 2\n}\n

    Null:

    {\n  \"a\": 3,\n  \"b\": null\n}\n

    Because Parquet is a columnar format, it is only able to represent undefined at the column level. So if those three JSON items above were converted to Parquet, the column \"b\" would exist because it exists in the first and third item, and the second item would have \"b\" inferred as null:

    a b 1 \"foo\" 2 null 3 null

    Then when the second item is converted back to JSON, it will be returned as

    {\n  \"a\": 2\n  \"b\": null\n}\n

    which is not strictly equal to the input.

    "},{"location":"drawbacks/#schema-difficulties","title":"Schema difficulties","text":"

    JSON is schemaless while Parquet requires a strict schema, and it can be very difficult to unite these two systems. This is such an important consideration that we have a documentation page just to discuss this point.

    "},{"location":"schema/","title":"Schema considerations","text":"

    A STAC Item is a JSON object to describe an external geospatial dataset. The STAC specification defines a common core, plus a variety of extensions. Additionally, STAC Items may include custom extensions outside the common ones. Crucially, the majority of the specified fields in the core spec and extensions define optional keys. Those keys often differ across STAC collections and may even differ within a single collection across items.

    STAC's flexibility is a blessing and a curse. The flexibility of schemaless JSON allows for very easy writing as each object can be dumped separately to JSON. Every item is allowed to have a different schema. And newer items are free to have a different schema than older items in the same collection. But this write-time flexibility makes it harder to read as there are no guarantees (outside STAC's few required fields) about what fields exist.

    Parquet is the complete opposite of JSON. Parquet has a strict schema that must be known before writing can start. This puts the burden of work onto the writer instead of the reader. Reading Parquet is very efficient because the file's metadata defines the exact schema of every record. This also enables use cases like reading specific columns that would not be possible without a strict schema.

    This conversion from schemaless to strict-schema is the difficult part of converting STAC from JSON to GeoParquet, especially for large input datasets like STAC that are often larger than memory.

    "},{"location":"schema/#full-scan-over-input-data","title":"Full scan over input data","text":"

    The most foolproof way to convert STAC JSON to GeoParquet is to perform a full scan over input data. This is done automatically by parse_stac_ndjson_to_arrow when a schema is not provided.

    This is time consuming as it requires two full passes over the input data: once to infer a common schema and again to actually write to Parquet (though items are never fully held in memory, allowing this process to scale).

    "},{"location":"schema/#user-provided-schema","title":"User-provided schema","text":"

Alternatively, the user can pass in an Arrow schema themselves using the schema parameter of parse_stac_ndjson_to_arrow. This schema must match the on-disk schema of the STAC JSON data.

    "},{"location":"schema/#multiple-schemas-per-collection","title":"Multiple schemas per collection","text":"

    It is also possible to write multiple Parquet files with STAC data where each Parquet file may have a different schema. This simplifies the conversion and writing process but makes reading and using the Parquet data harder.

    "},{"location":"schema/#merging-data-with-schema-mismatch","title":"Merging data with schema mismatch","text":"

If you've created STAC GeoParquet data where the schema has changed, you can use pyarrow.concat_tables with promote_options=\"permissive\" to combine multiple STAC GeoParquet files.

import pyarrow as pa\nimport pyarrow.parquet as pq\n\ntable_1 = pq.read_table(\"stac1.parquet\")\ntable_2 = pq.read_table(\"stac2.parquet\")\ncombined_table = pa.concat_tables([table_1, table_2], promote_options=\"permissive\")\n
    "},{"location":"schema/#future-work","title":"Future work","text":"

Schema operations are an area where future work can improve the reliability and ease of use of STAC GeoParquet.

    It's possible that in the future we could automatically infer an Arrow schema from the STAC specification's published JSON Schema files. If you're interested in this, open an issue and discuss.

    "},{"location":"usage/","title":"Usage","text":"

    Except for the legacy API, Apache Arrow is used as the in-memory interchange format between all formats. While some end-to-end helper functions are provided, the user can go through Arrow objects for maximal flexibility in the conversion process.

    All functionality that goes through Arrow is currently exported via the stac_geoparquet.arrow namespace.

    "},{"location":"usage/#dictjson-arrow-conversion","title":"dict/JSON - Arrow conversion","text":""},{"location":"usage/#convert-dicts-to-arrow","title":"Convert dicts to Arrow","text":"

    Use parse_stac_items_to_arrow to convert STAC items either in memory or on disk to a stream of Arrow record batches. This accepts either an iterable of Python dicts or an iterable of pystac.Item objects.

    "},{"location":"usage/#convert-json-to-arrow","title":"Convert JSON to Arrow","text":"

    parse_stac_ndjson_to_arrow is a helper function to take one or more JSON or newline-delimited JSON files on disk, infer the schema from all of them, and convert the data to a stream of Arrow record batches.

    "},{"location":"usage/#convert-arrow-to-dicts","title":"Convert Arrow to dicts","text":"

    Use stac_table_to_items to convert a table or stream of Arrow record batches of STAC data to a generator of Python dicts. This accepts either a pyarrow.Table or a pyarrow.RecordBatchReader, which allows conversions of larger-than-memory files in a streaming manner.

    "},{"location":"usage/#convert-arrow-to-json","title":"Convert Arrow to JSON","text":"

Use stac_table_to_ndjson to write a table or stream of Arrow record batches of STAC data to a newline-delimited JSON file. This accepts either a pyarrow.Table or a pyarrow.RecordBatchReader, which allows conversions of larger-than-memory files in a streaming manner.

    "},{"location":"usage/#parquet","title":"Parquet","text":"

    Use to_parquet to write STAC Arrow data in memory. This is a special function to ensure that GeoParquet 1.0 or 1.1 metadata is written to the Parquet file.

    parse_stac_ndjson_to_parquet is a helper that connects reading (newline-delimited) JSON on disk to writing out to a Parquet file.

    No special API is required for reading a STAC GeoParquet file back into Arrow. You can use pyarrow.parquet.read_table or pyarrow.parquet.ParquetFile directly to read the STAC GeoParquet data back into Arrow.

    "},{"location":"usage/#delta-lake","title":"Delta Lake","text":"

    Use parse_stac_ndjson_to_delta_lake to read (newline-delimited) JSON on disk and write out to a Delta Lake table.

    No special API is required for reading a STAC Delta Lake table back into Arrow. You can use the DeltaTable class directly to read the data back into Arrow.

    Important

    Arrow has a null data type, where every value in the column is always null, but Delta Lake does not. This means that for any column inferred to have a null data type, writing to Delta Lake will error with

    _internal.SchemaMismatchError: Invalid data type for Delta Lake: Null\n

This is a problem because if all items in a STAC Collection have a null JSON key, it gets inferred as an Arrow null type. For example, the 3dep-lidar-copc collection used in the tests has start_datetime and end_datetime fields, and so, according to the spec, datetime is always null. This column would need to be cast to a timestamp type before being written to Delta Lake.

    This means we cannot write this collection to Delta Lake solely with automatic schema inference.

    In such cases, users may need to manually update the inferred schema to cast any null type to another Delta Lake-compatible type.

    "},{"location":"api/arrow/","title":"stac_geoparquet.arrow","text":"

    Arrow-based format conversions.

    "},{"location":"api/arrow/#stac_geoparquet.arrow","title":"stac_geoparquet.arrow","text":""},{"location":"api/arrow/#stac_geoparquet.arrow.DEFAULT_JSON_CHUNK_SIZE","title":"DEFAULT_JSON_CHUNK_SIZE module-attribute","text":"
    DEFAULT_JSON_CHUNK_SIZE = 65536\n

    The default chunk size to use for reading JSON into memory.

    "},{"location":"api/arrow/#stac_geoparquet.arrow.DEFAULT_PARQUET_SCHEMA_VERSION","title":"DEFAULT_PARQUET_SCHEMA_VERSION module-attribute","text":"
    DEFAULT_PARQUET_SCHEMA_VERSION: SUPPORTED_PARQUET_SCHEMA_VERSIONS = '1.1.0'\n

    The default GeoParquet schema version written to file.

    "},{"location":"api/arrow/#stac_geoparquet.arrow.SUPPORTED_PARQUET_SCHEMA_VERSIONS","title":"SUPPORTED_PARQUET_SCHEMA_VERSIONS module-attribute","text":"
    SUPPORTED_PARQUET_SCHEMA_VERSIONS = Literal['1.0.0', '1.1.0']\n

    A Literal type with the supported GeoParquet schema versions.

    "},{"location":"api/arrow/#stac_geoparquet.arrow.parse_stac_items_to_arrow","title":"parse_stac_items_to_arrow","text":"
    parse_stac_items_to_arrow(\n    items: Iterable[Item | dict[str, Any]],\n    *,\n    chunk_size: int = 8192,\n    schema: Schema | InferredSchema | None = None\n) -> Iterable[RecordBatch]\n

    Parse a collection of STAC Items to an iterable of pyarrow.RecordBatch.

    The objects under properties are moved up to the top-level of the Table, similar to geopandas.GeoDataFrame.from_features.

    Parameters:

    Returns:

    "},{"location":"api/arrow/#stac_geoparquet.arrow.parse_stac_ndjson_to_arrow","title":"parse_stac_ndjson_to_arrow","text":"
    parse_stac_ndjson_to_arrow(\n    path: str | Path | Iterable[str | Path],\n    *,\n    chunk_size: int = DEFAULT_JSON_CHUNK_SIZE,\n    schema: Schema | None = None,\n    limit: int | None = None\n) -> Iterator[RecordBatch]\n

    Convert one or more newline-delimited JSON STAC files to a generator of Arrow RecordBatches.

    Each RecordBatch in the returned iterator is guaranteed to have an identical schema, and can be used to write to one or more Parquet files.

    Parameters:

    Other Parameters:

    Yields:

    "},{"location":"api/arrow/#stac_geoparquet.arrow.parse_stac_ndjson_to_delta_lake","title":"parse_stac_ndjson_to_delta_lake","text":"
    parse_stac_ndjson_to_delta_lake(\n    input_path: str | Path | Iterable[str | Path],\n    table_or_uri: str | Path | DeltaTable,\n    *,\n    chunk_size: int = DEFAULT_JSON_CHUNK_SIZE,\n    schema: Schema | None = None,\n    limit: int | None = None,\n    schema_version: SUPPORTED_PARQUET_SCHEMA_VERSIONS = DEFAULT_PARQUET_SCHEMA_VERSION,\n    **kwargs: Any\n) -> None\n

    Convert one or more newline-delimited JSON STAC files to Delta Lake

    Parameters:

    Parameters:

    "},{"location":"api/arrow/#stac_geoparquet.arrow.parse_stac_ndjson_to_parquet","title":"parse_stac_ndjson_to_parquet","text":"
    parse_stac_ndjson_to_parquet(\n    input_path: str | Path | Iterable[str | Path],\n    output_path: str | Path,\n    *,\n    chunk_size: int = DEFAULT_JSON_CHUNK_SIZE,\n    schema: Schema | InferredSchema | None = None,\n    limit: int | None = None,\n    schema_version: SUPPORTED_PARQUET_SCHEMA_VERSIONS = DEFAULT_PARQUET_SCHEMA_VERSION,\n    **kwargs: Any\n) -> None\n

    Convert one or more newline-delimited JSON STAC files to GeoParquet

    Parameters:

    Other Parameters:

    "},{"location":"api/arrow/#stac_geoparquet.arrow.stac_table_to_items","title":"stac_table_to_items","text":"
    stac_table_to_items(table: Table) -> Iterable[dict]\n

    Convert a STAC Table to a generator of STAC Item dicts

    "},{"location":"api/arrow/#stac_geoparquet.arrow.stac_table_to_ndjson","title":"stac_table_to_ndjson","text":"
    stac_table_to_ndjson(table: Table, dest: str | Path | PathLike[bytes]) -> None\n

    Write a STAC Table to a newline-delimited JSON file.

    "},{"location":"api/arrow/#stac_geoparquet.arrow.to_parquet","title":"to_parquet","text":"
    to_parquet(\n    table: Table,\n    where: Any,\n    *,\n    schema_version: SUPPORTED_PARQUET_SCHEMA_VERSIONS = DEFAULT_PARQUET_SCHEMA_VERSION,\n    **kwargs: Any\n) -> None\n

    Write an Arrow table with STAC data to GeoParquet

    This writes metadata compliant with either GeoParquet 1.0 or 1.1.

    Parameters:

    Other Parameters:

    "},{"location":"api/legacy/","title":"Direct GeoPandas conversion (Legacy)","text":"

    The API listed here was the initial non-Arrow-based STAC-GeoParquet implementation, converting between JSON and GeoPandas directly. For large collections of STAC items, using the new Arrow-based functionality (under the stac_geoparquet.arrow namespace) will be more performant.

    Note that stac_geoparquet lifts the keys in the item properties up to the top level of the DataFrame, similar to geopandas.GeoDataFrame.from_features.

    >>> import requests\n>>> import stac_geoparquet.arrow\n>>> import pyarrow.parquet\n>>> import pyarrow as pa\n\n>>> items = requests.get(\n...     \"https://planetarycomputer.microsoft.com/api/stac/v1/collections/sentinel-2-l2a/items\"\n... ).json()[\"features\"]\n>>> table = pa.Table.from_batches(stac_geoparquet.arrow.parse_stac_items_to_arrow(items))\n>>> stac_geoparquet.arrow.to_parquet(table, \"items.parquet\")\n>>> table2 = pyarrow.parquet.read_table(\"items.parquet\")\n>>> items2 = list(stac_geoparquet.arrow.stac_table_to_items(table2))\n
    "},{"location":"api/legacy/#stac_geoparquet.to_geodataframe","title":"stac_geoparquet.to_geodataframe","text":"
    to_geodataframe(\n    items: Sequence[dict[str, Any]],\n    add_self_link: bool = False,\n    dtype_backend: DTYPE_BACKEND | None = None,\n    datetime_precision: str = \"ns\",\n) -> GeoDataFrame\n

    Convert a sequence of STAC items to a geopandas.GeoDataFrame.

    The objects under properties are moved up to the top-level of the DataFrame, similar to geopandas.GeoDataFrame.from_features.

    Parameters:

    Returns:

    "},{"location":"api/legacy/#stac_geoparquet.to_item_collection","title":"stac_geoparquet.to_item_collection","text":"
    to_item_collection(df: GeoDataFrame) -> ItemCollection\n

    Convert a GeoDataFrame of STAC items to a pystac.ItemCollection.

    Parameters:

    Returns:

    "},{"location":"api/legacy/#stac_geoparquet.to_dict","title":"stac_geoparquet.to_dict","text":"
    to_dict(record: dict) -> dict\n

    Create a dictionary representing a STAC item from a row of the GeoDataFrame.

    Parameters:

    "},{"location":"api/pgstac/","title":"pgstac integration","text":"

    stac_geoparquet.pgstac_reader has some helpers for working with items coming from a pgstac.items table. It takes care of

    "},{"location":"api/pgstac/#stac_geoparquet.pgstac_reader.CollectionConfig","title":"stac_geoparquet.pgstac_reader.CollectionConfig dataclass","text":"

    Additional collection-based configuration to inject, matching the dynamic properties from the API.

    "},{"location":"api/pgstac/#stac_geoparquet.pgstac_reader.CollectionConfig.collection","title":"collection property","text":"
    collection: Collection\n
    "},{"location":"api/pgstac/#stac_geoparquet.pgstac_reader.CollectionConfig.collection_id","title":"collection_id instance-attribute","text":"
    collection_id: str\n
    "},{"location":"api/pgstac/#stac_geoparquet.pgstac_reader.CollectionConfig.partition_frequency","title":"partition_frequency class-attribute instance-attribute","text":"
    partition_frequency: str | None = None\n
    "},{"location":"api/pgstac/#stac_geoparquet.pgstac_reader.CollectionConfig.render_config","title":"render_config class-attribute instance-attribute","text":"
    render_config: str | None = None\n
    "},{"location":"api/pgstac/#stac_geoparquet.pgstac_reader.CollectionConfig.should_inject_dynamic_properties","title":"should_inject_dynamic_properties class-attribute instance-attribute","text":"
    should_inject_dynamic_properties: bool = True\n
    "},{"location":"api/pgstac/#stac_geoparquet.pgstac_reader.CollectionConfig.stac_api","title":"stac_api class-attribute instance-attribute","text":"
    stac_api: str = 'https://planetarycomputer.microsoft.com/api/stac/v1'\n
    "},{"location":"api/pgstac/#stac_geoparquet.pgstac_reader.CollectionConfig.__init__","title":"__init__","text":"
    __init__(\n    collection_id: str,\n    partition_frequency: str | None = None,\n    stac_api: str = \"https://planetarycomputer.microsoft.com/api/stac/v1\",\n    should_inject_dynamic_properties: bool = True,\n    render_config: str | None = None,\n) -> None\n
    "},{"location":"api/pgstac/#stac_geoparquet.pgstac_reader.CollectionConfig.__post_init__","title":"__post_init__","text":"
    __post_init__() -> None\n
    "},{"location":"api/pgstac/#stac_geoparquet.pgstac_reader.CollectionConfig.export_collection","title":"export_collection","text":"
    export_collection(\n    conninfo: str,\n    output_protocol: str,\n    output_path: str,\n    storage_options: dict[str, Any],\n    rewrite: bool = False,\n    skip_empty_partitions: bool = False,\n) -> list[str | None]\n
    "},{"location":"api/pgstac/#stac_geoparquet.pgstac_reader.CollectionConfig.export_partition","title":"export_partition","text":"
    export_partition(\n    conninfo: str,\n    query: str,\n    output_protocol: str,\n    output_path: str,\n    storage_options: dict[str, Any] | None = None,\n    rewrite: bool = False,\n    skip_empty_partitions: bool = False,\n) -> str | None\n
    "},{"location":"api/pgstac/#stac_geoparquet.pgstac_reader.CollectionConfig.export_partition_for_endpoints","title":"export_partition_for_endpoints","text":"
    export_partition_for_endpoints(\n    endpoints: tuple[datetime, datetime],\n    conninfo: str,\n    output_protocol: str,\n    output_path: str,\n    storage_options: dict[str, Any],\n    part_number: int | None = None,\n    total: int | None = None,\n    rewrite: bool = False,\n    skip_empty_partitions: bool = False,\n) -> str | None\n

    Export results for a pair of endpoints.

    "},{"location":"api/pgstac/#stac_geoparquet.pgstac_reader.CollectionConfig.generate_endpoints","title":"generate_endpoints","text":"
    generate_endpoints(\n    since: datetime | None = None,\n) -> list[tuple[datetime, datetime]]\n
    "},{"location":"api/pgstac/#stac_geoparquet.pgstac_reader.CollectionConfig.inject_assets","title":"inject_assets","text":"
    inject_assets(item: dict[str, Any]) -> None\n
    "},{"location":"api/pgstac/#stac_geoparquet.pgstac_reader.CollectionConfig.inject_links","title":"inject_links","text":"
    inject_links(item: dict[str, Any]) -> None\n
    "},{"location":"api/pgstac/#stac_geoparquet.pgstac_reader.CollectionConfig.make_pgstac_items","title":"make_pgstac_items","text":"
    make_pgstac_items(\n    records: list[tuple[str, str, str, datetime, datetime, dict[str, Any]]],\n    base_item: dict[str, Any],\n) -> list[dict[str, Any]]\n

    Make STAC items out of pgstac records.

    Parameters:

    "},{"location":"spec/stac-geoparquet-spec/","title":"STAC GeoParquet Specification","text":""},{"location":"spec/stac-geoparquet-spec/#overview","title":"Overview","text":"

    This document specifies how to map a set of STAC Items into GeoParquet. It is directly inspired by the STAC GeoParquet library, but aims to provide guidance for anyone putting STAC data into GeoParquet.

    "},{"location":"spec/stac-geoparquet-spec/#use-cases","title":"Use cases","text":""},{"location":"spec/stac-geoparquet-spec/#guidelines","title":"Guidelines","text":"

    Each row in the Parquet Dataset represents a single STAC item. Most all the fields in a STAC Item should be mapped to a column in GeoParquet. We embrace Parquet structures where possible, mapping from JSON into nested structures. We do pull the properties to the top level, so that it is easier to query and use them. The names of most of the fields should be the same in STAC and in GeoParquet.

    Field GeoParquet Type Required Details type String Optional This is just needed for GeoJSON, so it is optional and not recommended to include in GeoParquet stac_extensions List of Strings Required This column is required, but can be empty if no STAC extensions were used id String Required Required, should be unique within each collection geometry Binary (WKB) Required For GeoParquet 1.0 this must be well-known Binary bbox Struct of Floats Required Can be a 4 or 6 value struct, depending on dimension of the data. It must conform to the \"Bounding Box Columns\" definition of GeoParquet 1.1. links List of Link structs Required See Link Struct for more info assets An Assets struct Required See Asset Struct for more info collection String Optional The ID of the collection this Item is a part of. See notes below on 'Collection' and 'Collection JSON' in the Parquet metadata property columns varies - Each property should use the relevant Parquet type, and be pulled out of the properties object to be a top-level Parquet field "},{"location":"spec/stac-geoparquet-spec/#link-struct","title":"Link Struct","text":"

    The GeoParquet dataset can contain zero or more Link Structs. Each Link Struct has 2 required fields and 2 optional ones:

    Field Name Type Description href string REQUIRED. The actual link in the format of an URL. Relative and absolute links are both allowed. rel string REQUIRED. Relationship between the current document and the linked document. See chapter \"Relation types\" for more information. type string Media type of the referenced entity. title string A human readable title to be used in rendered displays of the link.

    See Link Object for more.

    "},{"location":"spec/stac-geoparquet-spec/#asset-struct","title":"Asset Struct","text":"

    The GeoParquet dataset can contain zero or more Asset Structs. Each Asset Struct can have the following fields:

    Field Name Type Description href string REQUIRED. URI to the asset object. Relative and absolute URI are both allowed. title string The displayed title for clients and users. description string A description of the Asset providing additional details, such as how it was processed or created. CommonMark 0.29 syntax MAY be used for rich text representation. type string Media type of the asset. See the common media types in the best practice doc for commonly used asset types. roles [string] The semantic roles of the asset, similar to the use of rel in links.

Each struct has each full asset key and object as a sub-struct; it's a direct mapping from the JSON to Parquet.

    To take advantage of Parquet's columnar nature and compression, the assets should be uniform so they can be represented by a simple schema, which in turn means every item should probably come from the same STAC collection.

    See Asset Object for more.

    "},{"location":"spec/stac-geoparquet-spec/#including-a-stac-collection-json-in-a-stac-geoparquet-collection","title":"Including a STAC Collection JSON in a STAC Geoparquet Collection","text":"

    To make a stac-geoparquet file a fully self-contained representation, you can include the Collection JSON in the Parquet metadata. If present in the Parquet file metadata, the key must be stac:collection and the value must be a JSON string with the Collection JSON.

    "},{"location":"spec/stac-geoparquet-spec/#referencing-a-stac-geoparquet-collections-in-a-stac-collection-json","title":"Referencing a STAC Geoparquet Collections in a STAC Collection JSON","text":"

    A common use case of stac-geoparquet is to create a mirror of a STAC collection. To refer to this mirror in the original collection, use an Asset Object at the collection level of the STAC JSON that includes the application/vnd.apache.parquet Media type and collection-mirror Role type to describe the function of the Geoparquet STAC Collection Asset.

    For example:

    Field Name Type Value href string s3://example/uri/to/file.parquet title string An example STAC GeoParquet. description string Example description. type string application/vnd.apache.parquet roles [string] [collection-mirror]*

*Note that the IANA has not yet approved the new Media type application/vnd.apache.parquet; it has been submitted for approval.

    The description should ideally include details about the spatial partitioning method.

    "},{"location":"spec/stac-geoparquet-spec/#mapping-to-other-geospatial-data-formats","title":"Mapping to other geospatial data formats","text":"

    The principles here can likely be used to map into other geospatial data formats (GeoPackage, FlatGeobuf, etc), but we embrace Parquet's nested 'structs' for some of the mappings, so other formats will need to do something different. The obvious thing to do is to dump JSON into those fields, but that's outside the scope of this document, and we recommend creating a general document for that.

    "}]} \ No newline at end of file diff --git a/0.5.1/sitemap.xml b/0.5.1/sitemap.xml index 7128ae8..c7f9c19 100644 --- a/0.5.1/sitemap.xml +++ b/0.5.1/sitemap.xml @@ -5,6 +5,16 @@ 2024-06-24 daily + + https://stac-utils.github.io/stac-geoparquet/latest/drawbacks/ + 2024-06-24 + daily + + + https://stac-utils.github.io/stac-geoparquet/latest/schema/ + 2024-06-24 + daily + https://stac-utils.github.io/stac-geoparquet/latest/usage/ 2024-06-24 @@ -20,6 +30,11 @@ 2024-06-24 daily + + https://stac-utils.github.io/stac-geoparquet/latest/api/pgstac/ + 2024-06-24 + daily + https://stac-utils.github.io/stac-geoparquet/latest/spec/stac-geoparquet-spec/ 2024-06-24 diff --git a/0.5.1/sitemap.xml.gz b/0.5.1/sitemap.xml.gz index 19fcbb4..ff0ffba 100644 Binary files a/0.5.1/sitemap.xml.gz and b/0.5.1/sitemap.xml.gz differ diff --git a/0.5.1/spec/stac-geoparquet-spec/index.html b/0.5.1/spec/stac-geoparquet-spec/index.html index 2046e83..835168a 100644 --- a/0.5.1/spec/stac-geoparquet-spec/index.html +++ b/0.5.1/spec/stac-geoparquet-spec/index.html @@ -13,7 +13,7 @@ - + @@ -341,6 +341,26 @@ + + +
  • + + + + + Schema considerations + + + + +
  • + + + + + + + @@ -488,6 +508,8 @@ + + @@ -495,10 +517,10 @@ - + -