diff --git a/Cargo.toml b/Cargo.toml index c2eed35..0d03c54 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -2,6 +2,8 @@ members = [ "ntex-grpc", "ntex-grpc-codegen", + "prost-build", + "prost-derive", "examples/helloworld" ] @@ -9,4 +11,5 @@ members = [ ntex-grpc = { path = "./ntex-grpc" } ntex-grpc-codegen = { path = "./ntex-grpc-codegen" } -ntex-h2 = { git = "https://github.com/ntex-rs/ntex-h2.git" } +ntex-prost-build = { path = "./prost-build" } +ntex-prost-derive = { path = "./prost-derive" } diff --git a/examples/helloworld/helloworld.proto b/examples/helloworld/helloworld.proto index 8de5d08..5ad413d 100644 --- a/examples/helloworld/helloworld.proto +++ b/examples/helloworld/helloworld.proto @@ -28,10 +28,10 @@ service Greeter { // The request message containing the user's name. message HelloRequest { - string name = 1; + bytes name = 1; } // The response message containing the greetings message HelloReply { string message = 1; -} \ No newline at end of file +} diff --git a/examples/helloworld/src/client.rs b/examples/helloworld/src/client.rs index 8a08b39..4a53153 100644 --- a/examples/helloworld/src/client.rs +++ b/examples/helloworld/src/client.rs @@ -58,12 +58,13 @@ fn main() { } loop { - client + let res = client .say_hello(&HelloRequest { name: "world".into(), }) .await .unwrap(); + println!("RES: {:?}", res); counters.register_request(); break; } diff --git a/examples/helloworld/src/helloworld.rs b/examples/helloworld/src/helloworld.rs index f0d303c..1e56f7e 100644 --- a/examples/helloworld/src/helloworld.rs +++ b/examples/helloworld/src/helloworld.rs @@ -1,24 +1,31 @@ +#![allow(dead_code)] /// DO NOT MODIFY. Auto-generated file -use ntex_grpc::codegen as ngrpc; -/// The request message containing the user's name. -#[derive(Clone, PartialEq, ::prost::Message)] +/// The request message containing the user's name. 
+#[derive(Clone, PartialEq, ::ntex_grpc::Message)] pub struct HelloRequest { - #[prost(string, tag = "1")] - pub name: ::prost::alloc::string::String, + #[prost(bytes, tag = "1")] + pub name: ::ntex_grpc::types::Bytes, } -/// The response message containing the greetings -#[derive(Clone, PartialEq, ::prost::Message)] +/// The response message containing the greetings +#[derive(Clone, PartialEq, ::ntex_grpc::Message)] pub struct HelloReply { #[prost(string, tag = "1")] - pub message: ::prost::alloc::string::String, + pub message: ::ntex_grpc::types::ByteString, } /// `Greeter` service client definition #[doc = " The greeting service definition."] #[derive(Clone)] pub struct GreeterClient(T); -impl ngrpc::ClientInformation for GreeterClient { +impl GreeterClient { + #[inline] + #[doc = r" Create new client instance"] + pub fn new(transport: T) -> Self { + Self(transport) + } +} +impl ::ntex_grpc::ClientInformation for GreeterClient { #[inline] #[doc = r" Create new client instance"] fn create(transport: T) -> Self { @@ -42,18 +49,19 @@ impl ngrpc::ClientInformation for GreeterClient { } #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub struct GreeterSayHelloMethod; -impl ngrpc::MethodDef for GreeterSayHelloMethod { +impl ::ntex_grpc::MethodDef for GreeterSayHelloMethod { const NAME: &'static str = "SayHello"; - const PATH: ngrpc::ByteString = ngrpc::ByteString::from_static("/helloworld.Greeter/SayHello"); + const PATH: ::ntex_grpc::types::ByteString = + ::ntex_grpc::types::ByteString::from_static("/helloworld.Greeter/SayHello"); type Input = HelloRequest; type Output = HelloReply; } -impl> GreeterClient { +impl> GreeterClient { #[doc = " Sends a greeting"] pub fn say_hello<'a>( &'a self, req: &'a HelloRequest, - ) -> ngrpc::Request<'a, T, GreeterSayHelloMethod> { - ngrpc::Request::new(&self.0, req) + ) -> ::ntex_grpc::Request<'a, T, GreeterSayHelloMethod> { + ::ntex_grpc::Request::new(&self.0, req) } } diff --git a/ntex-grpc-codegen/Cargo.toml 
b/ntex-grpc-codegen/Cargo.toml index d4d0d14..bf791db 100644 --- a/ntex-grpc-codegen/Cargo.toml +++ b/ntex-grpc-codegen/Cargo.toml @@ -10,7 +10,7 @@ path = "src/main.rs" [dependencies] clap = { version = "3.2", features = ["derive"] } -prost-build = "0.10" +ntex-prost-build = "0.10.1" proc-macro2 = "1.0" quote = "1.0" syn = "1.0" diff --git a/ntex-grpc-codegen/src/config.rs b/ntex-grpc-codegen/src/config.rs index 3bfc01b..33e54e8 100644 --- a/ntex-grpc-codegen/src/config.rs +++ b/ntex-grpc-codegen/src/config.rs @@ -1,6 +1,6 @@ use std::{env, io, ops, path::Path}; -use prost_build::Config; +use ntex_prost_build::Config; use crate::generator::GrpcServiceGenerator; @@ -21,6 +21,11 @@ impl NGrpcGenerator { Self { pconfig: cfg } } + /// Map protobuf bytes type to custom rust type + pub fn map_bytes(&mut self, path: &str, rust_type: &str) { + let _ = self.pconfig.bytes(&[path], rust_type); + } + /// Compile `.proto` files into Rust files during a Cargo build with additional code generator /// configuration options. pub fn compile_protos( diff --git a/ntex-grpc-codegen/src/generator.rs b/ntex-grpc-codegen/src/generator.rs index 0a4df04..9fdb7f0 100644 --- a/ntex-grpc-codegen/src/generator.rs +++ b/ntex-grpc-codegen/src/generator.rs @@ -1,5 +1,5 @@ +use ntex_prost_build::{Method, Service, ServiceGenerator}; use proc_macro2::TokenStream; -use prost_build::{Method, Service, ServiceGenerator}; use quote::quote; #[derive(Debug, Copy, Clone)] @@ -23,7 +23,7 @@ impl ServiceGenerator for GrpcServiceGenerator { fn finalize(&mut self, buf: &mut String) { buf.insert_str( 0, - "/// DO NOT MODIFY. Auto-generated file\nuse ntex_grpc::codegen as ngrpc;\n\n", + "#![allow(dead_code)]\n/// DO NOT MODIFY. 
Auto-generated file\n\n", ) } } @@ -53,7 +53,7 @@ fn generate_client(service: &Service, buf: &mut String) { } } - impl ngrpc::ClientInformation for #service_ident { + impl ::ntex_grpc::ClientInformation for #service_ident { #[inline] /// Create new client instance fn create(transport: T) -> Self { @@ -105,17 +105,17 @@ fn gen_method(method: &Method, service: &Service) -> TokenStream { #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub struct #def_ident; - impl ngrpc::MethodDef for #def_ident { + impl ::ntex_grpc::MethodDef for #def_ident { const NAME: &'static str = #proto_name; - const PATH: ngrpc::ByteString = ngrpc::ByteString::from_static(#path); + const PATH: ::ntex_grpc::types::ByteString = ::ntex_grpc::types::ByteString::from_static(#path); type Input = #input_type; type Output = #output_type; } - impl> #service_ident { + impl> #service_ident { #[doc = #(#comments)*] - pub fn #method_ident<'a>(&'a self, req: &'a #input_type) -> ngrpc::Request<'a, T, #def_ident> { - ngrpc::Request::new(&self.0, req) + pub fn #method_ident<'a>(&'a self, req: &'a #input_type) -> ::ntex_grpc::Request<'a, T, #def_ident> { + ::ntex_grpc::Request::new(&self.0, req) } } } diff --git a/ntex-grpc-codegen/src/main.rs b/ntex-grpc-codegen/src/main.rs index f693119..f4f4488 100644 --- a/ntex-grpc-codegen/src/main.rs +++ b/ntex-grpc-codegen/src/main.rs @@ -29,6 +29,10 @@ struct Args { #[clap(short, long, value_parser, name = "INCLUDE-DIR")] include_dir: Vec, + /// Map protobuf bytes type to custom rust type that implements BytesAdapter trait. 
{name}={rust-type-name} + #[clap(short, long, value_parser, name = "MAP-BYTES")] + map_bytes: Vec, + /// Path to rustfmt configuration file #[clap(short, long, value_parser, name = "RUSTFMT-PATH")] rustfmt_path: Option, @@ -43,6 +47,15 @@ fn main() -> io::Result<()> { if let Some(out_dir) = args.out_dir.clone() { cfg.out_dir(out_dir); } + + for map in args.map_bytes { + if let Some((s1, s2)) = map.split_once('=') { + cfg.map_bytes(s1, s2) + } else { + println!("Cannot parse bytes mapping: {:?}", map); + } + } + if let Err(e) = cfg.compile_protos(&args.proto, &args.include_dir) { println!("{}", e); } else { diff --git a/ntex-grpc/Cargo.toml b/ntex-grpc/Cargo.toml index 2383bd0..29dba8f 100644 --- a/ntex-grpc/Cargo.toml +++ b/ntex-grpc/Cargo.toml @@ -25,6 +25,8 @@ ntex-bytes = "0.1.15" ntex-service = "0.3" ntex-rt = "0.4" +ntex-prost-derive = "0.10.1" + async-trait = "0.1.0" bitflags = "1.3" prost = "0.10" diff --git a/ntex-grpc/src/client.rs b/ntex-grpc/src/client.rs index f534dc1..15c3e50 100644 --- a/ntex-grpc/src/client.rs +++ b/ntex-grpc/src/client.rs @@ -8,10 +8,9 @@ use ntex_http::{header, HeaderMap, Method}; use ntex_io::{IoBoxed, OnDisconnect}; use ntex_service::{fn_service, Service}; use ntex_util::{channel::oneshot, future::Ready, HashMap}; -use prost::Message; use crate::service::{ClientInformation, MethodDef, Transport}; -use crate::{consts, ServiceError}; +use crate::{consts, Message, ServiceError}; #[derive(thiserror::Error, Debug)] pub enum ClientError { @@ -105,7 +104,7 @@ impl Transport for Client { let stream = self .0 .client - .send_request(Method::POST, T::PATH, hdrs) + .send_request(Method::POST, T::PATH, hdrs, false) .await?; stream.send_payload(buf.freeze(), true).await?; @@ -123,7 +122,7 @@ impl Transport for Client { Ok(Ok((mut data, trailers))) => { let _compressed = data.get_u8(); let len = data.get_u32(); - match ::decode(data.split_to(len as usize)) { + match ::decode(&mut data.split_to(len as usize)) { Ok(item) => Ok((item, trailers)), 
Err(_e) => Err(ServiceError::Canceled), } diff --git a/ntex-grpc/src/encoding.rs b/ntex-grpc/src/encoding.rs new file mode 100644 index 0000000..d1e07b1 --- /dev/null +++ b/ntex-grpc/src/encoding.rs @@ -0,0 +1,178 @@ +use std::convert::TryFrom; + +use ntex_bytes::{ByteString, Bytes, BytesMut}; +use prost::encoding::{self, decode_varint, encode_key, encode_varint, encoded_len_varint}; +use prost::encoding::{DecodeContext, WireType}; + +use crate::{error::DecodeError, types::BytesAdapter}; + +impl BytesAdapter for Vec { + fn len(&self) -> usize { + Vec::len(self) + } + + fn replace_with(&mut self, buf: Bytes) -> Result<(), DecodeError> { + self.clear(); + self.reserve(buf.len()); + self.extend(&buf); + Ok(()) + } + + fn append_to(&self, buf: &mut BytesMut) { + buf.extend_from_slice(self.as_slice()) + } + + fn clear(&mut self) { + self.clear() + } +} + +impl BytesAdapter for Bytes { + fn len(&self) -> usize { + Bytes::len(self) + } + + fn replace_with(&mut self, buf: Bytes) -> Result<(), DecodeError> { + *self = buf; + Ok(()) + } + + fn append_to(&self, buf: &mut BytesMut) { + buf.extend_from_slice(self) + } + + fn clear(&mut self) { + self.clear() + } +} + +impl BytesAdapter for String { + fn len(&self) -> usize { + String::len(self) + } + + fn replace_with(&mut self, buf: Bytes) -> Result<(), DecodeError> { + if let Ok(s) = ByteString::try_from(buf) { + self.push_str(s.as_str()); + Ok(()) + } else { + Err(DecodeError::new( + "invalid string value: data is not UTF-8 encoded", + )) + } + } + + fn append_to(&self, buf: &mut BytesMut) { + buf.extend_from_slice(self.as_bytes()) + } + + fn clear(&mut self) { + self.clear() + } +} + +impl BytesAdapter for ByteString { + fn len(&self) -> usize { + self.as_slice().len() + } + + fn replace_with(&mut self, buf: Bytes) -> Result<(), DecodeError> { + if let Ok(s) = ByteString::try_from(buf) { + *self = s; + Ok(()) + } else { + Err(DecodeError::new( + "invalid string value: data is not UTF-8 encoded", + )) + } + } + + fn 
append_to(&self, buf: &mut BytesMut) { + buf.extend_from_slice(self.as_bytes()) + } + + fn clear(&mut self) { + *self = ByteString::new() + } +} + +pub mod bytes { + use super::*; + + #[inline] + pub fn clear(value: &mut A) { + value.clear() + } + + #[inline] + pub fn encode(tag: u32, value: &A, buf: &mut BytesMut) + where + A: BytesAdapter, + { + encode_key(tag, WireType::LengthDelimited, buf); + encode_varint(value.len() as u64, buf); + value.append_to(buf); + } + + #[inline] + pub fn encode_repeated(tag: u32, values: &[A], buf: &mut BytesMut) + where + A: BytesAdapter, + { + for value in values { + encode(tag, value, buf); + } + } + + #[inline] + pub fn merge( + wire_type: WireType, + value: &mut A, + buf: &mut Bytes, + _ctx: DecodeContext, + ) -> Result<(), DecodeError> + where + A: BytesAdapter, + { + encoding::check_wire_type(WireType::LengthDelimited, wire_type)?; + let len = decode_varint(buf)? as usize; + + value.replace_with(buf.split_to(len)) + } + + pub fn merge_repeated( + wire_type: WireType, + values: &mut Vec, + buf: &mut Bytes, + ctx: DecodeContext, + ) -> Result<(), DecodeError> + where + A: BytesAdapter, + { + encoding::check_wire_type(WireType::LengthDelimited, wire_type)?; + let mut value = Default::default(); + merge(wire_type, &mut value, buf, ctx)?; + values.push(value); + Ok(()) + } + + #[inline] + pub fn encoded_len(tag: u32, value: &A) -> usize + where + A: BytesAdapter, + { + encoding::key_len(tag) + encoded_len_varint(value.len() as u64) + value.len() + } + + #[inline] + pub fn encoded_len_repeated(tag: u32, values: &[A]) -> usize + where + A: BytesAdapter, + { + encoding::key_len(tag) * values.len() + + values + .iter() + .map(|value| encoded_len_varint(value.len() as u64) + value.len()) + .sum::() + } +} diff --git a/ntex-grpc/src/error.rs b/ntex-grpc/src/error.rs index 9aaf998..c4fb9d9 100644 --- a/ntex-grpc/src/error.rs +++ b/ntex-grpc/src/error.rs @@ -1,4 +1,5 @@ use ntex_h2::{OperationError, StreamError}; +pub use 
prost::DecodeError; #[derive(thiserror::Error, Clone, Debug)] pub enum ServiceError { diff --git a/ntex-grpc/src/lib.rs b/ntex-grpc/src/lib.rs index 53b4180..31b2ff8 100644 --- a/ntex-grpc/src/lib.rs +++ b/ntex-grpc/src/lib.rs @@ -4,17 +4,29 @@ mod request; mod service; pub mod client; +pub mod types; -pub use crate::error::ServiceError; +pub use crate::error::{DecodeError, ServiceError}; pub use crate::request::{Request, Response}; pub use crate::service::{ClientInformation, MethodDef, Transport}; +pub use crate::types::{BytesAdapter, Message}; + +#[doc(hidden)] +pub mod encoding; #[doc(hidden)] pub mod codegen { - pub use ntex_bytes::ByteString; + pub use ntex_bytes::{ByteString, BytesMut}; pub use ntex_service::Service; pub use crate::request::{Request, Response}; pub use crate::service::{ClientInformation, MethodDef, Transport}; pub use crate::ServiceError; } + +// [1]: https://github.com/serde-rs/serde/blob/v1.0.89/serde/src/lib.rs#L245-L256 +#[allow(unused_imports)] +#[macro_use] +extern crate ntex_prost_derive; +#[doc(hidden)] +pub use ntex_prost_derive::*; diff --git a/ntex-grpc/src/request.rs b/ntex-grpc/src/request.rs index 786af34..39845c4 100644 --- a/ntex-grpc/src/request.rs +++ b/ntex-grpc/src/request.rs @@ -1,4 +1,4 @@ -use std::{future::Future, pin::Pin, task::Context, task::Poll}; +use std::{fmt, future::Future, pin::Pin, task::Context, task::Poll}; use ntex_http::HeaderMap; use ntex_util::ready; @@ -75,3 +75,9 @@ impl Response { (self.message, self.metadata) } } + +impl fmt::Debug for Response { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.message.fmt(f) + } +} diff --git a/ntex-grpc/src/service.rs b/ntex-grpc/src/service.rs index e9f1d48..ac1233f 100644 --- a/ntex-grpc/src/service.rs +++ b/ntex-grpc/src/service.rs @@ -2,15 +2,17 @@ use async_trait::async_trait; use ntex_bytes::ByteString; use ntex_http::HeaderMap; +use crate::types::Message; + /// Trait for service method definition pub trait MethodDef { const NAME: 
&'static str; const PATH: ByteString; - type Input: prost::Message; + type Input: Message; - type Output: prost::Message + Default; + type Output: Message; } #[async_trait(?Send)] diff --git a/ntex-grpc/src/types.rs b/ntex-grpc/src/types.rs new file mode 100644 index 0000000..3ee2638 --- /dev/null +++ b/ntex-grpc/src/types.rs @@ -0,0 +1,126 @@ +use std::fmt::Debug; + +pub use ntex_bytes::{ByteString, Bytes, BytesMut}; + +use prost::encoding::{decode_key, encode_varint, encoded_len_varint, DecodeContext, WireType}; +use prost::{DecodeError, EncodeError}; + +/// A Protocol Buffers message. +pub trait Message: Debug + Default { + /// Encodes the message to a buffer. + /// + /// This method will panic if the buffer has insufficient capacity. + /// + /// Meant to be used only by `Message` implementations. + #[doc(hidden)] + fn encode_raw(&self, buf: &mut BytesMut) + where + Self: Sized; + + /// Decodes a field from a buffer, and merges it into `self`. + /// + /// Meant to be used only by `Message` implementations. + #[doc(hidden)] + fn merge_field( + &mut self, + tag: u32, + wire_type: WireType, + buf: &mut Bytes, + ctx: DecodeContext, + ) -> Result<(), DecodeError> + where + Self: Sized; + + /// Returns the encoded length of the message without a length delimiter. + fn encoded_len(&self) -> usize; + + /// Encodes the message to a buffer. + /// + /// An error will be returned if the buffer does not have sufficient capacity. + fn encode(&self, buf: &mut BytesMut) -> Result<(), EncodeError> + where + Self: Sized, + { + buf.reserve(self.encoded_len()); + self.encode_raw(buf); + Ok(()) + } + + /// Encodes the message with a length-delimiter to a buffer. + /// + /// An error will be returned if the buffer does not have sufficient capacity. 
+ fn encode_length_delimited(&self, buf: &mut BytesMut) -> Result<(), EncodeError> + where + Self: Sized, + { + let len = self.encoded_len(); + let required = len + encoded_len_varint(len as u64); + buf.reserve(required); + encode_varint(len as u64, buf); + self.encode_raw(buf); + Ok(()) + } + + /// Decodes an instance of the message from a buffer. + /// + /// The entire buffer will be consumed. + fn decode(buf: &mut Bytes) -> Result { + let mut message = Self::default(); + Self::merge(&mut message, buf).map(|_| message) + } + + /// Decodes a length-delimited instance of the message from the buffer. + fn decode_length_delimited(buf: &mut Bytes) -> Result { + let mut message = Self::default(); + message.merge_length_delimited(buf)?; + Ok(message) + } + + /// Decodes an instance of the message from a buffer, and merges it into `self`. + /// + /// The entire buffer will be consumed. + fn merge(&mut self, buf: &mut Bytes) -> Result<(), DecodeError> + where + Self: Sized, + { + let ctx = DecodeContext::default(); + while !buf.is_empty() { + let (tag, wire_type) = decode_key(buf)?; + self.merge_field(tag, wire_type, buf, ctx.clone())?; + } + Ok(()) + } + + /// Decodes a length-delimited instance of the message from buffer, and + /// merges it into `self`. + fn merge_length_delimited(&mut self, buf: &mut Bytes) -> Result<(), DecodeError> + where + Self: Sized, + { + let ctx = DecodeContext::default(); + prost::encoding::merge_loop(self, buf, ctx, |msg: &mut Self, buf: &mut Bytes, ctx| { + let (tag, wire_type) = decode_key(buf)?; + msg.merge_field(tag, wire_type, buf, ctx) + }) + } + + /// Clears the message, resetting all fields to their default. + fn clear(&mut self); +} + +pub trait BytesAdapter: Default + Sized { + fn len(&self) -> usize; + + /// Replace contents of this buffer with the contents of another buffer. + fn replace_with(&mut self, buf: Bytes) -> Result<(), DecodeError>; + + /// Appends this buffer to the (contents of) other buffer. 
+ fn append_to(&self, buf: &mut BytesMut); + + /// Clear content + fn clear(&mut self); + + fn is_empty(&self) -> bool { + self.len() == 0 + } +} diff --git a/prost-build/Cargo.toml b/prost-build/Cargo.toml new file mode 100644 index 0000000..6d89fde --- /dev/null +++ b/prost-build/Cargo.toml @@ -0,0 +1,42 @@ +[package] +name = "ntex-prost-build" +version = "0.10.1" +authors = [ + "Dan Burkert ", + "Tokio Contributors ", +] +license = "Apache-2.0" +repository = "https://github.com/ntex-rs/ntex-grpc" +documentation = "https://docs.rs/ntex-prost-build" +readme = "README.md" +description = "A Protocol Buffers implementation for the Rust Language." +edition = "2018" + +[features] +default = [] +vendored = [] +# When MSRV moves to 1.60, these can change to dep: +cleanup-markdown = ["pulldown-cmark", "pulldown-cmark-to-cmark"] + +[dependencies] +heck = "0.4" +itertools = "0.10" +log = "0.4" +multimap = { version = "0.8", default-features = false } +petgraph = { version = "0.6", default-features = false } +prost = { version = "0.10.0", default-features = false } +prost-types = { version = "0.10.0", default-features = false } +tempfile = "3" +lazy_static = "1.4.0" +regex = { version = "1.5.5", default-features = false, features = ["std", "unicode-bool"] } +# These two must be kept in sync +pulldown-cmark = { version = "0.9.1", optional = true, default-features = false } +pulldown-cmark-to-cmark = { version = "10.0.1", optional = true } + +[build-dependencies] +which = { version = "4", default-features = false } +cfg-if = "1" +cmake = "0.1" + +[dev-dependencies] +env_logger = { version = "0.8", default-features = false } diff --git a/prost-build/LICENSE b/prost-build/LICENSE new file mode 100644 index 0000000..16fe87b --- /dev/null +++ b/prost-build/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the 
following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/prost-build/README.md b/prost-build/README.md new file mode 100644 index 0000000..248e0ff --- /dev/null +++ b/prost-build/README.md @@ -0,0 +1,38 @@ +[![Documentation](https://docs.rs/prost-build/badge.svg)](https://docs.rs/prost-build/) +[![Crate](https://img.shields.io/crates/v/prost-build.svg)](https://crates.io/crates/prost-build) + +# `prost-build` + +`prost-build` makes it easy to generate Rust code from `.proto` files as part of +a Cargo build. See the crate [documentation](https://docs.rs/prost-build/) for examples +of how to integrate `prost-build` into a Cargo project. + +## `protoc` + +`prost-build` uses `protoc` to parse the proto files. There are a few ways to make `protoc` +available for `prost-build`. 
+ +The first option is to include `protoc` in your `PATH` this +can be done by following the [`protoc` install instructions]. In addition, its possible to +pass the `PROTOC=` environment variable. + +[`protoc` install instructions]: https://github.com/protocolbuffers/protobuf#protocol-compiler-installation + +The second option is to provide the `vendored` feature flag to `prost-build`. This will +force `prost-build` to compile `protoc` from the bundled source. This will require that +you have the correct dependencies installed include a C++ toolchain, cmake, etc. For +more info on what the required dependencies are check [here]. + +[here]: https://github.com/protocolbuffers/protobuf/blob/master/src/README.md + +If you would like to always ignore vendoring `protoc` you can additionally pass +`PROTOC_NO_VENDOR` and this will always check the `PATH`/`PROTOC` environment +variables and never compile `protoc` from source. + +## License + +`prost-build` is distributed under the terms of the Apache License (Version 2.0). + +See [LICENSE](../LICENSE) for details. + +Copyright 2017 Dan Burkert diff --git a/prost-build/build.rs b/prost-build/build.rs new file mode 100644 index 0000000..14ebe57 --- /dev/null +++ b/prost-build/build.rs @@ -0,0 +1,120 @@ +//! Finds the appropriate `protoc` binary and Protobuf include directory for this host, and outputs +//! build directives so that the main `prost-build` crate can use them. +//! +//! This build script attempts to find `protoc` in a few ways: +//! +//! 1. If `PROTOC_NO_VENDOR` is enabled, it will check the `PROTOC` environment variable +//! then check the `PATH` for a `protoc` or `protoc.exe`. +//! 2. If the `vendored` feature flag is enabled or `protoc` can't be found via the environment +//! variable or in the `PATH` then `prost-build` will attempt to build `protoc` from the +//! bundled source code. +//! 3. Otherwise, it will attempt to execute from the `PATH` and fail if it does not exist. +//! +//! 
The following locations are checked for the Protobuf include directory in decreasing priority: +//! +//! 1. The `PROTOC_INCLUDE` environment variable. +//! 2. The bundled Protobuf include directory. +//! + +use cfg_if::cfg_if; +use std::env; +use std::path::PathBuf; +use which::which; + +/// Returns the path to the location of the bundled Protobuf artifacts. +fn bundle_path() -> PathBuf { + env::current_dir().unwrap().join("third-party") +} + +/// Returns the path to the Protobuf include directory pointed to by the `PROTOC_INCLUDE` +/// environment variable, if it is set. +fn env_protoc_include() -> Option { + let protoc_include = match env::var_os("PROTOC_INCLUDE") { + Some(path) => PathBuf::from(path), + None => return None, + }; + + if !protoc_include.exists() { + panic!( + "PROTOC_INCLUDE environment variable points to non-existent directory ({:?})", + protoc_include + ); + } + if !protoc_include.is_dir() { + panic!( + "PROTOC_INCLUDE environment variable points to a non-directory file ({:?})", + protoc_include + ); + } + + Some(protoc_include) +} + +/// Returns the path to the bundled Protobuf include directory. +fn bundled_protoc_include() -> PathBuf { + bundle_path().join("include") +} + +/// Check for `protoc` via the `PROTOC` env var or in the `PATH`. +fn path_protoc() -> Option { + env::var_os("PROTOC") + .map(PathBuf::from) + .or_else(|| which("protoc").ok()) +} + +/// Returns true if the vendored flag is enabled. +fn vendored() -> bool { + cfg_if! { + if #[cfg(feature = "vendored")] { + true + } else { + false + } + } +} + +/// Compile `protoc` via `cmake`. +fn compile() -> Option { + let protobuf_src = bundle_path().join("protobuf").join("cmake"); + + println!("cargo:rerun-if-changed={}", protobuf_src.display()); + + let dst = cmake::Config::new(protobuf_src) + .define("protobuf_BUILD_TESTS", "OFF") + .build(); + + Some(dst.join("bin").join("protoc")) +} + +/// Try to find a `protoc` through a few methods. +/// +/// Check module docs for more info. 
+fn protoc() -> Option { + if env::var_os("PROTOC_NO_VENDOR").is_some() { + path_protoc() + } else if vendored() { + compile() + } else { + path_protoc().or_else(compile) + } +} + +fn main() { + let protoc = protoc().expect( + "Failed to find or build the protoc binary. The PROTOC environment \ + is not set, `protoc` is not in PATH or you are missing the requirements to compile protobuf \ + from source. \n \ + Check out the `prost-build` README for instructions on the requirements: \ + https://github.com/tokio-rs/prost#generated-code", + ); + + let protoc_include = env_protoc_include().unwrap_or_else(bundled_protoc_include); + + println!("cargo:rustc-env=PROTOC={}", protoc.display()); + println!( + "cargo:rustc-env=PROTOC_INCLUDE={}", + protoc_include.display() + ); + println!("cargo:rerun-if-env-changed=PROTOC"); + println!("cargo:rerun-if-env-changed=PROTOC_INCLUDE"); +} diff --git a/prost-build/src/ast.rs b/prost-build/src/ast.rs new file mode 100644 index 0000000..9b99884 --- /dev/null +++ b/prost-build/src/ast.rs @@ -0,0 +1,336 @@ +use lazy_static::lazy_static; +use prost_types::source_code_info::Location; +#[cfg(feature = "cleanup-markdown")] +use pulldown_cmark::{CodeBlockKind, Event, Options, Parser, Tag}; +use regex::Regex; + +/// Comments on a Protobuf item. +#[derive(Debug)] +pub struct Comments { + /// Leading detached blocks of comments. + pub leading_detached: Vec>, + + /// Leading comments. + pub leading: Vec, + + /// Trailing comments. 
+ pub trailing: Vec, +} + +impl Comments { + pub(crate) fn from_location(location: &Location) -> Comments { + #[cfg(not(feature = "cleanup-markdown"))] + fn get_lines(comments: S) -> Vec + where + S: AsRef, + { + comments.as_ref().lines().map(str::to_owned).collect() + } + + #[cfg(feature = "cleanup-markdown")] + fn get_lines(comments: S) -> Vec + where + S: AsRef, + { + let comments = comments.as_ref(); + let mut buffer = String::with_capacity(comments.len() + 256); + let opts = pulldown_cmark_to_cmark::Options { + code_block_token_count: 3, + ..Default::default() + }; + match pulldown_cmark_to_cmark::cmark_with_options( + Parser::new_ext(comments, Options::all() - Options::ENABLE_SMART_PUNCTUATION).map( + |event| { + fn map_codeblock(kind: CodeBlockKind) -> CodeBlockKind { + match kind { + CodeBlockKind::Fenced(s) => { + if &*s == "rust" { + CodeBlockKind::Fenced("compile_fail".into()) + } else { + CodeBlockKind::Fenced(format!("text,{}", s).into()) + } + } + CodeBlockKind::Indented => CodeBlockKind::Fenced("text".into()), + } + } + match event { + Event::Start(Tag::CodeBlock(kind)) => { + Event::Start(Tag::CodeBlock(map_codeblock(kind))) + } + Event::End(Tag::CodeBlock(kind)) => { + Event::End(Tag::CodeBlock(map_codeblock(kind))) + } + e => e, + } + }, + ), + &mut buffer, + opts, + ) { + Ok(_) => buffer.lines().map(str::to_owned).collect(), + Err(_) => comments.lines().map(str::to_owned).collect(), + } + } + + let leading_detached = location + .leading_detached_comments + .iter() + .map(get_lines) + .collect(); + let leading = location + .leading_comments + .as_ref() + .map_or(Vec::new(), get_lines); + let trailing = location + .trailing_comments + .as_ref() + .map_or(Vec::new(), get_lines); + Comments { + leading_detached, + leading, + trailing, + } + } + + /// Appends the comments to a buffer with indentation. + /// + /// Each level of indentation corresponds to four space (' ') characters. 
+ pub fn append_with_indent(&self, indent_level: u8, buf: &mut String) { + // Append blocks of detached comments. + for detached_block in &self.leading_detached { + for line in detached_block { + for _ in 0..indent_level { + buf.push_str(" "); + } + buf.push_str("//"); + buf.push_str(&Self::sanitize_line(line)); + buf.push('\n'); + } + buf.push('\n'); + } + + // Append leading comments. + for line in &self.leading { + for _ in 0..indent_level { + buf.push_str(" "); + } + buf.push_str("///"); + buf.push_str(&Self::sanitize_line(line)); + buf.push('\n'); + } + + // Append an empty comment line if there are leading and trailing comments. + if !self.leading.is_empty() && !self.trailing.is_empty() { + for _ in 0..indent_level { + buf.push_str(" "); + } + buf.push_str("///\n"); + } + + // Append trailing comments. + for line in &self.trailing { + for _ in 0..indent_level { + buf.push_str(" "); + } + buf.push_str("///"); + buf.push_str(&Self::sanitize_line(line)); + buf.push('\n'); + } + } + + /// Sanitizes the line for rustdoc by performing the following operations: + /// - escape urls as + /// - escape `[` & `]` + fn sanitize_line(line: &str) -> String { + lazy_static! { + static ref RULE_URL: Regex = Regex::new(r"https?://[^\s)]+").unwrap(); + static ref RULE_BRACKETS: Regex = Regex::new(r"(\[)(\S+)(])").unwrap(); + } + + let mut s = RULE_URL.replace_all(line, r"<$0>").to_string(); + s = RULE_BRACKETS.replace_all(&s, r"\$1$2\$3").to_string(); + if !s.is_empty() { + s.insert(0, ' '); + } + s + } +} + +/// A service descriptor. +#[derive(Debug)] +pub struct Service { + /// The service name in Rust style. + pub name: String, + /// The service name as it appears in the .proto file. + pub proto_name: String, + /// The package name as it appears in the .proto file. + pub package: String, + /// The service comments. + pub comments: Comments, + /// The service methods. + pub methods: Vec, + /// The service options. 
+ pub options: prost_types::ServiceOptions, +} + +/// A service method descriptor. +#[derive(Debug)] +pub struct Method { + /// The name of the method in Rust style. + pub name: String, + /// The name of the method as it appears in the .proto file. + pub proto_name: String, + /// The method comments. + pub comments: Comments, + /// The input Rust type. + pub input_type: String, + /// The output Rust type. + pub output_type: String, + /// The input Protobuf type. + pub input_proto_type: String, + /// The output Protobuf type. + pub output_proto_type: String, + /// The method options. + pub options: prost_types::MethodOptions, + /// Identifies if client streams multiple client messages. + pub client_streaming: bool, + /// Identifies if server streams multiple server messages. + pub server_streaming: bool, +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_comment_append_with_indent_sanitizes_comment_doc_url() { + struct TestCases { + name: &'static str, + input: String, + expected: String, + } + + let tests = vec![ + TestCases { + name: "valid_http", + input: "See https://www.rust-lang.org/".to_string(), + expected: "/// See \n".to_string(), + }, + TestCases { + name: "valid_https", + input: "See https://www.rust-lang.org/".to_string(), + expected: "/// See \n".to_string(), + }, + TestCases { + name: "valid_https_parenthesis", + input: "See (https://www.rust-lang.org/)".to_string(), + expected: "/// See ()\n".to_string(), + }, + TestCases { + name: "invalid", + input: "See note://abc".to_string(), + expected: "/// See note://abc\n".to_string(), + }, + ]; + for t in tests { + let input = Comments { + leading_detached: vec![], + leading: vec![], + trailing: vec![t.input], + }; + + let mut actual = "".to_string(); + input.append_with_indent(0, &mut actual); + + assert_eq!(t.expected, actual, "failed {}", t.name); + } + } + + #[test] + fn test_comment_append_with_indent_sanitizes_square_brackets() { + struct TestCases { + name: &'static str, + input: 
String, + expected: String, + } + + let tests = vec![ + TestCases { + name: "valid_brackets", + input: "foo [bar] baz".to_string(), + expected: "/// foo \\[bar\\] baz\n".to_string(), + }, + TestCases { + name: "invalid_start_bracket", + input: "foo [= baz".to_string(), + expected: "/// foo [= baz\n".to_string(), + }, + TestCases { + name: "invalid_end_bracket", + input: "foo =] baz".to_string(), + expected: "/// foo =] baz\n".to_string(), + }, + TestCases { + name: "invalid_bracket_combination", + input: "[0, 9)".to_string(), + expected: "/// [0, 9)\n".to_string(), + }, + ]; + for t in tests { + let input = Comments { + leading_detached: vec![], + leading: vec![], + trailing: vec![t.input], + }; + + let mut actual = "".to_string(); + input.append_with_indent(0, &mut actual); + + assert_eq!(t.expected, actual, "failed {}", t.name); + } + } + + #[test] + fn test_codeblocks() { + struct TestCase { + name: &'static str, + input: &'static str, + #[allow(unused)] + cleanedup_expected: Vec<&'static str>, + } + + let tests = vec![ + TestCase { + name: "unlabelled_block", + input: " thingy\n", + cleanedup_expected: vec!["", "```text", "thingy", "```"], + }, + TestCase { + name: "rust_block", + input: "```rust\nfoo.bar()\n```\n", + cleanedup_expected: vec!["", "```compile_fail", "foo.bar()", "```"], + }, + TestCase { + name: "js_block", + input: "```javascript\nfoo.bar()\n```\n", + cleanedup_expected: vec!["", "```text,javascript", "foo.bar()", "```"], + }, + ]; + + for t in tests { + let loc = Location { + path: vec![], + span: vec![], + leading_comments: Some(t.input.into()), + trailing_comments: None, + leading_detached_comments: vec![], + }; + let comments = Comments::from_location(&loc); + #[cfg(feature = "cleanup-markdown")] + let expected = t.cleanedup_expected; + #[cfg(not(feature = "cleanup-markdown"))] + let expected: Vec<&str> = t.input.lines().collect(); + assert_eq!(expected, comments.leading, "failed {}", t.name); + } + } +} diff --git 
a/prost-build/src/code_generator.rs b/prost-build/src/code_generator.rs new file mode 100644 index 0000000..ca5e37e --- /dev/null +++ b/prost-build/src/code_generator.rs @@ -0,0 +1,1152 @@ +use std::ascii; +use std::borrow::Cow; +use std::collections::{HashMap, HashSet}; +use std::iter; + +use itertools::{Either, Itertools}; +use log::debug; +use multimap::MultiMap; +use prost_types::field_descriptor_proto::{Label, Type}; +use prost_types::source_code_info::Location; +use prost_types::{ + DescriptorProto, EnumDescriptorProto, EnumValueDescriptorProto, FieldDescriptorProto, + FieldOptions, FileDescriptorProto, OneofDescriptorProto, ServiceDescriptorProto, + SourceCodeInfo, +}; + +use crate::ast::{Comments, Method, Service}; +use crate::extern_paths::ExternPaths; +use crate::ident::{to_snake, to_upper_camel}; +use crate::message_graph::MessageGraph; +use crate::{BytesType, Config, MapType}; + +#[derive(PartialEq)] +enum Syntax { + Proto2, + Proto3, +} + +pub struct CodeGenerator<'a> { + config: &'a mut Config, + package: String, + source_info: SourceCodeInfo, + syntax: Syntax, + message_graph: &'a MessageGraph, + extern_paths: &'a ExternPaths, + depth: u8, + path: Vec, + buf: &'a mut String, +} + +fn push_indent(buf: &mut String, depth: u8) { + for _ in 0..depth { + buf.push_str(" "); + } +} +impl<'a> CodeGenerator<'a> { + pub fn generate( + config: &mut Config, + message_graph: &MessageGraph, + extern_paths: &ExternPaths, + file: FileDescriptorProto, + buf: &mut String, + ) { + let mut source_info = file + .source_code_info + .expect("no source code info in request"); + source_info.location.retain(|location| { + let len = location.path.len(); + len > 0 && len % 2 == 0 + }); + source_info + .location + .sort_by_key(|location| location.path.clone()); + + let syntax = match file.syntax.as_ref().map(String::as_str) { + None | Some("proto2") => Syntax::Proto2, + Some("proto3") => Syntax::Proto3, + Some(s) => panic!("unknown syntax: {}", s), + }; + + let mut code_gen = 
CodeGenerator { + config, + package: file.package.unwrap_or_default(), + source_info, + syntax, + message_graph, + extern_paths, + depth: 0, + path: Vec::new(), + buf, + }; + + debug!( + "file: {:?}, package: {:?}", + file.name.as_ref().unwrap(), + code_gen.package + ); + + code_gen.path.push(4); + for (idx, message) in file.message_type.into_iter().enumerate() { + code_gen.path.push(idx as i32); + code_gen.append_message(message); + code_gen.path.pop(); + } + code_gen.path.pop(); + + code_gen.path.push(5); + for (idx, desc) in file.enum_type.into_iter().enumerate() { + code_gen.path.push(idx as i32); + code_gen.append_enum(desc); + code_gen.path.pop(); + } + code_gen.path.pop(); + + if code_gen.config.service_generator.is_some() { + code_gen.path.push(6); + for (idx, service) in file.service.into_iter().enumerate() { + code_gen.path.push(idx as i32); + code_gen.push_service(service); + code_gen.path.pop(); + } + + if let Some(service_generator) = code_gen.config.service_generator.as_mut() { + service_generator.finalize(code_gen.buf); + } + + code_gen.path.pop(); + } + } + + fn append_message(&mut self, message: DescriptorProto) { + debug!(" message: {:?}", message.name()); + + let message_name = message.name().to_string(); + let fq_message_name = format!( + "{}{}.{}", + if self.package.is_empty() { "" } else { "." }, + self.package, + message.name() + ); + + // Skip external types. + if self.extern_paths.resolve_ident(&fq_message_name).is_some() { + return; + } + + // Split the nested message types into a vector of normal nested message types, and a map + // of the map field entry types. The path index of the nested message types is preserved so + // that comments can be retrieved. 
+ type NestedTypes = Vec<(DescriptorProto, usize)>; + type MapTypes = HashMap; + let (nested_types, map_types): (NestedTypes, MapTypes) = message + .nested_type + .into_iter() + .enumerate() + .partition_map(|(idx, nested_type)| { + if nested_type + .options + .as_ref() + .and_then(|options| options.map_entry) + .unwrap_or(false) + { + let key = nested_type.field[0].clone(); + let value = nested_type.field[1].clone(); + assert_eq!("key", key.name()); + assert_eq!("value", value.name()); + + let name = format!("{}.{}", &fq_message_name, nested_type.name()); + Either::Right((name, (key, value))) + } else { + Either::Left((nested_type, idx)) + } + }); + + // Split the fields into a vector of the normal fields, and oneof fields. + // Path indexes are preserved so that comments can be retrieved. + type Fields = Vec<(FieldDescriptorProto, usize)>; + type OneofFields = MultiMap; + let (fields, mut oneof_fields): (Fields, OneofFields) = message + .field + .into_iter() + .enumerate() + .partition_map(|(idx, field)| { + if field.proto3_optional.unwrap_or(false) { + Either::Left((field, idx)) + } else if let Some(oneof_index) = field.oneof_index { + Either::Right((oneof_index, (field, idx))) + } else { + Either::Left((field, idx)) + } + }); + + self.append_doc(&fq_message_name, None); + self.append_type_attributes(&fq_message_name); + self.push_indent(); + self.buf + .push_str("#[derive(Clone, PartialEq, ::ntex_grpc::Message)]\n"); + self.push_indent(); + self.buf.push_str("pub struct "); + self.buf.push_str(&to_upper_camel(&message_name)); + self.buf.push_str(" {\n"); + + self.depth += 1; + self.path.push(2); + for (field, idx) in fields { + self.path.push(idx as i32); + match field + .type_name + .as_ref() + .and_then(|type_name| map_types.get(type_name)) + { + Some(&(ref key, ref value)) => { + self.append_map_field(&fq_message_name, field, key, value) + } + None => self.append_field(&fq_message_name, field), + } + self.path.pop(); + } + self.path.pop(); + + 
self.path.push(8); + for (idx, oneof) in message.oneof_decl.iter().enumerate() { + let idx = idx as i32; + + let fields = match oneof_fields.get_vec(&idx) { + Some(fields) => fields, + None => continue, + }; + + self.path.push(idx); + self.append_oneof_field(&message_name, &fq_message_name, oneof, fields); + self.path.pop(); + } + self.path.pop(); + + self.depth -= 1; + self.push_indent(); + self.buf.push_str("}\n"); + + if !message.enum_type.is_empty() || !nested_types.is_empty() || !oneof_fields.is_empty() { + self.push_mod(&message_name); + self.path.push(3); + for (nested_type, idx) in nested_types { + self.path.push(idx as i32); + self.append_message(nested_type); + self.path.pop(); + } + self.path.pop(); + + self.path.push(4); + for (idx, nested_enum) in message.enum_type.into_iter().enumerate() { + self.path.push(idx as i32); + self.append_enum(nested_enum); + self.path.pop(); + } + self.path.pop(); + + for (idx, oneof) in message.oneof_decl.into_iter().enumerate() { + let idx = idx as i32; + // optional fields create a synthetic oneof that we want to skip + let fields = match oneof_fields.remove(&idx) { + Some(fields) => fields, + None => continue, + }; + self.append_oneof(&fq_message_name, oneof, idx, fields); + } + + self.pop_mod(); + } + } + + fn append_type_attributes(&mut self, fq_message_name: &str) { + assert_eq!(b'.', fq_message_name.as_bytes()[0]); + for attribute in self.config.type_attributes.get(fq_message_name) { + push_indent(self.buf, self.depth); + self.buf.push_str(attribute); + self.buf.push('\n'); + } + } + + fn append_field_attributes(&mut self, fq_message_name: &str, field_name: &str) { + assert_eq!(b'.', fq_message_name.as_bytes()[0]); + for attribute in self + .config + .field_attributes + .get_field(fq_message_name, field_name) + { + push_indent(self.buf, self.depth); + self.buf.push_str(attribute); + self.buf.push('\n'); + } + } + + fn append_field(&mut self, fq_message_name: &str, field: FieldDescriptorProto) { + let type_ = 
field.r#type(); + let repeated = field.label == Some(Label::Repeated as i32); + let deprecated = self.deprecated(&field); + let optional = self.optional(&field); + let ty = self.resolve_type(&field, fq_message_name); + + let boxed = !repeated + && (type_ == Type::Message || type_ == Type::Group) + && self + .message_graph + .is_nested(field.type_name(), fq_message_name); + + debug!( + " field: {:?}, type: {:?}, boxed: {}", + field.name(), + ty, + boxed + ); + + self.append_doc(fq_message_name, Some(field.name())); + + if deprecated { + self.push_indent(); + self.buf.push_str("#[deprecated]\n"); + } + + self.push_indent(); + self.buf.push_str("#[prost("); + let type_tag = self.field_type_tag(&field); + self.buf.push_str(&type_tag); + + match field.label() { + Label::Optional => { + if optional { + self.buf.push_str(", optional"); + } + } + Label::Required => self.buf.push_str(", required"), + Label::Repeated => { + self.buf.push_str(", repeated"); + if can_pack(&field) + && !field + .options + .as_ref() + .map_or(self.syntax == Syntax::Proto3, |options| options.packed()) + { + self.buf.push_str(", packed=\"false\""); + } + } + } + + if boxed { + self.buf.push_str(", boxed"); + } + self.buf.push_str(", tag=\""); + self.buf.push_str(&field.number().to_string()); + + if let Some(ref default) = field.default_value { + self.buf.push_str("\", default=\""); + if type_ == Type::Bytes { + self.buf.push_str("b\\\""); + for b in unescape_c_escape_string(default) { + self.buf.extend( + ascii::escape_default(b).flat_map(|c| (c as char).escape_default()), + ); + } + self.buf.push_str("\\\""); + } else if type_ == Type::Enum { + let mut enum_value = to_upper_camel(default); + if self.config.strip_enum_prefix { + // Field types are fully qualified, so we extract + // the last segment and strip it from the left + // side of the default value. 
+ let enum_type = field + .type_name + .as_ref() + .and_then(|ty| ty.split('.').last()) + .unwrap(); + + enum_value = strip_enum_prefix(&to_upper_camel(enum_type), &enum_value) + } + self.buf.push_str(&enum_value); + } else { + self.buf.push_str(&default.escape_default().to_string()); + } + } + + self.buf.push_str("\")]\n"); + self.append_field_attributes(fq_message_name, field.name()); + self.push_indent(); + self.buf.push_str("pub "); + self.buf.push_str(&to_snake(field.name())); + self.buf.push_str(": "); + if repeated { + self.buf.push_str("::prost::alloc::vec::Vec<"); + } else if optional { + self.buf.push_str("::core::option::Option<"); + } + if boxed { + self.buf.push_str("::prost::alloc::boxed::Box<"); + } + self.buf.push_str(&ty); + if boxed { + self.buf.push('>'); + } + if repeated || optional { + self.buf.push('>'); + } + self.buf.push_str(",\n"); + } + + fn append_map_field( + &mut self, + fq_message_name: &str, + field: FieldDescriptorProto, + key: &FieldDescriptorProto, + value: &FieldDescriptorProto, + ) { + let key_ty = self.resolve_type(key, fq_message_name); + let value_ty = self.resolve_type(value, fq_message_name); + + debug!( + " map field: {:?}, key type: {:?}, value type: {:?}", + field.name(), + key_ty, + value_ty + ); + + self.append_doc(fq_message_name, Some(field.name())); + self.push_indent(); + + let map_type = self + .config + .map_type + .get_first_field(fq_message_name, field.name()) + .copied() + .unwrap_or_default(); + let key_tag = self.field_type_tag(key); + let value_tag = self.map_value_type_tag(value); + + self.buf.push_str(&format!( + "#[prost({}=\"{}, {}\", tag=\"{}\")]\n", + map_type.annotation(), + key_tag, + value_tag, + field.number() + )); + self.append_field_attributes(fq_message_name, field.name()); + self.push_indent(); + self.buf.push_str(&format!( + "pub {}: {}<{}, {}>,\n", + to_snake(field.name()), + map_type.rust_type(), + key_ty, + value_ty + )); + } + + fn append_oneof_field( + &mut self, + message_name: &str, 
+ fq_message_name: &str, + oneof: &OneofDescriptorProto, + fields: &[(FieldDescriptorProto, usize)], + ) { + let name = format!( + "{}::{}", + to_snake(message_name), + to_upper_camel(oneof.name()) + ); + self.append_doc(fq_message_name, None); + self.push_indent(); + self.buf.push_str(&format!( + "#[prost(oneof=\"{}\", tags=\"{}\")]\n", + name, + fields + .iter() + .map(|&(ref field, _)| field.number()) + .join(", ") + )); + self.append_field_attributes(fq_message_name, oneof.name()); + self.push_indent(); + self.buf.push_str(&format!( + "pub {}: ::core::option::Option<{}>,\n", + to_snake(oneof.name()), + name + )); + } + + fn append_oneof( + &mut self, + fq_message_name: &str, + oneof: OneofDescriptorProto, + idx: i32, + fields: Vec<(FieldDescriptorProto, usize)>, + ) { + self.path.push(8); + self.path.push(idx); + self.append_doc(fq_message_name, None); + self.path.pop(); + self.path.pop(); + + let oneof_name = format!("{}.{}", fq_message_name, oneof.name()); + self.append_type_attributes(&oneof_name); + self.push_indent(); + self.buf + .push_str("#[derive(Clone, PartialEq, ::prost::Oneof)]\n"); + self.push_indent(); + self.buf.push_str("pub enum "); + self.buf.push_str(&to_upper_camel(oneof.name())); + self.buf.push_str(" {\n"); + + self.path.push(2); + self.depth += 1; + for (field, idx) in fields { + let type_ = field.r#type(); + + self.path.push(idx as i32); + self.append_doc(fq_message_name, Some(field.name())); + self.path.pop(); + + self.push_indent(); + let ty_tag = self.field_type_tag(&field); + self.buf.push_str(&format!( + "#[prost({}, tag=\"{}\")]\n", + ty_tag, + field.number() + )); + self.append_field_attributes(&oneof_name, field.name()); + + self.push_indent(); + let ty = self.resolve_type(&field, fq_message_name); + + let boxed = (type_ == Type::Message || type_ == Type::Group) + && self + .message_graph + .is_nested(field.type_name(), fq_message_name); + + debug!( + " oneof: {:?}, type: {:?}, boxed: {}", + field.name(), + ty, + boxed + ); + + 
if boxed { + self.buf.push_str(&format!( + "{}(::prost::alloc::boxed::Box<{}>),\n", + to_upper_camel(field.name()), + ty + )); + } else { + self.buf + .push_str(&format!("{}({}),\n", to_upper_camel(field.name()), ty)); + } + } + self.depth -= 1; + self.path.pop(); + + self.push_indent(); + self.buf.push_str("}\n"); + } + + fn location(&self) -> &Location { + let idx = self + .source_info + .location + .binary_search_by_key(&&self.path[..], |location| &location.path[..]) + .unwrap(); + + &self.source_info.location[idx] + } + + fn append_doc(&mut self, fq_name: &str, field_name: Option<&str>) { + let append_doc = if let Some(field_name) = field_name { + self.config + .disable_comments + .get_first_field(fq_name, field_name) + .is_none() + } else { + self.config.disable_comments.get(fq_name).next().is_none() + }; + if append_doc { + Comments::from_location(self.location()).append_with_indent(self.depth, self.buf) + } + } + + fn append_enum(&mut self, desc: EnumDescriptorProto) { + debug!(" enum: {:?}", desc.name()); + + let proto_enum_name = desc.name(); + let enum_name = to_upper_camel(proto_enum_name); + + let enum_values = &desc.value; + let fq_proto_enum_name = format!( + "{}{}.{}", + if self.package.is_empty() { "" } else { "." 
}, + self.package, + proto_enum_name + ); + if self + .extern_paths + .resolve_ident(&fq_proto_enum_name) + .is_some() + { + return; + } + + self.append_doc(&fq_proto_enum_name, None); + self.append_type_attributes(&fq_proto_enum_name); + self.push_indent(); + self.buf.push_str( + "#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]\n", + ); + self.push_indent(); + self.buf.push_str("#[repr(i32)]\n"); + self.push_indent(); + self.buf.push_str("pub enum "); + self.buf.push_str(&enum_name); + self.buf.push_str(" {\n"); + + let variant_mappings = + build_enum_value_mappings(&enum_name, self.config.strip_enum_prefix, enum_values); + + self.depth += 1; + self.path.push(2); + for variant in variant_mappings.iter() { + self.path.push(variant.path_idx as i32); + + self.append_doc(&fq_proto_enum_name, Some(variant.proto_name)); + self.append_field_attributes(&fq_proto_enum_name, variant.proto_name); + self.push_indent(); + self.buf.push_str(&variant.generated_variant_name); + self.buf.push_str(" = "); + self.buf.push_str(&variant.proto_number.to_string()); + self.buf.push_str(",\n"); + + self.path.pop(); + } + + self.path.pop(); + self.depth -= 1; + + self.push_indent(); + self.buf.push_str("}\n"); + + self.push_indent(); + self.buf.push_str("impl "); + self.buf.push_str(&enum_name); + self.buf.push_str(" {\n"); + self.depth += 1; + self.path.push(2); + + self.push_indent(); + self.buf.push_str( + "/// String value of the enum field names used in the ProtoBuf definition.\n", + ); + self.push_indent(); + self.buf.push_str("///\n"); + self.push_indent(); + self.buf.push_str( + "/// The values are not transformed in any way and thus are considered stable\n", + ); + self.push_indent(); + self.buf.push_str( + "/// (if the ProtoBuf definition does not change) and safe for programmatic use.\n", + ); + self.push_indent(); + self.buf + .push_str("pub fn to_str_name(&self) -> &'static str {\n"); + self.depth += 1; + + self.push_indent(); + 
self.buf.push_str("match self {\n"); + self.depth += 1; + + for variant in variant_mappings.iter() { + self.push_indent(); + self.buf.push_str(&enum_name); + self.buf.push_str("::"); + self.buf.push_str(&variant.generated_variant_name); + self.buf.push_str(" => \""); + self.buf.push_str(variant.proto_name); + self.buf.push_str("\",\n"); + } + + self.depth -= 1; + self.push_indent(); + self.buf.push_str("}\n"); // End of match + + self.depth -= 1; + self.push_indent(); + self.buf.push_str("}\n"); // End of to_str_name() + + self.path.pop(); + self.depth -= 1; + self.push_indent(); + self.buf.push_str("}\n"); // End of impl + } + + fn push_service(&mut self, service: ServiceDescriptorProto) { + let name = service.name().to_owned(); + debug!(" service: {:?}", name); + + let comments = Comments::from_location(self.location()); + + self.path.push(2); + let methods = service + .method + .into_iter() + .enumerate() + .map(|(idx, mut method)| { + debug!(" method: {:?}", method.name()); + self.path.push(idx as i32); + let comments = Comments::from_location(self.location()); + self.path.pop(); + + let name = method.name.take().unwrap(); + let input_proto_type = method.input_type.take().unwrap(); + let output_proto_type = method.output_type.take().unwrap(); + let input_type = self.resolve_ident(&input_proto_type); + let output_type = self.resolve_ident(&output_proto_type); + let client_streaming = method.client_streaming(); + let server_streaming = method.server_streaming(); + + Method { + name: to_snake(&name), + proto_name: name, + comments, + input_type, + output_type, + input_proto_type, + output_proto_type, + options: method.options.unwrap_or_default(), + client_streaming, + server_streaming, + } + }) + .collect(); + self.path.pop(); + + let service = Service { + name: to_upper_camel(&name), + proto_name: name, + package: self.package.clone(), + comments, + methods, + options: service.options.unwrap_or_default(), + }; + + if let Some(service_generator) = 
self.config.service_generator.as_mut() { + service_generator.generate(service, self.buf) + } + } + + fn push_indent(&mut self) { + push_indent(self.buf, self.depth); + } + + fn push_mod(&mut self, module: &str) { + self.push_indent(); + self.buf.push_str("/// Nested message and enum types in `"); + self.buf.push_str(module); + self.buf.push_str("`.\n"); + + self.push_indent(); + self.buf.push_str("pub mod "); + self.buf.push_str(&to_snake(module)); + self.buf.push_str(" {\n"); + + self.package.push('.'); + self.package.push_str(module); + + self.depth += 1; + } + + fn pop_mod(&mut self) { + self.depth -= 1; + + let idx = self.package.rfind('.').unwrap(); + self.package.truncate(idx); + + self.push_indent(); + self.buf.push_str("}\n"); + } + + fn resolve_type(&self, field: &FieldDescriptorProto, fq_message_name: &str) -> String { + match field.r#type() { + Type::Float => String::from("f32"), + Type::Double => String::from("f64"), + Type::Uint32 | Type::Fixed32 => String::from("u32"), + Type::Uint64 | Type::Fixed64 => String::from("u64"), + Type::Int32 | Type::Sfixed32 | Type::Sint32 | Type::Enum => String::from("i32"), + Type::Int64 | Type::Sfixed64 | Type::Sint64 => String::from("i64"), + Type::Bool => String::from("bool"), + Type::String => String::from("::ntex_grpc::types::ByteString"), + Type::Bytes => self + .config + .bytes_type + .get_first_field(fq_message_name, field.name()) + .cloned() + .unwrap_or_default() + .rust_type() + .to_owned(), + Type::Group | Type::Message => self.resolve_ident(field.type_name()), + } + } + + fn resolve_ident(&self, pb_ident: &str) -> String { + // protoc should always give fully qualified identifiers. + assert_eq!(".", &pb_ident[..1]); + + if let Some(proto_ident) = self.extern_paths.resolve_ident(pb_ident) { + return proto_ident; + } + + let mut local_path = self.package.split('.').peekable(); + + // If no package is specified the start of the package name will be '.' 
+ // and split will return an empty string ("") which breaks resolution + // The fix to this is to ignore the first item if it is empty. + if local_path.peek().map_or(false, |s| s.is_empty()) { + local_path.next(); + } + + let mut ident_path = pb_ident[1..].split('.'); + let ident_type = ident_path.next_back().unwrap(); + let mut ident_path = ident_path.peekable(); + + // Skip path elements in common. + while local_path.peek().is_some() && local_path.peek() == ident_path.peek() { + local_path.next(); + ident_path.next(); + } + + local_path + .map(|_| "super".to_string()) + .chain(ident_path.map(to_snake)) + .chain(iter::once(to_upper_camel(ident_type))) + .join("::") + } + + fn field_type_tag(&self, field: &FieldDescriptorProto) -> Cow<'static, str> { + match field.r#type() { + Type::Float => Cow::Borrowed("float"), + Type::Double => Cow::Borrowed("double"), + Type::Int32 => Cow::Borrowed("int32"), + Type::Int64 => Cow::Borrowed("int64"), + Type::Uint32 => Cow::Borrowed("uint32"), + Type::Uint64 => Cow::Borrowed("uint64"), + Type::Sint32 => Cow::Borrowed("sint32"), + Type::Sint64 => Cow::Borrowed("sint64"), + Type::Fixed32 => Cow::Borrowed("fixed32"), + Type::Fixed64 => Cow::Borrowed("fixed64"), + Type::Sfixed32 => Cow::Borrowed("sfixed32"), + Type::Sfixed64 => Cow::Borrowed("sfixed64"), + Type::Bool => Cow::Borrowed("bool"), + Type::String => Cow::Borrowed("string"), + Type::Bytes => Cow::Borrowed("bytes"), + Type::Group => Cow::Borrowed("group"), + Type::Message => Cow::Borrowed("message"), + Type::Enum => Cow::Owned(format!( + "enumeration={:?}", + self.resolve_ident(field.type_name()) + )), + } + } + + fn map_value_type_tag(&self, field: &FieldDescriptorProto) -> Cow<'static, str> { + match field.r#type() { + Type::Enum => Cow::Owned(format!( + "enumeration({})", + self.resolve_ident(field.type_name()) + )), + _ => self.field_type_tag(field), + } + } + + fn optional(&self, field: &FieldDescriptorProto) -> bool { + if field.proto3_optional.unwrap_or(false) { + 
/// Unescapes a C-style escaped string into raw bytes.
///
/// Used for Protobuf `bytes` field default values, which protoc hands us in
/// C-escape form. Based on [`google::protobuf::UnescapeCEscapeString`][1].
///
/// # Panics
///
/// Panics if `s` contains an invalid or truncated escape sequence, since a
/// malformed default value in a descriptor is a protoc bug, not user error.
///
/// [1]: https://github.com/google/protobuf/blob/3.3.x/src/google/protobuf/stubs/strutil.cc#L312-L322
fn unescape_c_escape_string(s: &str) -> Vec<u8> {
    let src = s.as_bytes();
    let len = src.len();
    let mut dst = Vec::new();

    let mut p = 0;

    while p < len {
        if src[p] != b'\\' {
            // Ordinary byte: copy through unchanged.
            dst.push(src[p]);
            p += 1;
        } else {
            p += 1;
            if p == len {
                // A trailing lone backslash cannot start a valid escape.
                panic!(
                    "invalid c-escaped default binary value ({}): ends with '\\'",
                    s
                )
            }
            match src[p] {
                b'a' => {
                    dst.push(0x07);
                    p += 1;
                }
                b'b' => {
                    dst.push(0x08);
                    p += 1;
                }
                b'f' => {
                    dst.push(0x0C);
                    p += 1;
                }
                b'n' => {
                    dst.push(0x0A);
                    p += 1;
                }
                b'r' => {
                    dst.push(0x0D);
                    p += 1;
                }
                b't' => {
                    dst.push(0x09);
                    p += 1;
                }
                b'v' => {
                    dst.push(0x0B);
                    p += 1;
                }
                b'\\' => {
                    dst.push(0x5C);
                    p += 1;
                }
                b'?' => {
                    dst.push(0x3F);
                    p += 1;
                }
                b'\'' => {
                    dst.push(0x27);
                    p += 1;
                }
                b'"' => {
                    dst.push(0x22);
                    p += 1;
                }
                b'0'..=b'7' => {
                    // Up to three octal digits, starting at the current one.
                    let mut octal = 0;
                    for _ in 0..3 {
                        if p < len && src[p] >= b'0' && src[p] <= b'7' {
                            octal = octal * 8 + (src[p] - b'0');
                            p += 1;
                        } else {
                            break;
                        }
                    }
                    dst.push(octal);
                }
                b'x' | b'X' => {
                    // Exactly two hex digits must follow the 'x'.
                    if p + 3 > len {
                        panic!(
                            "invalid c-escaped default binary value ({}): incomplete hex value",
                            s
                        )
                    }
                    match u8::from_str_radix(&s[p + 1..p + 3], 16) {
                        Ok(b) => dst.push(b),
                        _ => panic!(
                            "invalid c-escaped default binary value ({}): invalid hex value",
                            &s[p..p + 2]
                        ),
                    }
                    p += 3;
                }
                _ => panic!(
                    "invalid c-escaped default binary value ({}): invalid escape",
                    s
                ),
            }
        }
    }
    dst
}
+ if stripped + .chars() + .next() + .map(char::is_uppercase) + .unwrap_or(false) + { + stripped.to_owned() + } else { + name.to_owned() + } +} + +struct EnumVariantMapping<'a> { + path_idx: usize, + proto_name: &'a str, + proto_number: i32, + generated_variant_name: String, +} + +fn build_enum_value_mappings<'a>( + generated_enum_name: &str, + do_strip_enum_prefix: bool, + enum_values: &'a [EnumValueDescriptorProto], +) -> Vec> { + let mut numbers = HashSet::new(); + let mut generated_names = HashMap::new(); + let mut mappings = Vec::new(); + + for (idx, value) in enum_values.iter().enumerate() { + // Skip duplicate enum values. Protobuf allows this when the + // 'allow_alias' option is set. + if !numbers.insert(value.number()) { + continue; + } + + let mut generated_variant_name = to_upper_camel(value.name()); + if do_strip_enum_prefix { + generated_variant_name = + strip_enum_prefix(generated_enum_name, &generated_variant_name); + } + + if let Some(old_v) = + generated_names.insert(generated_variant_name.to_owned(), value.name()) + { + panic!("Generated enum variant names overlap: `{}` variant name to be used both by `{}` and `{}` ProtoBuf enum values", + generated_variant_name, old_v, value.name()); + } + + mappings.push(EnumVariantMapping { + path_idx: idx, + proto_name: value.name(), + proto_number: value.number(), + generated_variant_name, + }) + } + mappings +} + +impl MapType { + /// The `prost-derive` annotation type corresponding to the map type. + fn annotation(&self) -> &'static str { + match self { + MapType::HashMap => "map", + MapType::BTreeMap => "btree_map", + } + } + + /// The fully-qualified Rust type corresponding to the map type. + fn rust_type(&self) -> &'static str { + match self { + MapType::HashMap => "::std::collections::HashMap", + MapType::BTreeMap => "::prost::alloc::collections::BTreeMap", + } + } +} + +impl BytesType { + /// The fully-qualified Rust type corresponding to the bytes type. 
/// Checks that `path` is a fully-qualified Protobuf path: it must begin with
/// a leading `.` and contain no empty segments (no `..` and no trailing `.`).
fn validate_proto_path(path: &str) -> Result<(), String> {
    // A fully-qualified path always starts with '.'; this also rejects "".
    if !path.starts_with('.') {
        return Err(format!(
            "Protobuf paths must be fully qualified (begin with a leading '.'): {}",
            path
        ));
    }
    // Skip the empty segment produced by the leading dot, then make sure
    // every remaining segment is non-empty.
    let has_empty_segment = path.split('.').skip(1).any(str::is_empty);
    if has_empty_segment {
        return Err(format!("invalid fully-qualified Protobuf path: {}", path));
    }
    Ok(())
}
new(paths: &[(String, String)], prost_types: bool) -> Result { + let mut extern_paths = ExternPaths { + extern_paths: HashMap::new(), + }; + + for (proto_path, rust_path) in paths { + extern_paths.insert(proto_path.clone(), rust_path.clone())?; + } + + if prost_types { + extern_paths.insert(".google.protobuf".to_string(), "::prost_types".to_string())?; + extern_paths.insert(".google.protobuf.BoolValue".to_string(), "bool".to_string())?; + extern_paths.insert( + ".google.protobuf.BytesValue".to_string(), + "::prost::alloc::vec::Vec".to_string(), + )?; + extern_paths.insert( + ".google.protobuf.DoubleValue".to_string(), + "f64".to_string(), + )?; + extern_paths.insert(".google.protobuf.Empty".to_string(), "()".to_string())?; + extern_paths.insert(".google.protobuf.FloatValue".to_string(), "f32".to_string())?; + extern_paths.insert(".google.protobuf.Int32Value".to_string(), "i32".to_string())?; + extern_paths.insert(".google.protobuf.Int64Value".to_string(), "i64".to_string())?; + extern_paths.insert( + ".google.protobuf.StringValue".to_string(), + "::prost::alloc::string::String".to_string(), + )?; + extern_paths.insert( + ".google.protobuf.UInt32Value".to_string(), + "u32".to_string(), + )?; + extern_paths.insert( + ".google.protobuf.UInt64Value".to_string(), + "u64".to_string(), + )?; + } + + Ok(extern_paths) + } + + fn insert(&mut self, proto_path: String, rust_path: String) -> Result<(), String> { + validate_proto_path(&proto_path)?; + match self.extern_paths.entry(proto_path) { + hash_map::Entry::Occupied(occupied) => { + return Err(format!( + "duplicate extern Protobuf path: {}", + occupied.key() + )); + } + hash_map::Entry::Vacant(vacant) => vacant.insert(rust_path), + }; + Ok(()) + } + + pub fn resolve_ident(&self, pb_ident: &str) -> Option { + // protoc should always give fully qualified identifiers. 
+ assert_eq!(".", &pb_ident[..1]); + + if let Some(rust_path) = self.extern_paths.get(pb_ident) { + return Some(rust_path.clone()); + } + + // TODO(danburkert): there must be a more efficient way to do this, maybe a trie? + for (idx, _) in pb_ident.rmatch_indices('.') { + if let Some(rust_path) = self.extern_paths.get(&pb_ident[..idx]) { + let mut segments = pb_ident[idx + 1..].split('.'); + let ident_type = segments.next_back().map(to_upper_camel); + + return Some( + rust_path + .split("::") + .chain(segments) + .enumerate() + .map(|(idx, segment)| { + if idx == 0 && segment == "crate" { + // If the first segment of the path is 'crate', then do not escape + // it into a raw identifier, since it's being used as the keyword. + segment.to_owned() + } else { + to_snake(segment) + } + }) + .chain(ident_type.into_iter()) + .join("::"), + ); + } + } + + None + } +} + +#[cfg(test)] +mod tests { + + use super::*; + + #[test] + fn test_extern_paths() { + let paths = ExternPaths::new( + &[ + (".foo".to_string(), "::foo1".to_string()), + (".foo.bar".to_string(), "::foo2".to_string()), + (".foo.baz".to_string(), "::foo3".to_string()), + (".foo.Fuzz".to_string(), "::foo4::Fuzz".to_string()), + (".a.b.c.d.e.f".to_string(), "::abc::def".to_string()), + ], + false, + ) + .unwrap(); + + let case = |proto_ident: &str, resolved_ident: &str| { + assert_eq!(paths.resolve_ident(proto_ident).unwrap(), resolved_ident); + }; + + case(".foo", "::foo1"); + case(".foo.Foo", "::foo1::Foo"); + case(".foo.bar", "::foo2"); + case(".foo.Bas", "::foo1::Bas"); + + case(".foo.bar.Bar", "::foo2::Bar"); + case(".foo.Fuzz.Bar", "::foo4::fuzz::Bar"); + + case(".a.b.c.d.e.f", "::abc::def"); + case(".a.b.c.d.e.f.g.FooBar.Baz", "::abc::def::g::foo_bar::Baz"); + + assert!(paths.resolve_ident(".a").is_none()); + assert!(paths.resolve_ident(".a.b").is_none()); + assert!(paths.resolve_ident(".a.c").is_none()); + } + + #[test] + fn test_well_known_types() { + let paths = ExternPaths::new(&[], true).unwrap(); + + 
let case = |proto_ident: &str, resolved_ident: &str| { + assert_eq!(paths.resolve_ident(proto_ident).unwrap(), resolved_ident); + }; + + case(".google.protobuf.Value", "::prost_types::Value"); + case(".google.protobuf.Duration", "::prost_types::Duration"); + case(".google.protobuf.Empty", "()"); + } +} diff --git a/prost-build/src/goodbye.proto b/prost-build/src/goodbye.proto new file mode 100644 index 0000000..4527d7d --- /dev/null +++ b/prost-build/src/goodbye.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; + +import "types.proto"; + +package helloworld; + +service Farewell { + rpc Goodbye (Message) returns (Response) {} +} diff --git a/prost-build/src/hello.proto b/prost-build/src/hello.proto new file mode 100644 index 0000000..8661cc3 --- /dev/null +++ b/prost-build/src/hello.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; + +import "types.proto"; + +package helloworld; + +service Greeting { + rpc Hello (Message) returns (Response) {} +} diff --git a/prost-build/src/ident.rs b/prost-build/src/ident.rs new file mode 100644 index 0000000..5c48d0f --- /dev/null +++ b/prost-build/src/ident.rs @@ -0,0 +1,162 @@ +//! Utility functions for working with identifiers. + +use heck::{ToSnakeCase, ToUpperCamelCase}; + +/// Converts a `camelCase` or `SCREAMING_SNAKE_CASE` identifier to a `lower_snake` case Rust field +/// identifier. +pub fn to_snake(s: &str) -> String { + let mut ident = s.to_snake_case(); + + // Use a raw identifier if the identifier matches a Rust keyword: + // https://doc.rust-lang.org/reference/keywords.html. + match ident.as_str() { + // 2015 strict keywords. + | "as" | "break" | "const" | "continue" | "else" | "enum" | "false" + | "fn" | "for" | "if" | "impl" | "in" | "let" | "loop" | "match" | "mod" | "move" | "mut" + | "pub" | "ref" | "return" | "static" | "struct" | "trait" | "true" + | "type" | "unsafe" | "use" | "where" | "while" + // 2018 strict keywords. + | "dyn" + // 2015 reserved keywords. 
+ | "abstract" | "become" | "box" | "do" | "final" | "macro" | "override" | "priv" | "typeof" + | "unsized" | "virtual" | "yield" + // 2018 reserved keywords. + | "async" | "await" | "try" => ident.insert_str(0, "r#"), + // the following keywords are not supported as raw identifiers and are therefore suffixed with an underscore. + "self" | "super" | "extern" | "crate" => ident += "_", + _ => (), + } + ident +} + +/// Converts a `snake_case` identifier to an `UpperCamel` case Rust type identifier. +pub fn to_upper_camel(s: &str) -> String { + let mut ident = s.to_upper_camel_case(); + + // Suffix an underscore for the `Self` Rust keyword as it is not allowed as raw identifier. + if ident == "Self" { + ident += "_"; + } + ident +} + +#[cfg(test)] +mod tests { + + #![allow(clippy::cognitive_complexity)] + + use super::*; + + #[test] + fn test_to_snake() { + assert_eq!("foo_bar", &to_snake("FooBar")); + assert_eq!("foo_bar_baz", &to_snake("FooBarBAZ")); + assert_eq!("foo_bar_baz", &to_snake("FooBarBAZ")); + assert_eq!("xml_http_request", &to_snake("XMLHttpRequest")); + assert_eq!("r#while", &to_snake("While")); + assert_eq!("fuzz_buster", &to_snake("FUZZ_BUSTER")); + assert_eq!("foo_bar_baz", &to_snake("foo_bar_baz")); + assert_eq!("fuzz_buster", &to_snake("FUZZ_buster")); + assert_eq!("fuzz", &to_snake("_FUZZ")); + assert_eq!("fuzz", &to_snake("_fuzz")); + assert_eq!("fuzz", &to_snake("_Fuzz")); + assert_eq!("fuzz", &to_snake("FUZZ_")); + assert_eq!("fuzz", &to_snake("fuzz_")); + assert_eq!("fuzz", &to_snake("Fuzz_")); + assert_eq!("fuz_z", &to_snake("FuzZ_")); + + // From test_messages_proto3.proto. 
+ assert_eq!("fieldname1", &to_snake("fieldname1")); + assert_eq!("field_name2", &to_snake("field_name2")); + assert_eq!("field_name3", &to_snake("_field_name3")); + assert_eq!("field_name4", &to_snake("field__name4_")); + assert_eq!("field0name5", &to_snake("field0name5")); + assert_eq!("field_0_name6", &to_snake("field_0_name6")); + assert_eq!("field_name7", &to_snake("fieldName7")); + assert_eq!("field_name8", &to_snake("FieldName8")); + assert_eq!("field_name9", &to_snake("field_Name9")); + assert_eq!("field_name10", &to_snake("Field_Name10")); + + // TODO(withoutboats/heck#3) + //assert_eq!("field_name11", &to_snake("FIELD_NAME11")); + assert_eq!("field_name12", &to_snake("FIELD_name12")); + assert_eq!("field_name13", &to_snake("__field_name13")); + assert_eq!("field_name14", &to_snake("__Field_name14")); + assert_eq!("field_name15", &to_snake("field__name15")); + assert_eq!("field_name16", &to_snake("field__Name16")); + assert_eq!("field_name17", &to_snake("field_name17__")); + assert_eq!("field_name18", &to_snake("Field_name18__")); + } + + #[test] + fn test_to_snake_raw_keyword() { + assert_eq!("r#as", &to_snake("as")); + assert_eq!("r#break", &to_snake("break")); + assert_eq!("r#const", &to_snake("const")); + assert_eq!("r#continue", &to_snake("continue")); + assert_eq!("r#else", &to_snake("else")); + assert_eq!("r#enum", &to_snake("enum")); + assert_eq!("r#false", &to_snake("false")); + assert_eq!("r#fn", &to_snake("fn")); + assert_eq!("r#for", &to_snake("for")); + assert_eq!("r#if", &to_snake("if")); + assert_eq!("r#impl", &to_snake("impl")); + assert_eq!("r#in", &to_snake("in")); + assert_eq!("r#let", &to_snake("let")); + assert_eq!("r#loop", &to_snake("loop")); + assert_eq!("r#match", &to_snake("match")); + assert_eq!("r#mod", &to_snake("mod")); + assert_eq!("r#move", &to_snake("move")); + assert_eq!("r#mut", &to_snake("mut")); + assert_eq!("r#pub", &to_snake("pub")); + assert_eq!("r#ref", &to_snake("ref")); + assert_eq!("r#return", 
&to_snake("return")); + assert_eq!("r#static", &to_snake("static")); + assert_eq!("r#struct", &to_snake("struct")); + assert_eq!("r#trait", &to_snake("trait")); + assert_eq!("r#true", &to_snake("true")); + assert_eq!("r#type", &to_snake("type")); + assert_eq!("r#unsafe", &to_snake("unsafe")); + assert_eq!("r#use", &to_snake("use")); + assert_eq!("r#where", &to_snake("where")); + assert_eq!("r#while", &to_snake("while")); + assert_eq!("r#dyn", &to_snake("dyn")); + assert_eq!("r#abstract", &to_snake("abstract")); + assert_eq!("r#become", &to_snake("become")); + assert_eq!("r#box", &to_snake("box")); + assert_eq!("r#do", &to_snake("do")); + assert_eq!("r#final", &to_snake("final")); + assert_eq!("r#macro", &to_snake("macro")); + assert_eq!("r#override", &to_snake("override")); + assert_eq!("r#priv", &to_snake("priv")); + assert_eq!("r#typeof", &to_snake("typeof")); + assert_eq!("r#unsized", &to_snake("unsized")); + assert_eq!("r#virtual", &to_snake("virtual")); + assert_eq!("r#yield", &to_snake("yield")); + assert_eq!("r#async", &to_snake("async")); + assert_eq!("r#await", &to_snake("await")); + assert_eq!("r#try", &to_snake("try")); + } + + #[test] + fn test_to_snake_non_raw_keyword() { + assert_eq!("self_", &to_snake("self")); + assert_eq!("super_", &to_snake("super")); + assert_eq!("extern_", &to_snake("extern")); + assert_eq!("crate_", &to_snake("crate")); + } + + #[test] + fn test_to_upper_camel() { + assert_eq!("", &to_upper_camel("")); + assert_eq!("F", &to_upper_camel("F")); + assert_eq!("Foo", &to_upper_camel("FOO")); + assert_eq!("FooBar", &to_upper_camel("FOO_BAR")); + assert_eq!("FooBar", &to_upper_camel("_FOO_BAR")); + assert_eq!("FooBar", &to_upper_camel("FOO_BAR_")); + assert_eq!("FooBar", &to_upper_camel("_FOO_BAR_")); + assert_eq!("FuzzBuster", &to_upper_camel("fuzzBuster")); + assert_eq!("FuzzBuster", &to_upper_camel("FuzzBuster")); + assert_eq!("Self_", &to_upper_camel("self")); + } +} diff --git a/prost-build/src/lib.rs b/prost-build/src/lib.rs new 
file mode 100644 index 0000000..7a81dab --- /dev/null +++ b/prost-build/src/lib.rs @@ -0,0 +1,1318 @@ +#![doc(html_root_url = "https://docs.rs/prost-build/0.10.1")] +#![allow(clippy::option_as_ref_deref)] + +//! `prost-build` compiles `.proto` files into Rust. +//! +//! `prost-build` is designed to be used for build-time code generation as part of a Cargo +//! build-script. +//! +//! ## Example +//! +//! Let's create a small crate, `snazzy`, that defines a collection of +//! snazzy new items in a protobuf file. +//! +//! ```bash +//! $ cargo new snazzy && cd snazzy +//! ``` +//! +//! First, add `prost-build`, `prost` and its public dependencies to `Cargo.toml` +//! (see [crates.io](https://crates.io/crates/prost) for the current versions): +//! +//! ```toml +//! [dependencies] +//! bytes = +//! prost = +//! +//! [build-dependencies] +//! prost-build = { version = } +//! ``` +//! +//! Next, add `src/items.proto` to the project: +//! +//! ```proto +//! syntax = "proto3"; +//! +//! package snazzy.items; +//! +//! // A snazzy new shirt! +//! message Shirt { +//! enum Size { +//! SMALL = 0; +//! MEDIUM = 1; +//! LARGE = 2; +//! } +//! +//! string color = 1; +//! Size size = 2; +//! } +//! ``` +//! +//! To generate Rust code from `items.proto`, we use `prost-build` in the crate's +//! `build.rs` build-script: +//! +//! ```rust,no_run +//! use std::io::Result; +//! fn main() -> Result<()> { +//! prost_build::compile_protos(&["src/items.proto"], &["src/"])?; +//! Ok(()) +//! } +//! ``` +//! +//! And finally, in `lib.rs`, include the generated code: +//! +//! ```rust,ignore +//! // Include the `items` module, which is generated from items.proto. +//! pub mod items { +//! include!(concat!(env!("OUT_DIR"), "/snazzy.items.rs")); +//! } +//! +//! pub fn create_large_shirt(color: String) -> items::Shirt { +//! let mut shirt = items::Shirt::default(); +//! shirt.color = color; +//! shirt.set_size(items::shirt::Size::Large); +//! shirt +//! } +//! ``` +//! +//! That's it! 
Run `cargo doc` to see documentation for the generated code. The full +//! example project can be found on [GitHub](https://github.com/danburkert/snazzy). +//! +//! ### Cleaning up Markdown in code docs +//! +//! If you are using protobuf files from third parties, where the author of the protobuf +//! is not treating comments as Markdown, or is, but has codeblocks in their docs, +//! then you may need to clean up the documentation in order that `cargo test --doc` +//! will not fail spuriously, and that `cargo doc` doesn't attempt to render the +//! codeblocks as Rust code. +//! +//! To do this, in your `Cargo.toml`, add `features = ["cleanup-markdown"]` to the inclusion +//! of the `prost-build` crate and when your code is generated, the code docs will automatically +//! be cleaned up a bit. +//! +//! ## Sourcing `protoc` +//! +//! `prost-build` depends on the Protocol Buffers compiler, `protoc`, to parse `.proto` files into +//! a representation that can be transformed into Rust. If set, `prost-build` uses the `PROTOC` and +//! `PROTOC_INCLUDE` environment variables for locating `protoc` and the Protobuf includes +//! directory. For example, on a macOS system where Protobuf is installed with Homebrew, set the +//! environment to: +//! +//! ```bash +//! PROTOC=/usr/local/bin/protoc +//! PROTOC_INCLUDE=/usr/local/include +//! ``` +//! +//! and in a typical Linux installation: +//! +//! ```bash +//! PROTOC=/usr/bin/protoc +//! PROTOC_INCLUDE=/usr/include +//! ``` +//! +//! If no `PROTOC` environment variable is set then `prost-build` will search the +//! current path for `protoc` or `protoc.exe`. If `protoc` is not found via these +//! two methods then `prost-build` will attempt to compile `protoc` from the bundled +//! source. +//! +//! If you would not like `prost-build` to not compile `protoc` from source ever then +//! ensure you have set `PROTOC_NO_VENDOR` environment variable as this will disable +//! 
compiling from source even if the `vendored` feature flag is enabled. +//! +//! If you would like to always compile from source then setting the `vendored` feature +//! flag will force `prost-build` to always build `protoc` from source. +//! +//! If `PROTOC_INCLUDE` is not found in the environment, then the Protobuf include directory +//! bundled in the prost-build crate is be used. +//! +//! ### Compiling `protoc` from source +//! +//! Compiling `protoc` from source requires a few external dependencies. Currently, +//! `prost-build` uses `cmake` to build `protoc`. For more information check out the +//! [protobuf build instructions][protobuf-build]. +//! +//! [protobuf-build]: https://github.com/protocolbuffers/protobuf/blob/master/src/README.md + +mod ast; +mod code_generator; +mod extern_paths; +mod ident; +mod message_graph; +mod path; + +use std::collections::HashMap; +use std::default; +use std::env; +use std::ffi::{OsStr, OsString}; +use std::fmt; +use std::fs; +use std::io::{Error, ErrorKind, Result, Write}; +use std::ops::RangeToInclusive; +use std::path::{Path, PathBuf}; +use std::process::Command; + +use log::trace; +use prost::Message; +use prost_types::{FileDescriptorProto, FileDescriptorSet}; + +pub use crate::ast::{Comments, Method, Service}; +use crate::code_generator::CodeGenerator; +use crate::extern_paths::ExternPaths; +use crate::ident::to_snake; +use crate::message_graph::MessageGraph; +use crate::path::PathMap; + +/// A service generator takes a service descriptor and generates Rust code. +/// +/// `ServiceGenerator` can be used to generate application-specific interfaces +/// or implementations for Protobuf service definitions. +/// +/// Service generators are registered with a code generator using the +/// `Config::service_generator` method. +/// +/// A viable scenario is that an RPC framework provides a service generator. 
It generates a trait +/// describing methods of the service and some glue code to call the methods of the trait, defining +/// details like how errors are handled or if it is asynchronous. Then the user provides an +/// implementation of the generated trait in the application code and plugs it into the framework. +/// +/// Such framework isn't part of Prost at present. +pub trait ServiceGenerator { + /// Generates a Rust interface or implementation for a service, writing the + /// result to `buf`. + fn generate(&mut self, service: Service, buf: &mut String); + + /// Finalizes the generation process. + /// + /// In case there's something that needs to be output at the end of the generation process, it + /// goes here. Similar to [`generate`](#method.generate), the output should be appended to + /// `buf`. + /// + /// An example can be a module or other thing that needs to appear just once, not for each + /// service generated. + /// + /// This still can be called multiple times in a lifetime of the service generator, because it + /// is called once per `.proto` file. + /// + /// The default implementation is empty and does nothing. + fn finalize(&mut self, _buf: &mut String) {} + + /// Finalizes the generation process for an entire protobuf package. + /// + /// This differs from [`finalize`](#method.finalize) by where (and how often) it is called + /// during the service generator life cycle. This method is called once per protobuf package, + /// making it ideal for grouping services within a single package spread across multiple + /// `.proto` files. + /// + /// The default implementation is empty and does nothing. + fn finalize_package(&mut self, _package: &str, _buf: &mut String) {} +} + +/// The map collection type to output for Protobuf `map` fields. +#[non_exhaustive] +#[derive(Clone, Copy, Debug, PartialEq)] +enum MapType { + /// The [`std::collections::HashMap`] type. + HashMap, + /// The [`std::collections::BTreeMap`] type. 
+ BTreeMap, +} + +impl Default for MapType { + fn default() -> MapType { + MapType::HashMap + } +} + +/// The bytes collection type to output for Protobuf `bytes` fields. +#[non_exhaustive] +#[derive(Clone, Debug, PartialEq)] +enum BytesType { + /// The [`ntex_bytes::Bytes`] type. + Bytes, + /// Custom type that implements [`ntex_grpc::BytesAdater`] trait + Custom(String), +} + +impl Default for BytesType { + fn default() -> BytesType { + BytesType::Bytes + } +} + +/// Configuration options for Protobuf code generation. +/// +/// This configuration builder can be used to set non-default code generation options. +pub struct Config { + file_descriptor_set_path: Option, + service_generator: Option>, + map_type: PathMap, + bytes_type: PathMap, + type_attributes: PathMap, + field_attributes: PathMap, + prost_types: bool, + strip_enum_prefix: bool, + out_dir: Option, + extern_paths: Vec<(String, String)>, + default_package_filename: String, + protoc_args: Vec, + disable_comments: PathMap<()>, + skip_protoc_run: bool, + include_file: Option, +} + +impl Config { + /// Creates a new code generator configuration with default options. + pub fn new() -> Config { + Config::default() + } + + /// Configure the code generator to generate Rust [`BTreeMap`][1] fields for Protobuf + /// [`map`][2] type fields. + /// + /// # Arguments + /// + /// **`paths`** - paths to specific fields, messages, or packages which should use a Rust + /// `BTreeMap` for Protobuf `map` fields. Paths are specified in terms of the Protobuf type + /// name (not the generated Rust type name). Paths with a leading `.` are treated as fully + /// qualified names. Paths without a leading `.` are treated as relative, and are suffix + /// matched on the fully qualified field name. If a Protobuf map field matches any of the + /// paths, a Rust `BTreeMap` field is generated instead of the default [`HashMap`][3]. 
+ /// + /// The matching is done on the Protobuf names, before converting to Rust-friendly casing + /// standards. + /// + /// # Examples + /// + /// ```rust + /// # let mut config = prost_build::Config::new(); + /// // Match a specific field in a message type. + /// config.btree_map(&[".my_messages.MyMessageType.my_map_field"]); + /// + /// // Match all map fields in a message type. + /// config.btree_map(&[".my_messages.MyMessageType"]); + /// + /// // Match all map fields in a package. + /// config.btree_map(&[".my_messages"]); + /// + /// // Match all map fields. Specially useful in `no_std` contexts. + /// config.btree_map(&["."]); + /// + /// // Match all map fields in a nested message. + /// config.btree_map(&[".my_messages.MyMessageType.MyNestedMessageType"]); + /// + /// // Match all fields named 'my_map_field'. + /// config.btree_map(&["my_map_field"]); + /// + /// // Match all fields named 'my_map_field' in messages named 'MyMessageType', regardless of + /// // package or nesting. + /// config.btree_map(&["MyMessageType.my_map_field"]); + /// + /// // Match all fields named 'my_map_field', and all fields in the 'foo.bar' package. + /// config.btree_map(&["my_map_field", ".foo.bar"]); + /// ``` + /// + /// [1]: https://doc.rust-lang.org/std/collections/struct.BTreeMap.html + /// [2]: https://developers.google.com/protocol-buffers/docs/proto3#maps + /// [3]: https://doc.rust-lang.org/std/collections/struct.HashMap.html + pub fn btree_map(&mut self, paths: I) -> &mut Self + where + I: IntoIterator, + S: AsRef, + { + self.map_type.clear(); + for matcher in paths { + self.map_type + .insert(matcher.as_ref().to_string(), MapType::BTreeMap); + } + self + } + + /// Configure the code generator to generate Rust type fields for Protobuf [`bytes`][2] type fields. + /// + /// # Arguments + /// + /// **`paths`** - paths to specific fields, messages, or packages which should use a Rust + /// `Bytes` for Protobuf `bytes` fields. 
Paths are specified in terms of the Protobuf type + /// name (not the generated Rust type name). Paths with a leading `.` are treated as fully + /// qualified names. Paths without a leading `.` are treated as relative, and are suffix + /// matched on the fully qualified field name. If a Protobuf map field matches any of the + /// paths, a Rust `Bytes` field is generated instead of the default [`Vec`][3]. + /// + /// The matching is done on the Protobuf names, before converting to Rust-friendly casing + /// standards. + /// + /// # Examples + /// + /// ```rust + /// # let mut config = prost_build::Config::new(); + /// // Match a specific field in a message type. + /// config.bytes(&[".my_messages.MyMessageType.my_bytes_field"]); + /// + /// // Match all bytes fields in a message type. + /// config.bytes(&[".my_messages.MyMessageType"]); + /// + /// // Match all bytes fields in a package. + /// config.bytes(&[".my_messages"]); + /// + /// // Match all bytes fields. Specially useful in `no_std` contexts. + /// config.bytes(&["."]); + /// + /// // Match all bytes fields in a nested message. + /// config.bytes(&[".my_messages.MyMessageType.MyNestedMessageType"]); + /// + /// // Match all fields named 'my_bytes_field'. + /// config.bytes(&["my_bytes_field"]); + /// + /// // Match all fields named 'my_bytes_field' in messages named 'MyMessageType', regardless of + /// // package or nesting. + /// config.bytes(&["MyMessageType.my_bytes_field"]); + /// + /// // Match all fields named 'my_bytes_field', and all fields in the 'foo.bar' package. 
+ /// config.bytes(&["my_bytes_field", ".foo.bar"]); + /// ``` + /// + /// [1]: https://docs.rs/bytes/latest/bytes/struct.Bytes.html + /// [2]: https://developers.google.com/protocol-buffers/docs/proto3#scalar + /// [3]: https://doc.rust-lang.org/std/vec/struct.Vec.html + pub fn bytes(&mut self, paths: I, tp: &str) -> &mut Self + where + I: IntoIterator, + S: AsRef, + { + self.bytes_type.clear(); + for matcher in paths { + self.bytes_type.insert( + matcher.as_ref().to_string(), + BytesType::Custom(tp.to_string()), + ); + } + self + } + + /// Add additional attribute to matched fields. + /// + /// # Arguments + /// + /// **`path`** - a path matching any number of fields. These fields get the attribute. + /// For details about matching fields see [`btree_map`](#method.btree_map). + /// + /// **`attribute`** - an arbitrary string that'll be placed before each matched field. The + /// expected usage are additional attributes, usually in concert with whole-type + /// attributes set with [`type_attribute`](method.type_attribute), but it is not + /// checked and anything can be put there. + /// + /// Note that the calls to this method are cumulative ‒ if multiple paths from multiple calls + /// match the same field, the field gets all the corresponding attributes. + /// + /// # Examples + /// + /// ```rust + /// # let mut config = prost_build::Config::new(); + /// // Prost renames fields named `in` to `in_`. But if serialized through serde, + /// // they should as `in`. + /// config.field_attribute("in", "#[serde(rename = \"in\")]"); + /// ``` + pub fn field_attribute(&mut self, path: P, attribute: A) -> &mut Self + where + P: AsRef, + A: AsRef, + { + self.field_attributes + .insert(path.as_ref().to_string(), attribute.as_ref().to_string()); + self + } + + /// Add additional attribute to matched messages, enums and one-ofs. + /// + /// # Arguments + /// + /// **`paths`** - a path matching any number of types. 
It works the same way as in + /// [`btree_map`](#method.btree_map), just with the field name omitted. + /// + /// **`attribute`** - an arbitrary string to be placed before each matched type. The + /// expected usage are additional attributes, but anything is allowed. + /// + /// The calls to this method are cumulative. They don't overwrite previous calls and if a + /// type is matched by multiple calls of the method, all relevant attributes are added to + /// it. + /// + /// For things like serde it might be needed to combine with [field + /// attributes](#method.field_attribute). + /// + /// # Examples + /// + /// ```rust + /// # let mut config = prost_build::Config::new(); + /// // Nothing around uses floats, so we can derive real `Eq` in addition to `PartialEq`. + /// config.type_attribute(".", "#[derive(Eq)]"); + /// // Some messages want to be serializable with serde as well. + /// config.type_attribute("my_messages.MyMessageType", + /// "#[derive(Serialize)] #[serde(rename_all = \"snake_case\")]"); + /// config.type_attribute("my_messages.MyMessageType.MyNestedMessageType", + /// "#[derive(Serialize)] #[serde(rename_all = \"snake_case\")]"); + /// ``` + /// + /// # Oneof fields + /// + /// The `oneof` fields don't have a type name of their own inside Protobuf. Therefore, the + /// field name can be used both with `type_attribute` and `field_attribute` ‒ the first is + /// placed before the `enum` type definition, the other before the field inside corresponding + /// message `struct`. + /// + /// In other words, to place an attribute on the `enum` implementing the `oneof`, the match + /// would look like `my_messages.MyMessageType.oneofname`. + pub fn type_attribute(&mut self, path: P, attribute: A) -> &mut Self + where + P: AsRef, + A: AsRef, + { + self.type_attributes + .insert(path.as_ref().to_string(), attribute.as_ref().to_string()); + self + } + + /// Configures the code generator to use the provided service generator. 
+ pub fn service_generator( + &mut self, + service_generator: Box, + ) -> &mut Self { + self.service_generator = Some(service_generator); + self + } + + /// Configures the code generator to not use the `prost_types` crate for Protobuf well-known + /// types, and instead generate Protobuf well-known types from their `.proto` definitions. + pub fn compile_well_known_types(&mut self) -> &mut Self { + self.prost_types = false; + self + } + + /// Configures the code generator to omit documentation comments on generated Protobuf types. + /// + /// # Example + /// + /// Occasionally `.proto` files contain code blocks which are not valid Rust. To avoid doctest + /// failures, annotate the invalid code blocks with an [`ignore` or `no_run` attribute][1], or + /// disable doctests for the crate with a [Cargo.toml entry][2]. If neither of these options + /// are possible, then omit comments on generated code during doctest builds: + /// + /// ```rust,ignore + /// let mut config = prost_build::Config::new(); + /// config.disable_comments("."); + /// config.compile_protos(&["src/frontend.proto", "src/backend.proto"], &["src"])?; + /// ``` + /// + /// As with other options which take a set of paths, comments can be disabled on a per-package + /// or per-symbol basis. + /// + /// [1]: https://doc.rust-lang.org/rustdoc/documentation-tests.html#attributes + /// [2]: https://doc.rust-lang.org/cargo/reference/cargo-targets.html#configuring-a-target + pub fn disable_comments(&mut self, paths: I) -> &mut Self + where + I: IntoIterator, + S: AsRef, + { + self.disable_comments.clear(); + for matcher in paths { + self.disable_comments + .insert(matcher.as_ref().to_string(), ()); + } + self + } + + /// Declare an externally provided Protobuf package or type. + /// + /// `extern_path` allows `prost` types in external crates to be referenced in generated code. 
+ /// + /// When `prost` compiles a `.proto` which includes an import of another `.proto`, it will + /// automatically recursively compile the imported file as well. `extern_path` can be used + /// to instead substitute types from an external crate. + /// + /// # Example + /// + /// As an example, consider a crate, `uuid`, with a `prost`-generated `Uuid` type: + /// + /// ```proto + /// // uuid.proto + /// + /// syntax = "proto3"; + /// package uuid; + /// + /// message Uuid { + /// string uuid_str = 1; + /// } + /// ``` + /// + /// The `uuid` crate implements some traits for `Uuid`, and publicly exports it: + /// + /// ```rust,ignore + /// // lib.rs in the uuid crate + /// + /// include!(concat!(env!("OUT_DIR"), "/uuid.rs")); + /// + /// pub trait DoSomething { + /// fn do_it(&self); + /// } + /// + /// impl DoSomething for Uuid { + /// fn do_it(&self) { + /// println!("Done"); + /// } + /// } + /// ``` + /// + /// A separate crate, `my_application`, uses `prost` to generate message types which reference + /// `Uuid`: + /// + /// ```proto + /// // my_application.proto + /// + /// syntax = "proto3"; + /// package my_application; + /// + /// import "uuid.proto"; + /// + /// message MyMessage { + /// uuid.Uuid message_id = 1; + /// string some_payload = 2; + /// } + /// ``` + /// + /// Additionally, `my_application` depends on the trait impls provided by the `uuid` crate: + /// + /// ```rust,ignore + /// // `main.rs` of `my_application` + /// + /// use uuid::{DoSomething, Uuid}; + /// + /// include!(concat!(env!("OUT_DIR"), "/my_application.rs")); + /// + /// pub fn process_message(msg: MyMessage) { + /// if let Some(uuid) = msg.message_id { + /// uuid.do_it(); + /// } + /// } + /// ``` + /// + /// Without configuring `uuid` as an external path in `my_application`'s `build.rs`, `prost` + /// would compile a completely separate version of the `Uuid` type, and `process_message` would + /// fail to compile. 
However, if `my_application` configures `uuid` as an extern path with a + /// call to `.extern_path(".uuid", "::uuid")`, `prost` will use the external type instead of + /// compiling a new version of `Uuid`. Note that the configuration could also be specified as + /// `.extern_path(".uuid.Uuid", "::uuid::Uuid")` if only the `Uuid` type were externally + /// provided, and not the whole `uuid` package. + /// + /// # Usage + /// + /// `extern_path` takes a fully-qualified Protobuf path, and the corresponding Rust path that + /// it will be substituted with in generated code. The Protobuf path can refer to a package or + /// a type, and the Rust path should correspondingly refer to a Rust module or type. + /// + /// ```rust + /// # let mut config = prost_build::Config::new(); + /// // Declare the `uuid` Protobuf package and all nested packages and types as externally + /// // provided by the `uuid` crate. + /// config.extern_path(".uuid", "::uuid"); + /// + /// // Declare the `foo.bar.baz` Protobuf package and all nested packages and types as + /// // externally provided by the `foo_bar_baz` crate. + /// config.extern_path(".foo.bar.baz", "::foo_bar_baz"); + /// + /// // Declare the `uuid.Uuid` Protobuf type (and all nested types) as externally provided + /// // by the `uuid` crate's `Uuid` type. + /// config.extern_path(".uuid.Uuid", "::uuid::Uuid"); + /// ``` + pub fn extern_path(&mut self, proto_path: P1, rust_path: P2) -> &mut Self + where + P1: Into, + P2: Into, + { + self.extern_paths + .push((proto_path.into(), rust_path.into())); + self + } + + /// When set, the `FileDescriptorSet` generated by `protoc` is written to the provided + /// filesystem path. + /// + /// This option can be used in conjunction with the [`include_bytes!`] macro and the types in + /// the `prost-types` crate for implementing reflection capabilities, among other things. 
+ /// + /// ## Example + /// + /// In `build.rs`: + /// + /// ```rust + /// # use std::env; + /// # use std::path::PathBuf; + /// # let mut config = prost_build::Config::new(); + /// config.file_descriptor_set_path( + /// PathBuf::from(env::var("OUT_DIR").expect("OUT_DIR environment variable not set")) + /// .join("file_descriptor_set.bin")); + /// ``` + /// + /// In `lib.rs`: + /// + /// ```rust,ignore + /// let file_descriptor_set_bytes = include_bytes!(concat!(env!("OUT_DIR"), "/file_descriptor_set.bin")); + /// let file_descriptor_set = prost_types::FileDescriptorSet::decode(&file_descriptor_set_bytes[..]).unwrap(); + /// ``` + pub fn file_descriptor_set_path

(&mut self, path: P) -> &mut Self + where + P: Into, + { + self.file_descriptor_set_path = Some(path.into()); + self + } + + /// In combination with with `file_descriptor_set_path`, this can be used to provide a file + /// descriptor set as an input file, rather than having prost-build generate the file by calling + /// protoc. Prost-build does require that the descriptor set was generated with + /// --include_source_info. + /// + /// In `build.rs`: + /// + /// ```rust + /// # let mut config = prost_build::Config::new(); + /// config.file_descriptor_set_path("path/from/build/system") + /// .skip_protoc_run() + /// .compile_protos(&["src/items.proto"], &["src/"]); + /// ``` + /// + pub fn skip_protoc_run(&mut self) -> &mut Self { + self.skip_protoc_run = true; + self + } + + /// Configures the code generator to not strip the enum name from variant names. + /// + /// Protobuf enum definitions commonly include the enum name as a prefix of every variant name. + /// This style is non-idiomatic in Rust, so by default `prost` strips the enum name prefix from + /// variants which include it. Configuring this option prevents `prost` from stripping the + /// prefix. + pub fn retain_enum_prefix(&mut self) -> &mut Self { + self.strip_enum_prefix = false; + self + } + + /// Configures the output directory where generated Rust files will be written. + /// + /// If unset, defaults to the `OUT_DIR` environment variable. `OUT_DIR` is set by Cargo when + /// executing build scripts, so `out_dir` typically does not need to be configured. + pub fn out_dir

(&mut self, path: P) -> &mut Self + where + P: Into, + { + self.out_dir = Some(path.into()); + self + } + + /// Configures what filename protobufs with no package definition are written to. + pub fn default_package_filename(&mut self, filename: S) -> &mut Self + where + S: Into, + { + self.default_package_filename = filename.into(); + self + } + + /// Add an argument to the `protoc` protobuf compilation invocation. + /// + /// # Example `build.rs` + /// + /// ```rust,no_run + /// # use std::io::Result; + /// fn main() -> Result<()> { + /// let mut prost_build = prost_build::Config::new(); + /// // Enable a protoc experimental feature. + /// prost_build.protoc_arg("--experimental_allow_proto3_optional"); + /// prost_build.compile_protos(&["src/frontend.proto", "src/backend.proto"], &["src"])?; + /// Ok(()) + /// } + /// ``` + pub fn protoc_arg(&mut self, arg: S) -> &mut Self + where + S: AsRef, + { + self.protoc_args.push(arg.as_ref().to_owned()); + self + } + + /// Configures the optional module filename for easy inclusion of all generated Rust files + /// + /// If set, generates a file (inside the `OUT_DIR` or `out_dir()` as appropriate) which contains + /// a set of `pub mod XXX` statements combining to load all Rust files generated. This can allow + /// for a shortcut where multiple related proto files have been compiled together resulting in + /// a semi-complex set of includes. + /// + /// Turning a need for: + /// + /// ```rust,no_run,ignore + /// pub mod Foo { + /// pub mod Bar { + /// include!(concat!(env!("OUT_DIR"), "/foo.bar.rs")); + /// } + /// pub mod Baz { + /// include!(concat!(env!("OUT_DIR"), "/foo.baz.rs")); + /// } + /// } + /// ``` + /// + /// Into the simpler: + /// + /// ```rust,no_run,ignore + /// include!(concat!(env!("OUT_DIR"), "/_includes.rs")); + /// ``` + pub fn include_file

(&mut self, path: P) -> &mut Self + where + P: Into, + { + self.include_file = Some(path.into()); + self + } + + /// Compile `.proto` files into Rust files during a Cargo build with additional code generator + /// configuration options. + /// + /// This method is like the `prost_build::compile_protos` function, with the added ability to + /// specify non-default code generation options. See that function for more information about + /// the arguments and generated outputs. + /// + /// The `protos` and `includes` arguments are ignored if `skip_protoc_run` is specified. + /// + /// # Example `build.rs` + /// + /// ```rust,no_run + /// # use std::io::Result; + /// fn main() -> Result<()> { + /// let mut prost_build = prost_build::Config::new(); + /// prost_build.btree_map(&["."]); + /// prost_build.compile_protos(&["src/frontend.proto", "src/backend.proto"], &["src"])?; + /// Ok(()) + /// } + /// ``` + pub fn compile_protos( + &mut self, + protos: &[impl AsRef], + includes: &[impl AsRef], + ) -> Result<()> { + let mut target_is_env = false; + let target: PathBuf = self.out_dir.clone().map(Ok).unwrap_or_else(|| { + env::var_os("OUT_DIR") + .ok_or_else(|| { + Error::new(ErrorKind::Other, "OUT_DIR environment variable is not set") + }) + .map(|val| { + target_is_env = true; + Into::into(val) + }) + })?; + + // TODO: This should probably emit 'rerun-if-changed=PATH' directives for cargo, however + // according to [1] if any are output then those paths replace the default crate root, + // which is undesirable. Figure out how to do it in an additive way; perhaps gcc-rs has + // this figured out. 
+ // [1]: http://doc.crates.io/build-script.html#outputs-of-the-build-script + + let tmp; + let file_descriptor_set_path = if let Some(path) = &self.file_descriptor_set_path { + path.clone() + } else { + if self.skip_protoc_run { + return Err(Error::new( + ErrorKind::Other, + "file_descriptor_set_path is required with skip_protoc_run", + )); + } + tmp = tempfile::Builder::new().prefix("prost-build").tempdir()?; + tmp.path().join("prost-descriptor-set") + }; + + if !self.skip_protoc_run { + let mut cmd = Command::new(protoc()); + cmd.arg("--include_imports") + .arg("--include_source_info") + .arg("-o") + .arg(&file_descriptor_set_path); + + for include in includes { + cmd.arg("-I").arg(include.as_ref()); + } + + // Set the protoc include after the user includes in case the user wants to + // override one of the built-in .protos. + cmd.arg("-I").arg(protoc_include()); + + for arg in &self.protoc_args { + cmd.arg(arg); + } + + for proto in protos { + cmd.arg(proto.as_ref()); + } + + let output = cmd.output().map_err(|error| { + Error::new( + error.kind(), + format!("failed to invoke protoc (hint: https://docs.rs/prost-build/#sourcing-protoc): {}", error), + ) + })?; + + if !output.status.success() { + return Err(Error::new( + ErrorKind::Other, + format!("protoc failed: {}", String::from_utf8_lossy(&output.stderr)), + )); + } + } + + let buf = fs::read(file_descriptor_set_path)?; + let file_descriptor_set = FileDescriptorSet::decode(&*buf).map_err(|error| { + Error::new( + ErrorKind::InvalidInput, + format!("invalid FileDescriptorSet: {}", error), + ) + })?; + + let requests = file_descriptor_set + .file + .into_iter() + .map(|descriptor| { + ( + Module::from_protobuf_package_name(descriptor.package()), + descriptor, + ) + }) + .collect::>(); + + let file_names = requests + .iter() + .map(|req| { + ( + req.0.clone(), + req.0.to_file_name_or(&self.default_package_filename), + ) + }) + .collect::>(); + + let modules = self.generate(requests)?; + for (module, content) in 
&modules { + let file_name = file_names + .get(module) + .expect("every module should have a filename"); + let output_path = target.join(file_name); + + let previous_content = fs::read(&output_path); + + if previous_content + .map(|previous_content| previous_content == content.as_bytes()) + .unwrap_or(false) + { + trace!("unchanged: {:?}", file_name); + } else { + trace!("writing: {:?}", file_name); + fs::write(output_path, content)?; + } + } + + if let Some(ref include_file) = self.include_file { + trace!("Writing include file: {:?}", target.join(include_file)); + let mut file = fs::File::create(target.join(include_file))?; + self.write_includes( + modules.keys().collect(), + &mut file, + 0, + if target_is_env { None } else { Some(&target) }, + )?; + file.flush()?; + } + + Ok(()) + } + + fn write_includes( + &self, + mut entries: Vec<&Module>, + outfile: &mut fs::File, + depth: usize, + basepath: Option<&PathBuf>, + ) -> Result { + let mut written = 0; + while !entries.is_empty() { + let modident = entries[0].part(depth); + let matching: Vec<&Module> = entries + .iter() + .filter(|&v| v.part(depth) == modident) + .copied() + .collect(); + { + // Will NLL sort this mess out? + let _temp = entries + .drain(..) 
+ .filter(|&v| v.part(depth) != modident) + .collect(); + entries = _temp; + } + self.write_line(outfile, depth, &format!("pub mod {} {{", modident))?; + let subwritten = self.write_includes( + matching + .iter() + .filter(|v| v.len() > depth + 1) + .copied() + .collect(), + outfile, + depth + 1, + basepath, + )?; + written += subwritten; + if subwritten != matching.len() { + let modname = matching[0].to_partial_file_name(..=depth); + if basepath.is_some() { + self.write_line( + outfile, + depth + 1, + &format!("include!(\"{}.rs\");", modname), + )?; + } else { + self.write_line( + outfile, + depth + 1, + &format!("include!(concat!(env!(\"OUT_DIR\"), \"/{}.rs\"));", modname), + )?; + } + written += 1; + } + + self.write_line(outfile, depth, "}")?; + } + Ok(written) + } + + fn write_line(&self, outfile: &mut fs::File, depth: usize, line: &str) -> Result<()> { + outfile.write_all(format!("{}{}\n", (" ").to_owned().repeat(depth), line).as_bytes()) + } + + /// Processes a set of modules and file descriptors, returning a map of modules to generated + /// code contents. + /// + /// This is generally used when control over the output should not be managed by Prost, + /// such as in a flow for a `protoc` code generating plugin. When compiling as part of a + /// `build.rs` file, instead use [`compile_protos()`]. 
+ pub fn generate( + &mut self, + requests: Vec<(Module, FileDescriptorProto)>, + ) -> Result> { + let mut modules = HashMap::new(); + let mut packages = HashMap::new(); + + let message_graph = MessageGraph::new(requests.iter().map(|x| &x.1)) + .map_err(|error| Error::new(ErrorKind::InvalidInput, error))?; + let extern_paths = ExternPaths::new(&self.extern_paths, self.prost_types) + .map_err(|error| Error::new(ErrorKind::InvalidInput, error))?; + + for request in requests { + // Only record packages that have services + if !request.1.service.is_empty() { + packages.insert(request.0.clone(), request.1.package().to_string()); + } + + let buf = modules.entry(request.0).or_insert_with(String::new); + CodeGenerator::generate(self, &message_graph, &extern_paths, request.1, buf); + } + + if let Some(ref mut service_generator) = self.service_generator { + for (module, package) in packages { + let buf = modules.get_mut(&module).unwrap(); + service_generator.finalize_package(&package, buf); + } + } + + Ok(modules) + } +} + +impl default::Default for Config { + fn default() -> Config { + Config { + file_descriptor_set_path: None, + service_generator: None, + map_type: PathMap::default(), + bytes_type: PathMap::default(), + type_attributes: PathMap::default(), + field_attributes: PathMap::default(), + prost_types: true, + strip_enum_prefix: true, + out_dir: None, + extern_paths: Vec::new(), + default_package_filename: "_".to_string(), + protoc_args: Vec::new(), + disable_comments: PathMap::default(), + skip_protoc_run: false, + include_file: None, + } + } +} + +impl fmt::Debug for Config { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.debug_struct("Config") + .field("file_descriptor_set_path", &self.file_descriptor_set_path) + .field("service_generator", &self.service_generator.is_some()) + .field("map_type", &self.map_type) + .field("bytes_type", &self.bytes_type) + .field("type_attributes", &self.type_attributes) + .field("field_attributes", 
&self.field_attributes) + .field("prost_types", &self.prost_types) + .field("strip_enum_prefix", &self.strip_enum_prefix) + .field("out_dir", &self.out_dir) + .field("extern_paths", &self.extern_paths) + .field("default_package_filename", &self.default_package_filename) + .field("protoc_args", &self.protoc_args) + .field("disable_comments", &self.disable_comments) + .finish() + } +} + +/// A Rust module path for a Protobuf package. +#[derive(Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)] +pub struct Module { + components: Vec, +} + +impl Module { + /// Construct a module path from an iterator of parts. + pub fn from_parts(parts: I) -> Self + where + I: IntoIterator, + I::Item: Into, + { + Self { + components: parts.into_iter().map(|s| s.into()).collect(), + } + } + + /// Construct a module path from a Protobuf package name. + /// + /// Constituent parts are automatically converted to snake case in order to follow + /// Rust module naming conventions. + pub fn from_protobuf_package_name(name: &str) -> Self { + Self { + components: name + .split('.') + .filter(|s| !s.is_empty()) + .map(to_snake) + .collect(), + } + } + + /// An iterator over the parts of the path. + pub fn parts(&self) -> impl Iterator { + self.components.iter().map(|s| s.as_str()) + } + + /// Format the module path into a filename for generated Rust code. + /// + /// If the module path is empty, `default` is used to provide the root of the filename. + pub fn to_file_name_or(&self, default: &str) -> String { + let mut root = if self.components.is_empty() { + default.to_owned() + } else { + self.components.join(".") + }; + + root.push_str(".rs"); + + root + } + + /// The number of parts in the module's path. + pub fn len(&self) -> usize { + self.components.len() + } + + /// Whether the module's path contains any components. 
+ pub fn is_empty(&self) -> bool { + self.components.is_empty() + } + + fn to_partial_file_name(&self, range: RangeToInclusive) -> String { + self.components[range].join(".") + } + + fn part(&self, idx: usize) -> &str { + self.components[idx].as_str() + } +} + +impl fmt::Display for Module { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let mut parts = self.parts(); + if let Some(first) = parts.next() { + f.write_str(first)?; + } + for part in parts { + f.write_str("::")?; + f.write_str(part)?; + } + Ok(()) + } +} + +/// Compile `.proto` files into Rust files during a Cargo build. +/// +/// The generated `.rs` files are written to the Cargo `OUT_DIR` directory, suitable for use with +/// the [include!][1] macro. See the [Cargo `build.rs` code generation][2] example for more info. +/// +/// This function should be called in a project's `build.rs`. +/// +/// # Arguments +/// +/// **`protos`** - Paths to `.proto` files to compile. Any transitively [imported][3] `.proto` +/// files are automatically be included. +/// +/// **`includes`** - Paths to directories in which to search for imports. Directories are searched +/// in order. The `.proto` files passed in **`protos`** must be found in one of the provided +/// include directories. +/// +/// # Errors +/// +/// This function can fail for a number of reasons: +/// +/// - Failure to locate or download `protoc`. +/// - Failure to parse the `.proto`s. +/// - Failure to locate an imported `.proto`. +/// - Failure to compile a `.proto` without a [package specifier][4]. +/// +/// It's expected that this function call be `unwrap`ed in a `build.rs`; there is typically no +/// reason to gracefully recover from errors during a build. 
+/// +/// # Example `build.rs` +/// +/// ```rust,no_run +/// # use std::io::Result; +/// fn main() -> Result<()> { +/// prost_build::compile_protos(&["src/frontend.proto", "src/backend.proto"], &["src"])?; +/// Ok(()) +/// } +/// ``` +/// +/// [1]: https://doc.rust-lang.org/std/macro.include.html +/// [2]: http://doc.crates.io/build-script.html#case-study-code-generation +/// [3]: https://developers.google.com/protocol-buffers/docs/proto3#importing-definitions +/// [4]: https://developers.google.com/protocol-buffers/docs/proto#packages +pub fn compile_protos(protos: &[impl AsRef], includes: &[impl AsRef]) -> Result<()> { + Config::new().compile_protos(protos, includes) +} + +/// Returns the path to the `protoc` binary. +pub fn protoc() -> PathBuf { + match env::var_os("PROTOC") { + Some(protoc) => PathBuf::from(protoc), + None => PathBuf::from(env!("PROTOC")), + } +} + +/// Returns the path to the Protobuf include directory. +pub fn protoc_include() -> PathBuf { + match env::var_os("PROTOC_INCLUDE") { + Some(include) => PathBuf::from(include), + None => PathBuf::from(env!("PROTOC_INCLUDE")), + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::cell::RefCell; + use std::rc::Rc; + + /// An example service generator that generates a trait with methods corresponding to the + /// service methods. + struct ServiceTraitGenerator; + impl ServiceGenerator for ServiceTraitGenerator { + fn generate(&mut self, service: Service, buf: &mut String) { + // Generate a trait for the service. + service.comments.append_with_indent(0, buf); + buf.push_str(&format!("trait {} {{\n", &service.name)); + + // Generate the service methods. + for method in service.methods { + method.comments.append_with_indent(1, buf); + buf.push_str(&format!( + " fn {}({}) -> {};\n", + method.name, method.input_type, method.output_type + )); + } + + // Close out the trait. 
+ buf.push_str("}\n"); + } + fn finalize(&mut self, buf: &mut String) { + // Needs to be present only once, no matter how many services there are + buf.push_str("pub mod utils { }\n"); + } + } + + /// Implements `ServiceGenerator` and provides some state for assertions. + struct MockServiceGenerator { + state: Rc>, + } + + /// Holds state for `MockServiceGenerator` + #[derive(Default)] + struct MockState { + service_names: Vec, + package_names: Vec, + finalized: u32, + } + + impl MockServiceGenerator { + fn new(state: Rc>) -> Self { + Self { state } + } + } + + impl ServiceGenerator for MockServiceGenerator { + fn generate(&mut self, service: Service, _buf: &mut String) { + let mut state = self.state.borrow_mut(); + state.service_names.push(service.name); + } + + fn finalize(&mut self, _buf: &mut String) { + let mut state = self.state.borrow_mut(); + state.finalized += 1; + } + + fn finalize_package(&mut self, package: &str, _buf: &mut String) { + let mut state = self.state.borrow_mut(); + state.package_names.push(package.to_string()); + } + } + + #[test] + fn smoke_test() { + let _ = env_logger::try_init(); + Config::new() + .service_generator(Box::new(ServiceTraitGenerator)) + .compile_protos(&["src/smoke_test.proto"], &["src"]) + .unwrap(); + } + + #[test] + fn finalize_package() { + let _ = env_logger::try_init(); + + let state = Rc::new(RefCell::new(MockState::default())); + let gen = MockServiceGenerator::new(Rc::clone(&state)); + + Config::new() + .service_generator(Box::new(gen)) + .include_file("_protos.rs") + .compile_protos(&["src/hello.proto", "src/goodbye.proto"], &["src"]) + .unwrap(); + + let state = state.borrow(); + assert_eq!(&state.service_names, &["Greeting", "Farewell"]); + assert_eq!(&state.package_names, &["helloworld"]); + assert_eq!(state.finalized, 3); + } +} diff --git a/prost-build/src/message_graph.rs b/prost-build/src/message_graph.rs new file mode 100644 index 0000000..ac0ad15 --- /dev/null +++ b/prost-build/src/message_graph.rs @@ 
-0,0 +1,87 @@ +use std::collections::HashMap; + +use petgraph::algo::has_path_connecting; +use petgraph::graph::NodeIndex; +use petgraph::Graph; + +use prost_types::{field_descriptor_proto, DescriptorProto, FileDescriptorProto}; + +/// `MessageGraph` builds a graph of messages whose edges correspond to nesting. +/// The goal is to recognize when message types are recursively nested, so +/// that fields can be boxed when necessary. +pub struct MessageGraph { + index: HashMap, + graph: Graph, +} + +impl MessageGraph { + pub fn new<'a>( + files: impl Iterator, + ) -> Result { + let mut msg_graph = MessageGraph { + index: HashMap::new(), + graph: Graph::new(), + }; + + for file in files { + let package = format!( + "{}{}", + if file.package.is_some() { "." } else { "" }, + file.package.as_ref().map(String::as_str).unwrap_or("") + ); + for msg in &file.message_type { + msg_graph.add_message(&package, msg); + } + } + + Ok(msg_graph) + } + + fn get_or_insert_index(&mut self, msg_name: String) -> NodeIndex { + let MessageGraph { + ref mut index, + ref mut graph, + } = *self; + assert_eq!(b'.', msg_name.as_bytes()[0]); + *index + .entry(msg_name.clone()) + .or_insert_with(|| graph.add_node(msg_name)) + } + + /// Adds message to graph IFF it contains a non-repeated field containing another message. + /// The purpose of the message graph is detecting recursively nested messages and co-recursively nested messages. + /// Because prost does not box message fields, recursively nested messages would not compile in Rust. + /// To allow recursive messages, the message graph is used to detect recursion and automatically box the recursive field. + /// Since repeated messages are already put in a Vec, boxing them isn’t necessary even if the reference is recursive. 
+ fn add_message(&mut self, package: &str, msg: &DescriptorProto) { + let msg_name = format!("{}.{}", package, msg.name.as_ref().unwrap()); + let msg_index = self.get_or_insert_index(msg_name.clone()); + + for field in &msg.field { + if field.r#type() == field_descriptor_proto::Type::Message + && field.label() != field_descriptor_proto::Label::Repeated + { + let field_index = self.get_or_insert_index(field.type_name.clone().unwrap()); + self.graph.add_edge(msg_index, field_index, ()); + } + } + + for msg in &msg.nested_type { + self.add_message(&msg_name, msg); + } + } + + /// Returns true if message type `inner` is nested in message type `outer`. + pub fn is_nested(&self, outer: &str, inner: &str) -> bool { + let outer = match self.index.get(outer) { + Some(outer) => *outer, + None => return false, + }; + let inner = match self.index.get(inner) { + Some(inner) => *inner, + None => return false, + }; + + has_path_connecting(&self.graph, outer, inner, None) + } +} diff --git a/prost-build/src/path.rs b/prost-build/src/path.rs new file mode 100644 index 0000000..ed8fef1 --- /dev/null +++ b/prost-build/src/path.rs @@ -0,0 +1,250 @@ +//! Utilities for working with Protobuf paths. + +use std::iter; + +/// Maps a fully-qualified Protobuf path to a value using path matchers. +#[derive(Debug, Default)] +pub(crate) struct PathMap { + // insertion order might actually matter (to avoid warning about legacy-derive-helpers) + // see: https://doc.rust-lang.org/rustc/lints/listing/warn-by-default.html#legacy-derive-helpers + pub(crate) matchers: Vec<(String, T)>, +} + +impl PathMap { + /// Inserts a new matcher and associated value to the path map. 
+ pub(crate) fn insert(&mut self, matcher: String, value: T) { + self.matchers.push((matcher, value)); + } + + /// Returns a iterator over all the value matching the given fd_path and associated suffix/prefix path + pub(crate) fn get(&self, fq_path: &str) -> Iter<'_, T> { + Iter::new(self, fq_path.to_string()) + } + + /// Returns a iterator over all the value matching the path `fq_path.field` and associated suffix/prefix path + pub(crate) fn get_field(&self, fq_path: &str, field: &str) -> Iter<'_, T> { + Iter::new(self, format!("{}.{}", fq_path, field)) + } + + /// Returns the first value found matching the given path + /// If nothing matches the path, suffix paths will be tried, then prefix paths, then the global path + #[allow(unused)] + pub(crate) fn get_first<'a>(&'a self, fq_path: &'_ str) -> Option<&'a T> { + self.find_best_matching(fq_path) + } + + /// Returns the first value found matching the path `fq_path.field` + /// If nothing matches the path, suffix paths will be tried, then prefix paths, then the global path + pub(crate) fn get_first_field<'a>( + &'a self, + fq_path: &'_ str, + field: &'_ str, + ) -> Option<&'a T> { + self.find_best_matching(&format!("{}.{}", fq_path, field)) + } + + /// Removes all matchers from the path map. 
+ pub(crate) fn clear(&mut self) { + self.matchers.clear(); + } + + /// Returns the first value found best matching the path + /// See [sub_path_iter()] for paths test order + fn find_best_matching(&self, full_path: &str) -> Option<&T> { + sub_path_iter(full_path).find_map(|path| { + self.matchers + .iter() + .find(|(p, _)| p == path) + .map(|(_, v)| v) + }) + } +} + +/// Iterator inside a PathMap that only returns values that matches a given path +pub(crate) struct Iter<'a, T> { + iter: std::slice::Iter<'a, (String, T)>, + path: String, +} + +impl<'a, T> Iter<'a, T> { + fn new(map: &'a PathMap, path: String) -> Self { + Self { + iter: map.matchers.iter(), + path, + } + } + + fn is_match(&self, path: &str) -> bool { + sub_path_iter(self.path.as_str()).any(|p| p == path) + } +} + +impl<'a, T> std::iter::Iterator for Iter<'a, T> { + type Item = &'a T; + + fn next(&mut self) -> Option { + loop { + match self.iter.next() { + Some((p, v)) => { + if self.is_match(p) { + return Some(v); + } + } + None => return None, + } + } + } +} + +impl<'a, T> std::iter::FusedIterator for Iter<'a, T> {} + +/// Given a fully-qualified path, returns a sequence of paths: +/// - the path itself +/// - the sequence of suffix paths +/// - the sequence of prefix paths +/// - the global path +/// +/// Example: sub_path_iter(".a.b.c") -> [".a.b.c", "a.b.c", "b.c", "c", ".a.b", ".a", "."] +fn sub_path_iter(full_path: &str) -> impl Iterator { + // First, try matching the path. + iter::once(full_path) + // Then, try matching path suffixes. + .chain(suffixes(full_path)) + // Then, try matching path prefixes. + .chain(prefixes(full_path)) + // Then, match the global path. + .chain(iter::once(".")) +} + +/// Given a fully-qualified path, returns a sequence of fully-qualified paths which match a prefix +/// of the input path, in decreasing path-length order. 
+/// +/// Example: prefixes(".a.b.c.d") -> [".a.b.c", ".a.b", ".a"] +fn prefixes(fq_path: &str) -> impl Iterator { + std::iter::successors(Some(fq_path), |path| { + #[allow(unknown_lints, clippy::manual_split_once)] + path.rsplitn(2, '.').nth(1).filter(|path| !path.is_empty()) + }) + .skip(1) +} + +/// Given a fully-qualified path, returns a sequence of paths which match the suffix of the input +/// path, in decreasing path-length order. +/// +/// Example: suffixes(".a.b.c.d") -> ["a.b.c.d", "b.c.d", "c.d", "d"] +fn suffixes(fq_path: &str) -> impl Iterator { + std::iter::successors(Some(fq_path), |path| { + #[allow(unknown_lints, clippy::manual_split_once)] + path.splitn(2, '.').nth(1).filter(|path| !path.is_empty()) + }) + .skip(1) +} + +#[cfg(test)] +mod tests { + + use super::*; + + #[test] + fn test_prefixes() { + assert_eq!( + prefixes(".a.b.c.d").collect::>(), + vec![".a.b.c", ".a.b", ".a"], + ); + assert_eq!(prefixes(".a").count(), 0); + assert_eq!(prefixes(".").count(), 0); + } + + #[test] + fn test_suffixes() { + assert_eq!( + suffixes(".a.b.c.d").collect::>(), + vec!["a.b.c.d", "b.c.d", "c.d", "d"], + ); + assert_eq!(suffixes(".a").collect::>(), vec!["a"]); + assert_eq!(suffixes(".").collect::>(), Vec::<&str>::new()); + } + + #[test] + fn test_get_matches_sub_path() { + let mut path_map = PathMap::default(); + + // full path + path_map.insert(".a.b.c.d".to_owned(), 1); + assert_eq!(Some(&1), path_map.get(".a.b.c.d").next()); + assert_eq!(Some(&1), path_map.get_field(".a.b.c", "d").next()); + + // suffix + path_map.clear(); + path_map.insert("c.d".to_owned(), 1); + assert_eq!(Some(&1), path_map.get(".a.b.c.d").next()); + assert_eq!(Some(&1), path_map.get("b.c.d").next()); + assert_eq!(Some(&1), path_map.get_field(".a.b.c", "d").next()); + + // prefix + path_map.clear(); + path_map.insert(".a.b".to_owned(), 1); + assert_eq!(Some(&1), path_map.get(".a.b.c.d").next()); + assert_eq!(Some(&1), path_map.get_field(".a.b.c", "d").next()); + + // global + 
path_map.clear(); + path_map.insert(".".to_owned(), 1); + assert_eq!(Some(&1), path_map.get(".a.b.c.d").next()); + assert_eq!(Some(&1), path_map.get("b.c.d").next()); + assert_eq!(Some(&1), path_map.get_field(".a.b.c", "d").next()); + } + + #[test] + fn test_get_best() { + let mut path_map = PathMap::default(); + + // worst is global + path_map.insert(".".to_owned(), 1); + assert_eq!(Some(&1), path_map.get_first(".a.b.c.d")); + assert_eq!(Some(&1), path_map.get_first("b.c.d")); + assert_eq!(Some(&1), path_map.get_first_field(".a.b.c", "d")); + + // then prefix + path_map.insert(".a.b".to_owned(), 2); + assert_eq!(Some(&2), path_map.get_first(".a.b.c.d")); + assert_eq!(Some(&2), path_map.get_first_field(".a.b.c", "d")); + + // then suffix + path_map.insert("c.d".to_owned(), 3); + assert_eq!(Some(&3), path_map.get_first(".a.b.c.d")); + assert_eq!(Some(&3), path_map.get_first("b.c.d")); + assert_eq!(Some(&3), path_map.get_first_field(".a.b.c", "d")); + + // best is full path + path_map.insert(".a.b.c.d".to_owned(), 4); + assert_eq!(Some(&4), path_map.get_first(".a.b.c.d")); + assert_eq!(Some(&4), path_map.get_first_field(".a.b.c", "d")); + } + + #[test] + fn test_get_keep_order() { + let mut path_map = PathMap::default(); + path_map.insert(".".to_owned(), 1); + path_map.insert(".a.b".to_owned(), 2); + path_map.insert(".a.b.c.d".to_owned(), 3); + + let mut iter = path_map.get(".a.b.c.d"); + assert_eq!(Some(&1), iter.next()); + assert_eq!(Some(&2), iter.next()); + assert_eq!(Some(&3), iter.next()); + assert_eq!(None, iter.next()); + + path_map.clear(); + + path_map.insert(".a.b.c.d".to_owned(), 1); + path_map.insert(".a.b".to_owned(), 2); + path_map.insert(".".to_owned(), 3); + + let mut iter = path_map.get(".a.b.c.d"); + assert_eq!(Some(&1), iter.next()); + assert_eq!(Some(&2), iter.next()); + assert_eq!(Some(&3), iter.next()); + assert_eq!(None, iter.next()); + } +} diff --git a/prost-build/src/smoke_test.proto b/prost-build/src/smoke_test.proto new file mode 100644 
index 0000000..04679a2 --- /dev/null +++ b/prost-build/src/smoke_test.proto @@ -0,0 +1,18 @@ +syntax = "proto2"; + +package smoke_test; + +message SmokeRequest { +} + +message SmokeResponse { +} + +// Just a smoke test service. +service SmokeService { + + // A detached comment block. + + // Blow some smoke. + rpc BlowSmoke(SmokeRequest) returns (SmokeResponse); +} diff --git a/prost-build/src/types.proto b/prost-build/src/types.proto new file mode 100644 index 0000000..4d9d5e0 --- /dev/null +++ b/prost-build/src/types.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; + +package helloworld; + +message Message { + string say = 1; +} + +message Response { + string say = 1; +} diff --git a/prost-build/third-party/include/google/protobuf/any.proto b/prost-build/third-party/include/google/protobuf/any.proto new file mode 100644 index 0000000..6ed8a23 --- /dev/null +++ b/prost-build/third-party/include/google/protobuf/any.proto @@ -0,0 +1,158 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option go_package = "google.golang.org/protobuf/types/known/anypb"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "AnyProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// `Any` contains an arbitrary serialized protocol buffer message along with a +// URL that describes the type of the serialized message. +// +// Protobuf library provides support to pack/unpack Any values in the form +// of utility functions or additional generated methods of the Any type. +// +// Example 1: Pack and unpack a message in C++. +// +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... +// } +// +// Example 2: Pack and unpack a message in Java. +// +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... 
+// +// Example 4: Pack and unpack a message in Go +// +// foo := &pb.Foo{...} +// any, err := anypb.New(foo) +// if err != nil { +// ... +// } +// ... +// foo := &pb.Foo{} +// if err := any.UnmarshalTo(foo); err != nil { +// ... +// } +// +// The pack methods provided by protobuf library will by default use +// 'type.googleapis.com/full.type.name' as the type URL and the unpack +// methods only use the fully qualified type name after the last '/' +// in the type URL, for example "foo.bar.com/x/y.z" will yield type +// name "y.z". +// +// +// JSON +// ==== +// The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. Example: +// +// package google.profile; +// message Person { +// string first_name = 1; +// string last_name = 2; +// } +// +// { +// "@type": "type.googleapis.com/google.profile.Person", +// "firstName": , +// "lastName": +// } +// +// If the embedded message type is well-known and has a custom JSON +// representation, that representation will be embedded adding a field +// `value` which holds the custom JSON in addition to the `@type` +// field. Example (for message [google.protobuf.Duration][]): +// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } +// +message Any { + // A URL/resource name that uniquely identifies the type of the serialized + // protocol buffer message. This string must contain at least + // one "/" character. The last segment of the URL's path must represent + // the fully qualified name of the type (as in + // `path/google.protobuf.Duration`). The name should be in a canonical form + // (e.g., leading "." is not accepted). + // + // In practice, teams usually precompile into the binary all types that they + // expect it to use in the context of Any. 
However, for URLs which use the + // scheme `http`, `https`, or no scheme, one can optionally set up a type + // server that maps type URLs to message definitions as follows: + // + // * If no scheme is provided, `https` is assumed. + // * An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error. + // * Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Note: this functionality is not currently available in the official + // protobuf release, and it is not used for type URLs beginning with + // type.googleapis.com. + // + // Schemes other than `http`, `https` (or the empty scheme) might be + // used with implementation specific semantics. + // + string type_url = 1; + + // Must be a valid serialized protocol buffer of the above specified type. + bytes value = 2; +} diff --git a/prost-build/third-party/include/google/protobuf/api.proto b/prost-build/third-party/include/google/protobuf/api.proto new file mode 100644 index 0000000..3d598fc --- /dev/null +++ b/prost-build/third-party/include/google/protobuf/api.proto @@ -0,0 +1,208 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +import "google/protobuf/source_context.proto"; +import "google/protobuf/type.proto"; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "ApiProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; +option go_package = "google.golang.org/protobuf/types/known/apipb"; + +// Api is a light-weight descriptor for an API Interface. 
+// +// Interfaces are also described as "protocol buffer services" in some contexts, +// such as by the "service" keyword in a .proto file, but they are different +// from API Services, which represent a concrete implementation of an interface +// as opposed to simply a description of methods and bindings. They are also +// sometimes simply referred to as "APIs" in other contexts, such as the name of +// this message itself. See https://cloud.google.com/apis/design/glossary for +// detailed terminology. +message Api { + // The fully qualified name of this interface, including package name + // followed by the interface's simple name. + string name = 1; + + // The methods of this interface, in unspecified order. + repeated Method methods = 2; + + // Any metadata attached to the interface. + repeated Option options = 3; + + // A version string for this interface. If specified, must have the form + // `major-version.minor-version`, as in `1.10`. If the minor version is + // omitted, it defaults to zero. If the entire version field is empty, the + // major version is derived from the package name, as outlined below. If the + // field is not empty, the version in the package name will be verified to be + // consistent with what is provided here. + // + // The versioning schema uses [semantic + // versioning](http://semver.org) where the major version number + // indicates a breaking change and the minor version an additive, + // non-breaking change. Both version numbers are signals to users + // what to expect from different versions, and should be carefully + // chosen based on the product plan. + // + // The major version is also reflected in the package name of the + // interface, which must end in `v`, as in + // `google.feature.v1`. For major versions 0 and 1, the suffix can + // be omitted. Zero major versions must only be used for + // experimental, non-GA interfaces. 
+ // + // + string version = 4; + + // Source context for the protocol buffer service represented by this + // message. + SourceContext source_context = 5; + + // Included interfaces. See [Mixin][]. + repeated Mixin mixins = 6; + + // The source syntax of the service. + Syntax syntax = 7; +} + +// Method represents a method of an API interface. +message Method { + // The simple name of this method. + string name = 1; + + // A URL of the input message type. + string request_type_url = 2; + + // If true, the request is streamed. + bool request_streaming = 3; + + // The URL of the output message type. + string response_type_url = 4; + + // If true, the response is streamed. + bool response_streaming = 5; + + // Any metadata attached to the method. + repeated Option options = 6; + + // The source syntax of this method. + Syntax syntax = 7; +} + +// Declares an API Interface to be included in this interface. The including +// interface must redeclare all the methods from the included interface, but +// documentation and options are inherited as follows: +// +// - If after comment and whitespace stripping, the documentation +// string of the redeclared method is empty, it will be inherited +// from the original method. +// +// - Each annotation belonging to the service config (http, +// visibility) which is not set in the redeclared method will be +// inherited. +// +// - If an http annotation is inherited, the path pattern will be +// modified as follows. Any version prefix will be replaced by the +// version of the including interface plus the [root][] path if +// specified. +// +// Example of a simple mixin: +// +// package google.acl.v1; +// service AccessControl { +// // Get the underlying ACL object. +// rpc GetAcl(GetAclRequest) returns (Acl) { +// option (google.api.http).get = "/v1/{resource=**}:getAcl"; +// } +// } +// +// package google.storage.v2; +// service Storage { +// rpc GetAcl(GetAclRequest) returns (Acl); +// +// // Get a data record. 
+// rpc GetData(GetDataRequest) returns (Data) { +// option (google.api.http).get = "/v2/{resource=**}"; +// } +// } +// +// Example of a mixin configuration: +// +// apis: +// - name: google.storage.v2.Storage +// mixins: +// - name: google.acl.v1.AccessControl +// +// The mixin construct implies that all methods in `AccessControl` are +// also declared with same name and request/response types in +// `Storage`. A documentation generator or annotation processor will +// see the effective `Storage.GetAcl` method after inheriting +// documentation and annotations as follows: +// +// service Storage { +// // Get the underlying ACL object. +// rpc GetAcl(GetAclRequest) returns (Acl) { +// option (google.api.http).get = "/v2/{resource=**}:getAcl"; +// } +// ... +// } +// +// Note how the version in the path pattern changed from `v1` to `v2`. +// +// If the `root` field in the mixin is specified, it should be a +// relative path under which inherited HTTP paths are placed. Example: +// +// apis: +// - name: google.storage.v2.Storage +// mixins: +// - name: google.acl.v1.AccessControl +// root: acls +// +// This implies the following inherited HTTP annotation: +// +// service Storage { +// // Get the underlying ACL object. +// rpc GetAcl(GetAclRequest) returns (Acl) { +// option (google.api.http).get = "/v2/acls/{resource=**}:getAcl"; +// } +// ... +// } +message Mixin { + // The fully qualified name of the interface which is included. + string name = 1; + + // If non-empty specifies a path under which inherited HTTP paths + // are rooted. + string root = 2; +} diff --git a/prost-build/third-party/include/google/protobuf/compiler/plugin.proto b/prost-build/third-party/include/google/protobuf/compiler/plugin.proto new file mode 100644 index 0000000..9242aac --- /dev/null +++ b/prost-build/third-party/include/google/protobuf/compiler/plugin.proto @@ -0,0 +1,183 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. 
+// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// +// WARNING: The plugin interface is currently EXPERIMENTAL and is subject to +// change. +// +// protoc (aka the Protocol Compiler) can be extended via plugins. A plugin is +// just a program that reads a CodeGeneratorRequest from stdin and writes a +// CodeGeneratorResponse to stdout. 
+// +// Plugins written using C++ can use google/protobuf/compiler/plugin.h instead +// of dealing with the raw protocol defined here. +// +// A plugin executable needs only to be placed somewhere in the path. The +// plugin should be named "protoc-gen-$NAME", and will then be used when the +// flag "--${NAME}_out" is passed to protoc. + +syntax = "proto2"; + +package google.protobuf.compiler; +option java_package = "com.google.protobuf.compiler"; +option java_outer_classname = "PluginProtos"; + +option go_package = "google.golang.org/protobuf/types/pluginpb"; + +import "google/protobuf/descriptor.proto"; + +// The version number of protocol compiler. +message Version { + optional int32 major = 1; + optional int32 minor = 2; + optional int32 patch = 3; + // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should + // be empty for mainline stable releases. + optional string suffix = 4; +} + +// An encoded CodeGeneratorRequest is written to the plugin's stdin. +message CodeGeneratorRequest { + // The .proto files that were explicitly listed on the command-line. The + // code generator should generate code only for these files. Each file's + // descriptor will be included in proto_file, below. + repeated string file_to_generate = 1; + + // The generator parameter passed on the command-line. + optional string parameter = 2; + + // FileDescriptorProtos for all files in files_to_generate and everything + // they import. The files will appear in topological order, so each file + // appears before any file that imports it. + // + // protoc guarantees that all proto_files will be written after + // the fields above, even though this is not technically guaranteed by the + // protobuf wire format. This theoretically could allow a plugin to stream + // in the FileDescriptorProtos and handle them one by one rather than read + // the entire set into memory at once. 
However, as of this writing, this + // is not similarly optimized on protoc's end -- it will store all fields in + // memory at once before sending them to the plugin. + // + // Type names of fields and extensions in the FileDescriptorProto are always + // fully qualified. + repeated FileDescriptorProto proto_file = 15; + + // The version number of protocol compiler. + optional Version compiler_version = 3; + +} + +// The plugin writes an encoded CodeGeneratorResponse to stdout. +message CodeGeneratorResponse { + // Error message. If non-empty, code generation failed. The plugin process + // should exit with status code zero even if it reports an error in this way. + // + // This should be used to indicate errors in .proto files which prevent the + // code generator from generating correct code. Errors which indicate a + // problem in protoc itself -- such as the input CodeGeneratorRequest being + // unparseable -- should be reported by writing a message to stderr and + // exiting with a non-zero status code. + optional string error = 1; + + // A bitmask of supported features that the code generator supports. + // This is a bitwise "or" of values from the Feature enum. + optional uint64 supported_features = 2; + + // Sync with code_generator.h. + enum Feature { + FEATURE_NONE = 0; + FEATURE_PROTO3_OPTIONAL = 1; + } + + // Represents a single generated file. + message File { + // The file name, relative to the output directory. The name must not + // contain "." or ".." components and must be relative, not be absolute (so, + // the file cannot lie outside the output directory). "/" must be used as + // the path separator, not "\". + // + // If the name is omitted, the content will be appended to the previous + // file. This allows the generator to break large files into small chunks, + // and allows the generated text to be streamed back to protoc so that large + // files need not reside completely in memory at one time. 
Note that as of + // this writing protoc does not optimize for this -- it will read the entire + // CodeGeneratorResponse before writing files to disk. + optional string name = 1; + + // If non-empty, indicates that the named file should already exist, and the + // content here is to be inserted into that file at a defined insertion + // point. This feature allows a code generator to extend the output + // produced by another code generator. The original generator may provide + // insertion points by placing special annotations in the file that look + // like: + // @@protoc_insertion_point(NAME) + // The annotation can have arbitrary text before and after it on the line, + // which allows it to be placed in a comment. NAME should be replaced with + // an identifier naming the point -- this is what other generators will use + // as the insertion_point. Code inserted at this point will be placed + // immediately above the line containing the insertion point (thus multiple + // insertions to the same point will come out in the order they were added). + // The double-@ is intended to make it unlikely that the generated code + // could contain things that look like insertion points by accident. + // + // For example, the C++ code generator places the following line in the + // .pb.h files that it generates: + // // @@protoc_insertion_point(namespace_scope) + // This line appears within the scope of the file's package namespace, but + // outside of any particular class. Another plugin can then specify the + // insertion_point "namespace_scope" to generate additional classes or + // other declarations that should be placed in this scope. + // + // Note that if the line containing the insertion point begins with + // whitespace, the same whitespace will be added to every line of the + // inserted text. This is useful for languages like Python, where + // indentation matters. 
In these languages, the insertion point comment + // should be indented the same amount as any inserted code will need to be + // in order to work correctly in that context. + // + // The code generator that generates the initial file and the one which + // inserts into it must both run as part of a single invocation of protoc. + // Code generators are executed in the order in which they appear on the + // command line. + // + // If |insertion_point| is present, |name| must also be present. + optional string insertion_point = 2; + + // The file contents. + optional string content = 15; + + // Information describing the file content being inserted. If an insertion + // point is used, this information will be appropriately offset and inserted + // into the code generation metadata for the generated files. + optional GeneratedCodeInfo generated_code_info = 16; + } + repeated File file = 15; +} diff --git a/prost-build/third-party/include/google/protobuf/descriptor.proto b/prost-build/third-party/include/google/protobuf/descriptor.proto new file mode 100644 index 0000000..156e410 --- /dev/null +++ b/prost-build/third-party/include/google/protobuf/descriptor.proto @@ -0,0 +1,911 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// Based on original Protocol Buffers design by +// Sanjay Ghemawat, Jeff Dean, and others. +// +// The messages in this file describe the definitions found in .proto files. +// A valid .proto file can be translated directly to a FileDescriptorProto +// without any other information (e.g. without reading its imports). + + +syntax = "proto2"; + +package google.protobuf; + +option go_package = "google.golang.org/protobuf/types/descriptorpb"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "DescriptorProtos"; +option csharp_namespace = "Google.Protobuf.Reflection"; +option objc_class_prefix = "GPB"; +option cc_enable_arenas = true; + +// descriptor.proto must be optimized for speed because reflection-based +// algorithms don't work during bootstrapping. +option optimize_for = SPEED; + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. 
+message FileDescriptorSet { + repeated FileDescriptorProto file = 1; +} + +// Describes a complete .proto file. +message FileDescriptorProto { + optional string name = 1; // file name, relative to root of source tree + optional string package = 2; // e.g. "foo", "foo.bar", etc. + + // Names of files imported by this file. + repeated string dependency = 3; + // Indexes of the public imported files in the dependency list above. + repeated int32 public_dependency = 10; + // Indexes of the weak imported files in the dependency list. + // For Google-internal migration only. Do not use. + repeated int32 weak_dependency = 11; + + // All top-level definitions in this file. + repeated DescriptorProto message_type = 4; + repeated EnumDescriptorProto enum_type = 5; + repeated ServiceDescriptorProto service = 6; + repeated FieldDescriptorProto extension = 7; + + optional FileOptions options = 8; + + // This field contains optional information about the original source code. + // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. + optional SourceCodeInfo source_code_info = 9; + + // The syntax of the proto file. + // The supported values are "proto2" and "proto3". + optional string syntax = 12; +} + +// Describes a message type. +message DescriptorProto { + optional string name = 1; + + repeated FieldDescriptorProto field = 2; + repeated FieldDescriptorProto extension = 6; + + repeated DescriptorProto nested_type = 3; + repeated EnumDescriptorProto enum_type = 4; + + message ExtensionRange { + optional int32 start = 1; // Inclusive. + optional int32 end = 2; // Exclusive. + + optional ExtensionRangeOptions options = 3; + } + repeated ExtensionRange extension_range = 5; + + repeated OneofDescriptorProto oneof_decl = 8; + + optional MessageOptions options = 7; + + // Range of reserved tag numbers. 
Reserved tag numbers may not be used by + // fields or extension ranges in the same message. Reserved ranges may + // not overlap. + message ReservedRange { + optional int32 start = 1; // Inclusive. + optional int32 end = 2; // Exclusive. + } + repeated ReservedRange reserved_range = 9; + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. + repeated string reserved_name = 10; +} + +message ExtensionRangeOptions { + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +// Describes a field within a message. +message FieldDescriptorProto { + enum Type { + // 0 is reserved for errors. + // Order is weird for historical reasons. + TYPE_DOUBLE = 1; + TYPE_FLOAT = 2; + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + // negative values are likely. + TYPE_INT64 = 3; + TYPE_UINT64 = 4; + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. + TYPE_INT32 = 5; + TYPE_FIXED64 = 6; + TYPE_FIXED32 = 7; + TYPE_BOOL = 8; + TYPE_STRING = 9; + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. + TYPE_GROUP = 10; + TYPE_MESSAGE = 11; // Length-delimited aggregate. + + // New in version 2. + TYPE_BYTES = 12; + TYPE_UINT32 = 13; + TYPE_ENUM = 14; + TYPE_SFIXED32 = 15; + TYPE_SFIXED64 = 16; + TYPE_SINT32 = 17; // Uses ZigZag encoding. + TYPE_SINT64 = 18; // Uses ZigZag encoding. 
+ } + + enum Label { + // 0 is reserved for errors + LABEL_OPTIONAL = 1; + LABEL_REQUIRED = 2; + LABEL_REPEATED = 3; + } + + optional string name = 1; + optional int32 number = 3; + optional Label label = 4; + + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + optional Type type = 5; + + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). + optional string type_name = 6; + + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + optional string extendee = 2; + + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + // TODO(kenton): Base-64 encode? + optional string default_value = 7; + + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. This field is a member of that oneof. + optional int32 oneof_index = 9; + + // JSON name of this field. The value is set by protocol compiler. If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. + optional string json_name = 10; + + optional FieldOptions options = 8; + + // If true, this is a proto3 "optional". When a proto3 field is optional, it + // tracks presence regardless of field type. 
+ // + // When proto3_optional is true, this field must be belong to a oneof to + // signal to old proto3 clients that presence is tracked for this field. This + // oneof is known as a "synthetic" oneof, and this field must be its sole + // member (each proto3 optional field gets its own synthetic oneof). Synthetic + // oneofs exist in the descriptor only, and do not generate any API. Synthetic + // oneofs must be ordered after all "real" oneofs. + // + // For message fields, proto3_optional doesn't create any semantic change, + // since non-repeated message fields always track presence. However it still + // indicates the semantic detail of whether the user wrote "optional" or not. + // This can be useful for round-tripping the .proto file. For consistency we + // give message fields a synthetic oneof also, even though it is not required + // to track presence. This is especially important because the parser can't + // tell if a field is a message or an enum, so it must always create a + // synthetic oneof. + // + // Proto2 optional fields do not set this flag, because they already indicate + // optional with `LABEL_OPTIONAL`. + optional bool proto3_optional = 17; +} + +// Describes a oneof. +message OneofDescriptorProto { + optional string name = 1; + optional OneofOptions options = 2; +} + +// Describes an enum type. +message EnumDescriptorProto { + optional string name = 1; + + repeated EnumValueDescriptorProto value = 2; + + optional EnumOptions options = 3; + + // Range of reserved numeric values. Reserved values may not be used by + // entries in the same enum. Reserved ranges may not overlap. + // + // Note that this is distinct from DescriptorProto.ReservedRange in that it + // is inclusive such that it can appropriately represent the entire int32 + // domain. + message EnumReservedRange { + optional int32 start = 1; // Inclusive. + optional int32 end = 2; // Inclusive. + } + + // Range of reserved numeric values. 
Reserved numeric values may not be used + // by enum values in the same enum declaration. Reserved ranges may not + // overlap. + repeated EnumReservedRange reserved_range = 4; + + // Reserved enum value names, which may not be reused. A given name may only + // be reserved once. + repeated string reserved_name = 5; +} + +// Describes a value within an enum. +message EnumValueDescriptorProto { + optional string name = 1; + optional int32 number = 2; + + optional EnumValueOptions options = 3; +} + +// Describes a service. +message ServiceDescriptorProto { + optional string name = 1; + repeated MethodDescriptorProto method = 2; + + optional ServiceOptions options = 3; +} + +// Describes a method of a service. +message MethodDescriptorProto { + optional string name = 1; + + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. + optional string input_type = 2; + optional string output_type = 3; + + optional MethodOptions options = 4; + + // Identifies if client streams multiple client messages + optional bool client_streaming = 5 [default = false]; + // Identifies if server streams multiple server messages + optional bool server_streaming = 6 [default = false]; +} + + +// =================================================================== +// Options + +// Each of the definitions above may have "options" attached. These are +// just annotations which may cause code to be generated slightly differently +// or may contain hints for code that manipulates protocol messages. +// +// Clients may define custom options as extensions of the *Options messages. +// These extensions may not yet be known at parsing time, so the parser cannot +// store the values in them. Instead it stores them in a field in the *Options +// message called uninterpreted_option. This field must have the same name +// across all *Options messages. 
We then use this field to populate the +// extensions when we build a descriptor, at which point all protos have been +// parsed and so all extensions are known. +// +// Extension numbers for custom options may be chosen as follows: +// * For options which will only be used within a single application or +// organization, or for experimental options, use field numbers 50000 +// through 99999. It is up to you to ensure that you do not use the +// same number for multiple options. +// * For options which will be published and used publicly by multiple +// independent entities, e-mail protobuf-global-extension-registry@google.com +// to reserve extension numbers. Simply provide your project name (e.g. +// Objective-C plugin) and your project website (if available) -- there's no +// need to explain how you intend to use them. Usually you only need one +// extension number. You can declare multiple options with only one extension +// number by putting them in a sub-message. See the Custom Options section of +// the docs for examples: +// https://developers.google.com/protocol-buffers/docs/proto#options +// If this turns out to be popular, a web service will be set up +// to automatically assign option numbers. + +message FileOptions { + + // Sets the Java package where classes generated from this .proto will be + // placed. By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. + optional string java_package = 1; + + + // Controls the name of the wrapper Java class generated for the .proto file. + // That class will always contain the .proto file's getDescriptor() method as + // well as any top-level extensions defined in the .proto file. + // If java_multiple_files is disabled, then all the other classes from the + // .proto file will be nested inside the single wrapper outer class. 
+ optional string java_outer_classname = 8; + + // If enabled, then the Java code generator will generate a separate .java + // file for each top-level message, enum, and service defined in the .proto + // file. Thus, these types will *not* be nested inside the wrapper class + // named by java_outer_classname. However, the wrapper class will still be + // generated to contain the file's getDescriptor() method as well as any + // top-level extensions defined in the file. + optional bool java_multiple_files = 10 [default = false]; + + // This option does nothing. + optional bool java_generate_equals_and_hash = 20 [deprecated=true]; + + // If set true, then the Java2 code generator will generate code that + // throws an exception whenever an attempt is made to assign a non-UTF-8 + // byte sequence to a string field. + // Message reflection will do the same. + // However, an extension field still accepts non-UTF-8 byte sequences. + // This option has no effect on when used with the lite runtime. + optional bool java_string_check_utf8 = 27 [default = false]; + + + // Generated classes can be optimized for speed or code size. + enum OptimizeMode { + SPEED = 1; // Generate complete code for parsing, serialization, + // etc. + CODE_SIZE = 2; // Use ReflectionOps to implement these methods. + LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime. + } + optional OptimizeMode optimize_for = 9 [default = SPEED]; + + // Sets the Go package where structs generated from this .proto will be + // placed. If omitted, the Go package will be derived from the following: + // - The basename of the package import path, if provided. + // - Otherwise, the package statement in the .proto file, if present. + // - Otherwise, the basename of the .proto file, without extension. + optional string go_package = 11; + + + + + // Should generic services be generated in each language? "Generic" services + // are not specific to any particular RPC system. 
They are generated by the + // main code generators in each language (without additional plugins). + // Generic services were the only kind of service generation supported by + // early versions of google.protobuf. + // + // Generic services are now considered deprecated in favor of using plugins + // that generate code specific to your particular RPC system. Therefore, + // these default to false. Old code which depends on generic services should + // explicitly set them to true. + optional bool cc_generic_services = 16 [default = false]; + optional bool java_generic_services = 17 [default = false]; + optional bool py_generic_services = 18 [default = false]; + optional bool php_generic_services = 42 [default = false]; + + // Is this file deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for everything in the file, or it will be completely ignored; in the very + // least, this is a formalization for deprecating files. + optional bool deprecated = 23 [default = false]; + + // Enables the use of arenas for the proto messages in this file. This applies + // only to generated classes for C++. + optional bool cc_enable_arenas = 31 [default = true]; + + + // Sets the objective c class prefix which is prepended to all objective c + // generated classes from this .proto. There is no default. + optional string objc_class_prefix = 36; + + // Namespace for generated classes; defaults to the package. + optional string csharp_namespace = 37; + + // By default Swift generators will take the proto package and CamelCase it + // replacing '.' with underscore and use that to prefix the types/symbols + // defined. When this options is provided, they will use this value instead + // to prefix the types/symbols defined. + optional string swift_prefix = 39; + + // Sets the php class prefix which is prepended to all php generated classes + // from this .proto. Default is empty. 
+ optional string php_class_prefix = 40; + + // Use this option to change the namespace of php generated classes. Default + // is empty. When this option is empty, the package name will be used for + // determining the namespace. + optional string php_namespace = 41; + + // Use this option to change the namespace of php generated metadata classes. + // Default is empty. When this option is empty, the proto file name will be + // used for determining the namespace. + optional string php_metadata_namespace = 44; + + // Use this option to change the package of ruby generated classes. Default + // is empty. When this option is not set, the package name will be used for + // determining the ruby package. + optional string ruby_package = 45; + + + // The parser stores options it doesn't recognize here. + // See the documentation for the "Options" section above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. + // See the documentation for the "Options" section above. + extensions 1000 to max; + + reserved 38; +} + +message MessageOptions { + // Set true to use the old proto1 MessageSet wire format for extensions. + // This is provided for backwards-compatibility with the MessageSet wire + // format. You should not use this for any other reason: It's less + // efficient, has fewer features, and is more complicated. + // + // The message must be defined exactly as follows: + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // Note that the message cannot have any defined fields; MessageSets only + // have extensions. + // + // All extensions of your type must be singular messages; e.g. they cannot + // be int32s, enums, or repeated messages. + // + // Because this is an option, the above two restrictions are not enforced by + // the protocol compiler. 
+ optional bool message_set_wire_format = 1 [default = false]; + + // Disables the generation of the standard "descriptor()" accessor, which can + // conflict with a field of the same name. This is meant to make migration + // from proto1 easier; new code should avoid fields named "descriptor". + optional bool no_standard_descriptor_accessor = 2 [default = false]; + + // Is this message deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the message, or it will be completely ignored; in the very least, + // this is a formalization for deprecating messages. + optional bool deprecated = 3 [default = false]; + + reserved 4, 5, 6; + + // Whether the message is an automatically generated map entry type for the + // maps field. + // + // For maps fields: + // map map_field = 1; + // The parsed descriptor looks like: + // message MapFieldEntry { + // option map_entry = true; + // optional KeyType key = 1; + // optional ValueType value = 2; + // } + // repeated MapFieldEntry map_field = 1; + // + // Implementations may choose not to generate the map_entry=true message, but + // use a native map in the target language to hold the keys and values. + // The reflection APIs in such implementations still need to work as + // if the field is a repeated message field. + // + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. + optional bool map_entry = 7; + + reserved 8; // javalite_serializable + reserved 9; // javanano_as_lite + + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message FieldOptions { + // The ctype option instructs the C++ code generator to use a different + // representation of the field than it normally would. 
See the specific + // options below. This option is not yet implemented in the open source + // release -- sorry, we'll try to include it in a future version! + optional CType ctype = 1 [default = STRING]; + enum CType { + // Default mode. + STRING = 0; + + CORD = 1; + + STRING_PIECE = 2; + } + // The packed option can be enabled for repeated primitive fields to enable + // a more efficient representation on the wire. Rather than repeatedly + // writing the tag and type for each element, the entire array is encoded as + // a single length-delimited blob. In proto3, only explicit setting it to + // false will avoid using packed encoding. + optional bool packed = 2; + + // The jstype option determines the JavaScript type used for values of the + // field. The option is permitted only for 64 bit integral and fixed types + // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING + // is represented as JavaScript string, which avoids loss of precision that + // can happen when a large value is converted to a floating point JavaScript. + // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to + // use the JavaScript "number" type. The behavior of the default option + // JS_NORMAL is implementation dependent. + // + // This option is an enum to permit additional types to be added, e.g. + // goog.math.Integer. + optional JSType jstype = 6 [default = JS_NORMAL]; + enum JSType { + // Use the default type. + JS_NORMAL = 0; + + // Use JavaScript strings. + JS_STRING = 1; + + // Use JavaScript numbers. + JS_NUMBER = 2; + } + + // Should this field be parsed lazily? Lazy applies only to message-type + // fields. It means that when the outer message is initially parsed, the + // inner message's contents will not be parsed but instead stored in encoded + // form. The inner message will actually be parsed when it is first accessed. + // + // This is only a hint. 
Implementations are free to choose whether to use + // eager or lazy parsing regardless of the value of this option. However, + // setting this option true suggests that the protocol author believes that + // using lazy parsing on this field is worth the additional bookkeeping + // overhead typically needed to implement it. + // + // This option does not affect the public interface of any generated code; + // all method signatures remain the same. Furthermore, thread-safety of the + // interface is not affected by this option; const methods remain safe to + // call from multiple threads concurrently, while non-const methods continue + // to require exclusive access. + // + // + // Note that implementations may choose not to check required fields within + // a lazy sub-message. That is, calling IsInitialized() on the outer message + // may return true even if the inner message has missing required fields. + // This is necessary because otherwise the inner message would have to be + // parsed in order to perform the check, defeating the purpose of lazy + // parsing. An implementation which chooses not to check required fields + // must be consistent about it. That is, for any particular sub-message, the + // implementation must either *always* check its required fields, or *never* + // check its required fields, regardless of whether or not the message has + // been parsed. + optional bool lazy = 5 [default = false]; + + // Is this field deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for accessors, or it will be completely ignored; in the very least, this + // is a formalization for deprecating fields. + optional bool deprecated = 3 [default = false]; + + // For Google-internal migration only. Do not use. + optional bool weak = 10 [default = false]; + + + // The parser stores options it doesn't recognize here. See above. 
+ repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; + + reserved 4; // removed jtype +} + +message OneofOptions { + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message EnumOptions { + + // Set this option to true to allow mapping different tag names to the same + // value. + optional bool allow_alias = 2; + + // Is this enum deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + optional bool deprecated = 3 [default = false]; + + reserved 5; // javanano_as_lite + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message EnumValueOptions { + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + optional bool deprecated = 1 [default = false]; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message ServiceOptions { + + // Note: Field numbers 1 through 32 are reserved for Google's internal RPC + // framework. 
We apologize for hoarding these numbers to ourselves, but + // we were already using them long before we decided to release Protocol + // Buffers. + + // Is this service deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + optional bool deprecated = 33 [default = false]; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message MethodOptions { + + // Note: Field numbers 1 through 32 are reserved for Google's internal RPC + // framework. We apologize for hoarding these numbers to ourselves, but + // we were already using them long before we decided to release Protocol + // Buffers. + + // Is this method deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the method, or it will be completely ignored; in the very least, + // this is a formalization for deprecating methods. + optional bool deprecated = 33 [default = false]; + + // Is this method side-effect-free (or safe in HTTP parlance), or idempotent, + // or neither? HTTP based RPC implementation may choose GET verb for safe + // methods, and PUT verb for idempotent methods instead of the default POST. + enum IdempotencyLevel { + IDEMPOTENCY_UNKNOWN = 0; + NO_SIDE_EFFECTS = 1; // implies idempotent + IDEMPOTENT = 2; // idempotent, but may have side effects + } + optional IdempotencyLevel idempotency_level = 34 + [default = IDEMPOTENCY_UNKNOWN]; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. 
+ extensions 1000 to max; +} + + +// A message representing a option the parser does not recognize. This only +// appears in options protos created by the compiler::Parser class. +// DescriptorPool resolves these when building Descriptor objects. Therefore, +// options protos in descriptor objects (e.g. returned by Descriptor::options(), +// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions +// in them. +message UninterpretedOption { + // The name of the uninterpreted option. Each string represents a segment in + // a dot-separated name. is_extension is true iff a segment represents an + // extension (denoted with parentheses in options specs in .proto files). + // E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents + // "foo.(bar.baz).qux". + message NamePart { + required string name_part = 1; + required bool is_extension = 2; + } + repeated NamePart name = 2; + + // The value of the uninterpreted option, in whatever type the tokenizer + // identified it as during parsing. Exactly one of these should be set. + optional string identifier_value = 3; + optional uint64 positive_int_value = 4; + optional int64 negative_int_value = 5; + optional double double_value = 6; + optional bytes string_value = 7; + optional string aggregate_value = 8; +} + +// =================================================================== +// Optional source code info + +// Encapsulates information about the original source file from which a +// FileDescriptorProto was generated. +message SourceCodeInfo { + // A Location identifies a piece of source code in a .proto file which + // corresponds to a particular definition. This information is intended + // to be useful to IDEs, code indexers, documentation generators, and similar + // tools. 
+ // + // For example, say we have a file like: + // message Foo { + // optional string foo = 1; + // } + // Let's look at just the field definition: + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // We have the following locations: + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. + // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // Notes: + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. + // - A location's span is not always a subset of its parent's span. For + // example, the "extendee" of an extension declaration appears at the + // beginning of the "extend" block and is shared by all extensions within + // the block. + // - Just because a location's span is a subset of some other location's span + // does not mean that it is a descendant. For example, a "group" defines + // both a type and a field in a single declaration. Thus, the locations + // corresponding to the type and field and their components will overlap. + // - Code which tries to interpret locations should probably be designed to + // ignore those that it doesn't understand, as more types of locations could + // be recorded in the future. 
+ repeated Location location = 1; + message Location { + // Identifies which part of the FileDescriptorProto was defined at this + // location. + // + // Each element is a field number or an index. They form a path from + // the root FileDescriptorProto to the place where the definition. For + // example, this path: + // [ 4, 3, 2, 7, 1 ] + // refers to: + // file.message_type(3) // 4, 3 + // .field(7) // 2, 7 + // .name() // 1 + // This is because FileDescriptorProto.message_type has field number 4: + // repeated DescriptorProto message_type = 4; + // and DescriptorProto.field has field number 2: + // repeated FieldDescriptorProto field = 2; + // and FieldDescriptorProto.name has field number 1: + // optional string name = 1; + // + // Thus, the above path gives the location of a field name. If we removed + // the last element: + // [ 4, 3, 2, 7 ] + // this path refers to the whole field declaration (from the beginning + // of the label to the terminating semicolon). + repeated int32 path = 1 [packed = true]; + + // Always has exactly three or four elements: start line, start column, + // end line (optional, otherwise assumed same as start line), end column. + // These are packed into a single field for efficiency. Note that line + // and column numbers are zero-based -- typically you will want to add + // 1 to each before displaying to a user. + repeated int32 span = 2 [packed = true]; + + // If this SourceCodeInfo represents a complete declaration, these are any + // comments appearing before and after the declaration which appear to be + // attached to the declaration. + // + // A series of line comments appearing on consecutive lines, with no other + // tokens appearing on those lines, will be treated as a single comment. + // + // leading_detached_comments will keep paragraphs of comments that appear + // before (but not connected to) the current element. Each paragraph, + // separated by empty lines, will be one comment element in the repeated + // field. 
+ // + // Only the comment content is provided; comment markers (e.g. //) are + // stripped out. For block comments, leading whitespace and an asterisk + // will be stripped from the beginning of each line other than the first. + // Newlines are included in the output. + // + // Examples: + // + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; + // + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. + // + // // Comment attached to qux. + // // + // // Another line attached to qux. + // optional double qux = 4; + // + // // Detached comment for corge. This is not leading or trailing comments + // // to qux or corge because there are blank lines separating it from + // // both. + // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. + optional string leading_comments = 3; + optional string trailing_comments = 4; + repeated string leading_detached_comments = 6; + } +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +message GeneratedCodeInfo { + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. + repeated Annotation annotation = 1; + message Annotation { + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + repeated int32 path = 1 [packed = true]; + + // Identifies the filesystem path to the original source .proto. 
+ optional string source_file = 2; + + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + optional int32 begin = 3; + + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). + optional int32 end = 4; + } +} diff --git a/prost-build/third-party/include/google/protobuf/duration.proto b/prost-build/third-party/include/google/protobuf/duration.proto new file mode 100644 index 0000000..81c3e36 --- /dev/null +++ b/prost-build/third-party/include/google/protobuf/duration.proto @@ -0,0 +1,116 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "google.golang.org/protobuf/types/known/durationpb"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "DurationProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// # Examples +// +// Example 1: Compute Duration from two Timestamps in pseudo code. +// +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; +// +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; +// +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (duration.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } +// +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. 
+// +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; +// +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; +// +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } +// +// Example 3: Compute Duration from datetime.timedelta in Python. +// +// td = datetime.timedelta(days=3, minutes=10) +// duration = Duration() +// duration.FromTimedelta(td) +// +// # JSON Mapping +// +// In JSON format, the Duration type is encoded as a string rather than an +// object, where the string ends in the suffix "s" (indicating seconds) and +// is preceded by the number of seconds, with nanoseconds expressed as +// fractional seconds. For example, 3 seconds with 0 nanoseconds should be +// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should +// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 +// microsecond should be expressed in JSON format as "3.000001s". +// +// +message Duration { + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. Note: these bounds are computed from: + // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + int64 seconds = 1; + + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. 
+ int32 nanos = 2; +} diff --git a/prost-build/third-party/include/google/protobuf/empty.proto b/prost-build/third-party/include/google/protobuf/empty.proto new file mode 100644 index 0000000..5f992de --- /dev/null +++ b/prost-build/third-party/include/google/protobuf/empty.proto @@ -0,0 +1,52 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option go_package = "google.golang.org/protobuf/types/known/emptypb"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "EmptyProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; +option cc_enable_arenas = true; + +// A generic empty message that you can re-use to avoid defining duplicated +// empty messages in your APIs. A typical example is to use it as the request +// or the response type of an API method. For instance: +// +// service Foo { +// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); +// } +// +// The JSON representation for `Empty` is empty JSON object `{}`. +message Empty {} diff --git a/prost-build/third-party/include/google/protobuf/field_mask.proto b/prost-build/third-party/include/google/protobuf/field_mask.proto new file mode 100644 index 0000000..6b5104f --- /dev/null +++ b/prost-build/third-party/include/google/protobuf/field_mask.proto @@ -0,0 +1,245 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "FieldMaskProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; +option go_package = "google.golang.org/protobuf/types/known/fieldmaskpb"; +option cc_enable_arenas = true; + +// `FieldMask` represents a set of symbolic field paths, for example: +// +// paths: "f.a" +// paths: "f.b.d" +// +// Here `f` represents a field in some root message, `a` and `b` +// fields in the message found in `f`, and `d` a field found in the +// message in `f.b`. +// +// Field masks are used to specify a subset of fields that should be +// returned by a get operation or modified by an update operation. +// Field masks also have a custom JSON encoding (see below). +// +// # Field Masks in Projections +// +// When used in the context of a projection, a response message or +// sub-message is filtered by the API to only contain those fields as +// specified in the mask. 
For example, if the mask in the previous +// example is applied to a response message as follows: +// +// f { +// a : 22 +// b { +// d : 1 +// x : 2 +// } +// y : 13 +// } +// z: 8 +// +// The result will not contain specific values for fields x,y and z +// (their value will be set to the default, and omitted in proto text +// output): +// +// +// f { +// a : 22 +// b { +// d : 1 +// } +// } +// +// A repeated field is not allowed except at the last position of a +// paths string. +// +// If a FieldMask object is not present in a get operation, the +// operation applies to all fields (as if a FieldMask of all fields +// had been specified). +// +// Note that a field mask does not necessarily apply to the +// top-level response message. In case of a REST get operation, the +// field mask applies directly to the response, but in case of a REST +// list operation, the mask instead applies to each individual message +// in the returned resource list. In case of a REST custom method, +// other definitions may be used. Where the mask applies will be +// clearly documented together with its declaration in the API. In +// any case, the effect on the returned resource/resources is required +// behavior for APIs. +// +// # Field Masks in Update Operations +// +// A field mask in update operations specifies which fields of the +// targeted resource are going to be updated. The API is required +// to only change the values of the fields as specified in the mask +// and leave the others untouched. If a resource is passed in to +// describe the updated values, the API ignores the values of all +// fields not covered by the mask. +// +// If a repeated field is specified for an update operation, new values will +// be appended to the existing repeated field in the target resource. Note that +// a repeated field is only allowed in the last position of a `paths` string. 
+// +// If a sub-message is specified in the last position of the field mask for an +// update operation, then new value will be merged into the existing sub-message +// in the target resource. +// +// For example, given the target message: +// +// f { +// b { +// d: 1 +// x: 2 +// } +// c: [1] +// } +// +// And an update message: +// +// f { +// b { +// d: 10 +// } +// c: [2] +// } +// +// then if the field mask is: +// +// paths: ["f.b", "f.c"] +// +// then the result will be: +// +// f { +// b { +// d: 10 +// x: 2 +// } +// c: [1, 2] +// } +// +// An implementation may provide options to override this default behavior for +// repeated and message fields. +// +// In order to reset a field's value to the default, the field must +// be in the mask and set to the default value in the provided resource. +// Hence, in order to reset all fields of a resource, provide a default +// instance of the resource and set all fields in the mask, or do +// not provide a mask as described below. +// +// If a field mask is not present on update, the operation applies to +// all fields (as if a field mask of all fields has been specified). +// Note that in the presence of schema evolution, this may mean that +// fields the client does not know and has therefore not filled into +// the request will be reset to their default. If this is unwanted +// behavior, a specific service may require a client to always specify +// a field mask, producing an error if not. +// +// As with get operations, the location of the resource which +// describes the updated values in the request message depends on the +// operation kind. In any case, the effect of the field mask is +// required to be honored by the API. +// +// ## Considerations for HTTP REST +// +// The HTTP kind of an update operation which uses a field mask must +// be set to PATCH instead of PUT in order to satisfy HTTP semantics +// (PUT must only be used for full updates). 
+// +// # JSON Encoding of Field Masks +// +// In JSON, a field mask is encoded as a single string where paths are +// separated by a comma. Fields name in each path are converted +// to/from lower-camel naming conventions. +// +// As an example, consider the following message declarations: +// +// message Profile { +// User user = 1; +// Photo photo = 2; +// } +// message User { +// string display_name = 1; +// string address = 2; +// } +// +// In proto a field mask for `Profile` may look as such: +// +// mask { +// paths: "user.display_name" +// paths: "photo" +// } +// +// In JSON, the same mask is represented as below: +// +// { +// mask: "user.displayName,photo" +// } +// +// # Field Masks and Oneof Fields +// +// Field masks treat fields in oneofs just as regular fields. Consider the +// following message: +// +// message SampleMessage { +// oneof test_oneof { +// string name = 4; +// SubMessage sub_message = 9; +// } +// } +// +// The field mask can be: +// +// mask { +// paths: "name" +// } +// +// Or: +// +// mask { +// paths: "sub_message" +// } +// +// Note that oneof type names ("test_oneof" in this case) cannot be used in +// paths. +// +// ## Field Mask Verification +// +// The implementation of any API method which has a FieldMask type field in the +// request should verify the included field paths, and return an +// `INVALID_ARGUMENT` error if any path is unmappable. +message FieldMask { + // The set of field mask paths. + repeated string paths = 1; +} diff --git a/prost-build/third-party/include/google/protobuf/source_context.proto b/prost-build/third-party/include/google/protobuf/source_context.proto new file mode 100644 index 0000000..06bfc43 --- /dev/null +++ b/prost-build/third-party/include/google/protobuf/source_context.proto @@ -0,0 +1,48 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. 
+// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "SourceContextProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; +option go_package = "google.golang.org/protobuf/types/known/sourcecontextpb"; + +// `SourceContext` represents information about the source of a +// protobuf element, like the file in which it is defined. +message SourceContext { + // The path-qualified name of the .proto file that contained the associated + // protobuf element. For example: `"google/protobuf/source_context.proto"`. + string file_name = 1; +} diff --git a/prost-build/third-party/include/google/protobuf/struct.proto b/prost-build/third-party/include/google/protobuf/struct.proto new file mode 100644 index 0000000..0ac843c --- /dev/null +++ b/prost-build/third-party/include/google/protobuf/struct.proto @@ -0,0 +1,95 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "google.golang.org/protobuf/types/known/structpb"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "StructProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// `Struct` represents a structured data value, consisting of fields +// which map to dynamically typed values. In some languages, `Struct` +// might be supported by a native representation. For example, in +// scripting languages like JS a struct is represented as an +// object. The details of that representation are described together +// with the proto support for the language. +// +// The JSON representation for `Struct` is JSON object. +message Struct { + // Unordered map of dynamically typed values. + map fields = 1; +} + +// `Value` represents a dynamically typed value which can be either +// null, a number, a string, a boolean, a recursive struct value, or a +// list of values. A producer of value is expected to set one of these +// variants. Absence of any variant indicates an error. 
+// +// The JSON representation for `Value` is JSON value. +message Value { + // The kind of value. + oneof kind { + // Represents a null value. + NullValue null_value = 1; + // Represents a double value. + double number_value = 2; + // Represents a string value. + string string_value = 3; + // Represents a boolean value. + bool bool_value = 4; + // Represents a structured value. + Struct struct_value = 5; + // Represents a repeated `Value`. + ListValue list_value = 6; + } +} + +// `NullValue` is a singleton enumeration to represent the null value for the +// `Value` type union. +// +// The JSON representation for `NullValue` is JSON `null`. +enum NullValue { + // Null value. + NULL_VALUE = 0; +} + +// `ListValue` is a wrapper around a repeated field of values. +// +// The JSON representation for `ListValue` is JSON array. +message ListValue { + // Repeated field of dynamically typed values. + repeated Value values = 1; +} diff --git a/prost-build/third-party/include/google/protobuf/timestamp.proto b/prost-build/third-party/include/google/protobuf/timestamp.proto new file mode 100644 index 0000000..3b2df6d --- /dev/null +++ b/prost-build/third-party/include/google/protobuf/timestamp.proto @@ -0,0 +1,147 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "google.golang.org/protobuf/types/known/timestamppb"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "TimestampProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// A Timestamp represents a point in time independent of any time zone or local +// calendar, encoded as a count of seconds and fractions of seconds at +// nanosecond resolution. The count is relative to an epoch at UTC midnight on +// January 1, 1970, in the proleptic Gregorian calendar which extends the +// Gregorian calendar backwards to year one. +// +// All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap +// second table is needed for interpretation, using a [24-hour linear +// smear](https://developers.google.com/time/smear). +// +// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. 
By +// restricting to that range, we ensure that we can convert to and from [RFC +// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings. +// +// # Examples +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. +// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// +// Example 5: Compute Timestamp from Java `Instant.now()`. +// +// Instant now = Instant.now(); +// +// Timestamp timestamp = +// Timestamp.newBuilder().setSeconds(now.getEpochSecond()) +// .setNanos(now.getNano()).build(); +// +// +// Example 6: Compute Timestamp from current time in Python. +// +// timestamp = Timestamp() +// timestamp.GetCurrentTime() +// +// # JSON Mapping +// +// In JSON format, the Timestamp type is encoded as a string in the +// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. 
That is, the +// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" +// where {year} is always expressed using four digits while {month}, {day}, +// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional +// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), +// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone +// is required. A proto3 JSON serializer should always use UTC (as indicated by +// "Z") when printing the Timestamp type and a proto3 JSON parser should be +// able to accept both UTC and other timezones (as indicated by an offset). +// +// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past +// 01:30 UTC on January 15, 2017. +// +// In JavaScript, one can convert a Date object to this format using the +// standard +// [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString) +// method. In Python, a standard `datetime.datetime` object can be converted +// to this format using +// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with +// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use +// the Joda Time's [`ISODateTimeFormat.dateTime()`]( +// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D +// ) to obtain a formatter capable of generating timestamps in this format. +// +// +message Timestamp { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + int64 seconds = 1; + + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. 
+ int32 nanos = 2; +} diff --git a/prost-build/third-party/include/google/protobuf/type.proto b/prost-build/third-party/include/google/protobuf/type.proto new file mode 100644 index 0000000..d3f6a68 --- /dev/null +++ b/prost-build/third-party/include/google/protobuf/type.proto @@ -0,0 +1,187 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +syntax = "proto3"; + +package google.protobuf; + +import "google/protobuf/any.proto"; +import "google/protobuf/source_context.proto"; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option java_package = "com.google.protobuf"; +option java_outer_classname = "TypeProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; +option go_package = "google.golang.org/protobuf/types/known/typepb"; + +// A protocol buffer message type. +message Type { + // The fully qualified message name. + string name = 1; + // The list of fields. + repeated Field fields = 2; + // The list of types appearing in `oneof` definitions in this type. + repeated string oneofs = 3; + // The protocol buffer options. + repeated Option options = 4; + // The source context. + SourceContext source_context = 5; + // The source syntax. + Syntax syntax = 6; +} + +// A single field of a message type. +message Field { + // Basic field types. + enum Kind { + // Field type unknown. + TYPE_UNKNOWN = 0; + // Field type double. + TYPE_DOUBLE = 1; + // Field type float. + TYPE_FLOAT = 2; + // Field type int64. + TYPE_INT64 = 3; + // Field type uint64. + TYPE_UINT64 = 4; + // Field type int32. + TYPE_INT32 = 5; + // Field type fixed64. + TYPE_FIXED64 = 6; + // Field type fixed32. + TYPE_FIXED32 = 7; + // Field type bool. + TYPE_BOOL = 8; + // Field type string. + TYPE_STRING = 9; + // Field type group. Proto2 syntax only, and deprecated. + TYPE_GROUP = 10; + // Field type message. + TYPE_MESSAGE = 11; + // Field type bytes. + TYPE_BYTES = 12; + // Field type uint32. + TYPE_UINT32 = 13; + // Field type enum. + TYPE_ENUM = 14; + // Field type sfixed32. + TYPE_SFIXED32 = 15; + // Field type sfixed64. + TYPE_SFIXED64 = 16; + // Field type sint32. + TYPE_SINT32 = 17; + // Field type sint64. + TYPE_SINT64 = 18; + } + + // Whether a field is optional, required, or repeated. + enum Cardinality { + // For fields with unknown cardinality. 
+ CARDINALITY_UNKNOWN = 0; + // For optional fields. + CARDINALITY_OPTIONAL = 1; + // For required fields. Proto2 syntax only. + CARDINALITY_REQUIRED = 2; + // For repeated fields. + CARDINALITY_REPEATED = 3; + } + + // The field type. + Kind kind = 1; + // The field cardinality. + Cardinality cardinality = 2; + // The field number. + int32 number = 3; + // The field name. + string name = 4; + // The field type URL, without the scheme, for message or enumeration + // types. Example: `"type.googleapis.com/google.protobuf.Timestamp"`. + string type_url = 6; + // The index of the field type in `Type.oneofs`, for message or enumeration + // types. The first type has index 1; zero means the type is not in the list. + int32 oneof_index = 7; + // Whether to use alternative packed wire representation. + bool packed = 8; + // The protocol buffer options. + repeated Option options = 9; + // The field JSON name. + string json_name = 10; + // The string value of the default value of this field. Proto2 syntax only. + string default_value = 11; +} + +// Enum type definition. +message Enum { + // Enum type name. + string name = 1; + // Enum value definitions. + repeated EnumValue enumvalue = 2; + // Protocol buffer options. + repeated Option options = 3; + // The source context. + SourceContext source_context = 4; + // The source syntax. + Syntax syntax = 5; +} + +// Enum value definition. +message EnumValue { + // Enum value name. + string name = 1; + // Enum value number. + int32 number = 2; + // Protocol buffer options. + repeated Option options = 3; +} + +// A protocol buffer option, which can be attached to a message, field, +// enumeration, etc. +message Option { + // The option's name. For protobuf built-in options (options defined in + // descriptor.proto), this is the short name. For example, `"map_entry"`. + // For custom options, it should be the fully-qualified name. For example, + // `"google.api.http"`. 
+ string name = 1; + // The option's value packed in an Any message. If the value is a primitive, + // the corresponding wrapper type defined in google/protobuf/wrappers.proto + // should be used. If the value is an enum, it should be stored as an int32 + // value using the google.protobuf.Int32Value type. + Any value = 2; +} + +// The syntax in which a protocol buffer element is defined. +enum Syntax { + // Syntax `proto2`. + SYNTAX_PROTO2 = 0; + // Syntax `proto3`. + SYNTAX_PROTO3 = 1; +} diff --git a/prost-build/third-party/include/google/protobuf/wrappers.proto b/prost-build/third-party/include/google/protobuf/wrappers.proto new file mode 100644 index 0000000..d49dd53 --- /dev/null +++ b/prost-build/third-party/include/google/protobuf/wrappers.proto @@ -0,0 +1,123 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Wrappers for primitive (non-message) types. These types are useful +// for embedding primitives in the `google.protobuf.Any` type and for places +// where we need to distinguish between the absence of a primitive +// typed field and its default value. +// +// These wrappers have no meaningful use within repeated fields as they lack +// the ability to detect presence on individual elements. +// These wrappers have no meaningful use within a map or a oneof since +// individual entries of a map or fields of a oneof can already detect presence. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "google.golang.org/protobuf/types/known/wrapperspb"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "WrappersProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// Wrapper message for `double`. +// +// The JSON representation for `DoubleValue` is JSON number. +message DoubleValue { + // The double value. + double value = 1; +} + +// Wrapper message for `float`. +// +// The JSON representation for `FloatValue` is JSON number. +message FloatValue { + // The float value. + float value = 1; +} + +// Wrapper message for `int64`. +// +// The JSON representation for `Int64Value` is JSON string. +message Int64Value { + // The int64 value. 
+ int64 value = 1; +} + +// Wrapper message for `uint64`. +// +// The JSON representation for `UInt64Value` is JSON string. +message UInt64Value { + // The uint64 value. + uint64 value = 1; +} + +// Wrapper message for `int32`. +// +// The JSON representation for `Int32Value` is JSON number. +message Int32Value { + // The int32 value. + int32 value = 1; +} + +// Wrapper message for `uint32`. +// +// The JSON representation for `UInt32Value` is JSON number. +message UInt32Value { + // The uint32 value. + uint32 value = 1; +} + +// Wrapper message for `bool`. +// +// The JSON representation for `BoolValue` is JSON `true` and `false`. +message BoolValue { + // The bool value. + bool value = 1; +} + +// Wrapper message for `string`. +// +// The JSON representation for `StringValue` is JSON string. +message StringValue { + // The string value. + string value = 1; +} + +// Wrapper message for `bytes`. +// +// The JSON representation for `BytesValue` is JSON string. +message BytesValue { + // The bytes value. 
+ bytes value = 1; +} diff --git a/prost-build/third-party/update-vendored-protobuf.sh b/prost-build/third-party/update-vendored-protobuf.sh new file mode 100755 index 0000000..d8d741a --- /dev/null +++ b/prost-build/third-party/update-vendored-protobuf.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +set -ex + +if [ "$#" -ne 1 ] +then + echo "Usage: $0 <protobuf-version>" + exit 1 +fi + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +VERSION="$1" +TEMPDIR=$(mktemp -d "protobuf-$VERSION-XXX") +ARCH="linux-x86_64" + +mkdir "$TEMPDIR/$ARCH" +curl --proto '=https' --tlsv1.2 -sSfL \ + "https://github.com/protocolbuffers/protobuf/releases/download/v$VERSION/protoc-$VERSION-$ARCH.zip" \ + -o "$TEMPDIR/$ARCH/protoc.zip" + +unzip "$TEMPDIR/$ARCH/protoc.zip" -d "$TEMPDIR/$ARCH" + +# Update the include directory +rm -rf "$DIR/include" +mv "$TEMPDIR/linux-x86_64/include" "$DIR/include/" + + +rm -rf $TEMPDIR +cd "$DIR/protobuf" +git checkout "v$VERSION" +cd $DIR + +echo "third-party protobuf items updated to v$VERSION" \ No newline at end of file diff --git a/prost-derive/Cargo.toml b/prost-derive/Cargo.toml new file mode 100644 index 0000000..e32553d --- /dev/null +++ b/prost-derive/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "ntex-prost-derive" +version = "0.10.1" +authors = [ + "Dan Burkert <dan@danburkert.com>", + "Tokio Contributors <team@tokio.rs>", +] +license = "Apache-2.0" +repository = "https://github.com/tokio-rs/prost" +documentation = "https://docs.rs/prost-derive" +readme = "README.md" +description = "A Protocol Buffers implementation for the Rust Language."
+edition = "2018" + +[lib] +proc_macro = true + +[dependencies] +anyhow = "1.0.1" +itertools = "0.10" +proc-macro2 = "1" +quote = "1" +syn = { version = "1.0.3", features = [ "extra-traits" ] } diff --git a/prost-derive/LICENSE b/prost-derive/LICENSE new file mode 100644 index 0000000..16fe87b --- /dev/null +++ b/prost-derive/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/prost-derive/README.md b/prost-derive/README.md new file mode 100644 index 0000000..a51050e --- /dev/null +++ b/prost-derive/README.md @@ -0,0 +1,16 @@ +[![Documentation](https://docs.rs/prost-derive/badge.svg)](https://docs.rs/prost-derive/) +[![Crate](https://img.shields.io/crates/v/prost-derive.svg)](https://crates.io/crates/prost-derive) + +# prost-derive + +`prost-derive` handles generating encoding and decoding implementations for Rust +types annotated with the `prost` annotation. For the most part, users of `prost` +shouldn't need to interact with `prost-derive` directly. + +## License + +`prost-derive` is distributed under the terms of the Apache License (Version 2.0). + +See [LICENSE](LICENSE) for details.
+ +Copyright 2017 Dan Burkert diff --git a/prost-derive/src/field/group.rs b/prost-derive/src/field/group.rs new file mode 100644 index 0000000..076b577 --- /dev/null +++ b/prost-derive/src/field/group.rs @@ -0,0 +1,134 @@ +use anyhow::{bail, Error}; +use proc_macro2::TokenStream; +use quote::{quote, ToTokens}; +use syn::Meta; + +use crate::field::{set_bool, set_option, tag_attr, word_attr, Label}; + +#[derive(Clone)] +pub struct Field { + pub label: Label, + pub tag: u32, +} + +impl Field { + pub fn new(attrs: &[Meta], inferred_tag: Option) -> Result, Error> { + let mut group = false; + let mut label = None; + let mut tag = None; + let mut boxed = false; + + let mut unknown_attrs = Vec::new(); + + for attr in attrs { + if word_attr("group", attr) { + set_bool(&mut group, "duplicate group attributes")?; + } else if word_attr("boxed", attr) { + set_bool(&mut boxed, "duplicate boxed attributes")?; + } else if let Some(t) = tag_attr(attr)? { + set_option(&mut tag, t, "duplicate tag attributes")?; + } else if let Some(l) = Label::from_attr(attr) { + set_option(&mut label, l, "duplicate label attributes")?; + } else { + unknown_attrs.push(attr); + } + } + + if !group { + return Ok(None); + } + + match unknown_attrs.len() { + 0 => (), + 1 => bail!("unknown attribute for group field: {:?}", unknown_attrs[0]), + _ => bail!("unknown attributes for group field: {:?}", unknown_attrs), + } + + let tag = match tag.or(inferred_tag) { + Some(tag) => tag, + None => bail!("group field is missing a tag attribute"), + }; + + Ok(Some(Field { + label: label.unwrap_or(Label::Optional), + tag, + })) + } + + pub fn new_oneof(attrs: &[Meta]) -> Result, Error> { + if let Some(mut field) = Field::new(attrs, None)? 
{ + if let Some(attr) = attrs.iter().find(|attr| Label::from_attr(attr).is_some()) { + bail!( + "invalid attribute for oneof field: {}", + attr.path().into_token_stream() + ); + } + field.label = Label::Required; + Ok(Some(field)) + } else { + Ok(None) + } + } + + pub fn encode(&self, ident: TokenStream) -> TokenStream { + let tag = self.tag; + match self.label { + Label::Optional => quote! { + if let Some(ref msg) = #ident { + ::prost::encoding::group::encode(#tag, msg, buf); + } + }, + Label::Required => quote! { + ::prost::encoding::group::encode(#tag, &#ident, buf); + }, + Label::Repeated => quote! { + for msg in &#ident { + ::prost::encoding::group::encode(#tag, msg, buf); + } + }, + } + } + + pub fn merge(&self, ident: TokenStream) -> TokenStream { + match self.label { + Label::Optional => quote! { + ::prost::encoding::group::merge( + tag, + wire_type, + #ident.get_or_insert_with(::core::default::Default::default), + buf, + ctx, + ) + }, + Label::Required => quote! { + ::prost::encoding::group::merge(tag, wire_type, #ident, buf, ctx) + }, + Label::Repeated => quote! { + ::prost::encoding::group::merge_repeated(tag, wire_type, #ident, buf, ctx) + }, + } + } + + pub fn encoded_len(&self, ident: TokenStream) -> TokenStream { + let tag = self.tag; + match self.label { + Label::Optional => quote! { + #ident.as_ref().map_or(0, |msg| ::prost::encoding::group::encoded_len(#tag, msg)) + }, + Label::Required => quote! { + ::prost::encoding::group::encoded_len(#tag, &#ident) + }, + Label::Repeated => quote! 
{ + ::prost::encoding::group::encoded_len_repeated(#tag, &#ident) + }, + } + } + + pub fn clear(&self, ident: TokenStream) -> TokenStream { + match self.label { + Label::Optional => quote!(#ident = ::core::option::Option::None), + Label::Required => quote!(#ident.clear()), + Label::Repeated => quote!(#ident.clear()), + } + } +} diff --git a/prost-derive/src/field/map.rs b/prost-derive/src/field/map.rs new file mode 100644 index 0000000..a512f05 --- /dev/null +++ b/prost-derive/src/field/map.rs @@ -0,0 +1,405 @@ +use anyhow::{bail, Error}; +use proc_macro2::{Span, TokenStream}; +use quote::quote; +use syn::{Ident, Lit, Meta, MetaNameValue, NestedMeta}; + +use crate::field::{scalar, set_option, tag_attr}; + +#[derive(Clone, Debug)] +pub enum MapTy { + HashMap, + BTreeMap, +} + +impl MapTy { + fn from_str(s: &str) -> Option { + match s { + "map" | "hash_map" => Some(MapTy::HashMap), + "btree_map" => Some(MapTy::BTreeMap), + _ => None, + } + } + + fn module(&self) -> Ident { + match *self { + MapTy::HashMap => Ident::new("hash_map", Span::call_site()), + MapTy::BTreeMap => Ident::new("btree_map", Span::call_site()), + } + } + + fn lib(&self) -> TokenStream { + match self { + MapTy::HashMap => quote! { std }, + MapTy::BTreeMap => quote! { prost::alloc }, + } + } +} + +fn fake_scalar(ty: scalar::Ty) -> scalar::Field { + let kind = scalar::Kind::Plain(scalar::DefaultValue::new(&ty)); + scalar::Field { + ty, + kind, + tag: 0, // Not used here + } +} + +#[derive(Clone)] +pub struct Field { + pub map_ty: MapTy, + pub key_ty: scalar::Ty, + pub value_ty: ValueTy, + pub tag: u32, +} + +impl Field { + pub fn new(attrs: &[Meta], inferred_tag: Option) -> Result, Error> { + let mut types = None; + let mut tag = None; + + for attr in attrs { + if let Some(t) = tag_attr(attr)? 
{ + set_option(&mut tag, t, "duplicate tag attributes")?; + } else if let Some(map_ty) = attr + .path() + .get_ident() + .and_then(|i| MapTy::from_str(&i.to_string())) + { + let (k, v): (String, String) = match &*attr { + Meta::NameValue(MetaNameValue { + lit: Lit::Str(lit), .. + }) => { + let items = lit.value(); + let mut items = items.split(',').map(ToString::to_string); + let k = items.next().unwrap(); + let v = match items.next() { + Some(k) => k, + None => bail!("invalid map attribute: must have key and value types"), + }; + if items.next().is_some() { + bail!("invalid map attribute: {:?}", attr); + } + (k, v) + } + Meta::List(meta_list) => { + // TODO(rustlang/rust#23121): slice pattern matching would make this much nicer. + if meta_list.nested.len() != 2 { + bail!("invalid map attribute: must contain key and value types"); + } + let k = match &meta_list.nested[0] { + NestedMeta::Meta(Meta::Path(k)) if k.get_ident().is_some() => { + k.get_ident().unwrap().to_string() + } + _ => bail!("invalid map attribute: key must be an identifier"), + }; + let v = match &meta_list.nested[1] { + NestedMeta::Meta(Meta::Path(v)) if v.get_ident().is_some() => { + v.get_ident().unwrap().to_string() + } + _ => bail!("invalid map attribute: value must be an identifier"), + }; + (k, v) + } + _ => return Ok(None), + }; + set_option( + &mut types, + (map_ty, key_ty_from_str(&k)?, ValueTy::from_str(&v)?), + "duplicate map type attribute", + )?; + } else { + return Ok(None); + } + } + + Ok(match (types, tag.or(inferred_tag)) { + (Some((map_ty, key_ty, value_ty)), Some(tag)) => Some(Field { + map_ty, + key_ty, + value_ty, + tag, + }), + _ => None, + }) + } + + pub fn new_oneof(attrs: &[Meta]) -> Result, Error> { + Field::new(attrs, None) + } + + /// Returns a statement which encodes the map field. 
+ pub fn encode(&self, ident: TokenStream) -> TokenStream { + let tag = self.tag; + let key_mod = self.key_ty.module(); + let ke = quote!(::prost::encoding::#key_mod::encode); + let kl = quote!(::prost::encoding::#key_mod::encoded_len); + let module = self.map_ty.module(); + match &self.value_ty { + ValueTy::Scalar(scalar::Ty::Enumeration(ty)) => { + let default = quote!(#ty::default() as i32); + quote! { + ::prost::encoding::#module::encode_with_default( + #ke, + #kl, + ::prost::encoding::int32::encode, + ::prost::encoding::int32::encoded_len, + &(#default), + #tag, + &#ident, + buf, + ); + } + } + ValueTy::Scalar(value_ty) => { + let val_mod = value_ty.module(); + let ve = quote!(::prost::encoding::#val_mod::encode); + let vl = quote!(::prost::encoding::#val_mod::encoded_len); + quote! { + ::prost::encoding::#module::encode( + #ke, + #kl, + #ve, + #vl, + #tag, + &#ident, + buf, + ); + } + } + ValueTy::Message => quote! { + ::prost::encoding::#module::encode( + #ke, + #kl, + ::prost::encoding::message::encode, + ::prost::encoding::message::encoded_len, + #tag, + &#ident, + buf, + ); + }, + } + } + + /// Returns an expression which evaluates to the result of merging a decoded key value pair + /// into the map. + pub fn merge(&self, ident: TokenStream) -> TokenStream { + let key_mod = self.key_ty.module(); + let km = quote!(::prost::encoding::#key_mod::merge); + let module = self.map_ty.module(); + match &self.value_ty { + ValueTy::Scalar(scalar::Ty::Enumeration(ty)) => { + let default = quote!(#ty::default() as i32); + quote! { + ::prost::encoding::#module::merge_with_default( + #km, + ::prost::encoding::int32::merge, + #default, + &mut #ident, + buf, + ctx, + ) + } + } + ValueTy::Scalar(value_ty) => { + let val_mod = value_ty.module(); + let vm = quote!(::prost::encoding::#val_mod::merge); + quote!(::prost::encoding::#module::merge(#km, #vm, &mut #ident, buf, ctx)) + } + ValueTy::Message => quote! 
{ + ::prost::encoding::#module::merge( + #km, + ::prost::encoding::message::merge, + &mut #ident, + buf, + ctx, + ) + }, + } + } + + /// Returns an expression which evaluates to the encoded length of the map. + pub fn encoded_len(&self, ident: TokenStream) -> TokenStream { + let tag = self.tag; + let key_mod = self.key_ty.module(); + let kl = quote!(::prost::encoding::#key_mod::encoded_len); + let module = self.map_ty.module(); + match &self.value_ty { + ValueTy::Scalar(scalar::Ty::Enumeration(ty)) => { + let default = quote!(#ty::default() as i32); + quote! { + ::prost::encoding::#module::encoded_len_with_default( + #kl, + ::prost::encoding::int32::encoded_len, + &(#default), + #tag, + &#ident, + ) + } + } + ValueTy::Scalar(value_ty) => { + let val_mod = value_ty.module(); + let vl = quote!(::prost::encoding::#val_mod::encoded_len); + quote!(::prost::encoding::#module::encoded_len(#kl, #vl, #tag, &#ident)) + } + ValueTy::Message => quote! { + ::prost::encoding::#module::encoded_len( + #kl, + ::prost::encoding::message::encoded_len, + #tag, + &#ident, + ) + }, + } + } + + pub fn clear(&self, ident: TokenStream) -> TokenStream { + quote!(#ident.clear()) + } + + /// Returns methods to embed in the message. + pub fn methods(&self, ident: &Ident) -> Option { + if let ValueTy::Scalar(scalar::Ty::Enumeration(ty)) = &self.value_ty { + let key_ty = self.key_ty.rust_type(); + let key_ref_ty = self.key_ty.rust_ref_type(); + + let get = Ident::new(&format!("get_{}", ident), Span::call_site()); + let insert = Ident::new(&format!("insert_{}", ident), Span::call_site()); + let take_ref = if self.key_ty.is_numeric() { + quote!(&) + } else { + quote!() + }; + + let get_doc = format!( + "Returns the enum value for the corresponding key in `{}`, \ + or `None` if the entry does not exist or it is not a valid enum value.", + ident, + ); + let insert_doc = format!("Inserts a key value pair into `{}`.", ident); + Some(quote! 
{ + #[doc=#get_doc] + pub fn #get(&self, key: #key_ref_ty) -> ::core::option::Option<#ty> { + self.#ident.get(#take_ref key).cloned().and_then(#ty::from_i32) + } + #[doc=#insert_doc] + pub fn #insert(&mut self, key: #key_ty, value: #ty) -> ::core::option::Option<#ty> { + self.#ident.insert(key, value as i32).and_then(#ty::from_i32) + } + }) + } else { + None + } + } + + /// Returns a newtype wrapper around the map, implementing nicer Debug + /// + /// The Debug tries to convert any enumerations met into the variants if possible, instead of + /// outputting the raw numbers. + pub fn debug(&self, wrapper_name: TokenStream) -> TokenStream { + let type_name = match self.map_ty { + MapTy::HashMap => Ident::new("HashMap", Span::call_site()), + MapTy::BTreeMap => Ident::new("BTreeMap", Span::call_site()), + }; + + // A fake field for generating the debug wrapper + let key_wrapper = fake_scalar(self.key_ty.clone()).debug(quote!(KeyWrapper)); + let key = self.key_ty.rust_type(); + let value_wrapper = self.value_ty.debug(); + let libname = self.map_ty.lib(); + let fmt = quote! { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + #key_wrapper + #value_wrapper + let mut builder = f.debug_map(); + for (k, v) in self.0 { + builder.entry(&KeyWrapper(k), &ValueWrapper(v)); + } + builder.finish() + } + }; + match &self.value_ty { + ValueTy::Scalar(ty) => { + if let scalar::Ty::Bytes = *ty { + return quote! { + struct #wrapper_name<'a>(&'a dyn ::core::fmt::Debug); + impl<'a> ::core::fmt::Debug for #wrapper_name<'a> { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + self.0.fmt(f) + } + } + }; + } + + let value = ty.rust_type(); + quote! { + struct #wrapper_name<'a>(&'a ::#libname::collections::#type_name<#key, #value>); + impl<'a> ::core::fmt::Debug for #wrapper_name<'a> { + #fmt + } + } + } + ValueTy::Message => quote! 
{ + struct #wrapper_name<'a, V: 'a>(&'a ::#libname::collections::#type_name<#key, V>); + impl<'a, V> ::core::fmt::Debug for #wrapper_name<'a, V> + where + V: ::core::fmt::Debug + 'a, + { + #fmt + } + }, + } + } +} + +fn key_ty_from_str(s: &str) -> Result { + let ty = scalar::Ty::from_str(s)?; + match ty { + scalar::Ty::Int32 + | scalar::Ty::Int64 + | scalar::Ty::Uint32 + | scalar::Ty::Uint64 + | scalar::Ty::Sint32 + | scalar::Ty::Sint64 + | scalar::Ty::Fixed32 + | scalar::Ty::Fixed64 + | scalar::Ty::Sfixed32 + | scalar::Ty::Sfixed64 + | scalar::Ty::Bool + | scalar::Ty::String => Ok(ty), + _ => bail!("invalid map key type: {}", s), + } +} + +/// A map value type. +#[derive(Clone, Debug, PartialEq, Eq)] +pub enum ValueTy { + Scalar(scalar::Ty), + Message, +} + +impl ValueTy { + fn from_str(s: &str) -> Result { + if let Ok(ty) = scalar::Ty::from_str(s) { + Ok(ValueTy::Scalar(ty)) + } else if s.trim() == "message" { + Ok(ValueTy::Message) + } else { + bail!("invalid map value type: {}", s); + } + } + + /// Returns a newtype wrapper around the ValueTy for nicer debug. + /// + /// If the contained value is enumeration, it tries to convert it to the variant. If not, it + /// just forwards the implementation. 
+ fn debug(&self) -> TokenStream { + match self { + ValueTy::Scalar(ty) => fake_scalar(ty.clone()).debug(quote!(ValueWrapper)), + ValueTy::Message => quote!( + fn ValueWrapper(v: T) -> T { + v + } + ), + } + } +} diff --git a/prost-derive/src/field/message.rs b/prost-derive/src/field/message.rs new file mode 100644 index 0000000..3bcdddf --- /dev/null +++ b/prost-derive/src/field/message.rs @@ -0,0 +1,134 @@ +use anyhow::{bail, Error}; +use proc_macro2::TokenStream; +use quote::{quote, ToTokens}; +use syn::Meta; + +use crate::field::{set_bool, set_option, tag_attr, word_attr, Label}; + +#[derive(Clone)] +pub struct Field { + pub label: Label, + pub tag: u32, +} + +impl Field { + pub fn new(attrs: &[Meta], inferred_tag: Option) -> Result, Error> { + let mut message = false; + let mut label = None; + let mut tag = None; + let mut boxed = false; + + let mut unknown_attrs = Vec::new(); + + for attr in attrs { + if word_attr("message", attr) { + set_bool(&mut message, "duplicate message attribute")?; + } else if word_attr("boxed", attr) { + set_bool(&mut boxed, "duplicate boxed attribute")?; + } else if let Some(t) = tag_attr(attr)? { + set_option(&mut tag, t, "duplicate tag attributes")?; + } else if let Some(l) = Label::from_attr(attr) { + set_option(&mut label, l, "duplicate label attributes")?; + } else { + unknown_attrs.push(attr); + } + } + + if !message { + return Ok(None); + } + + match unknown_attrs.len() { + 0 => (), + 1 => bail!( + "unknown attribute for message field: {:?}", + unknown_attrs[0] + ), + _ => bail!("unknown attributes for message field: {:?}", unknown_attrs), + } + + let tag = match tag.or(inferred_tag) { + Some(tag) => tag, + None => bail!("message field is missing a tag attribute"), + }; + + Ok(Some(Field { + label: label.unwrap_or(Label::Optional), + tag, + })) + } + + pub fn new_oneof(attrs: &[Meta]) -> Result, Error> { + if let Some(mut field) = Field::new(attrs, None)? 
{ + if let Some(attr) = attrs.iter().find(|attr| Label::from_attr(attr).is_some()) { + bail!( + "invalid attribute for oneof field: {}", + attr.path().into_token_stream() + ); + } + field.label = Label::Required; + Ok(Some(field)) + } else { + Ok(None) + } + } + + pub fn encode(&self, ident: TokenStream) -> TokenStream { + let tag = self.tag; + match self.label { + Label::Optional => quote! { + if let Some(ref msg) = #ident { + ::prost::encoding::message::encode(#tag, msg, buf); + } + }, + Label::Required => quote! { + ::prost::encoding::message::encode(#tag, &#ident, buf); + }, + Label::Repeated => quote! { + for msg in &#ident { + ::prost::encoding::message::encode(#tag, msg, buf); + } + }, + } + } + + pub fn merge(&self, ident: TokenStream) -> TokenStream { + match self.label { + Label::Optional => quote! { + ::prost::encoding::message::merge(wire_type, + #ident.get_or_insert_with(::core::default::Default::default), + buf, + ctx) + }, + Label::Required => quote! { + ::prost::encoding::message::merge(wire_type, #ident, buf, ctx) + }, + Label::Repeated => quote! { + ::prost::encoding::message::merge_repeated(wire_type, #ident, buf, ctx) + }, + } + } + + pub fn encoded_len(&self, ident: TokenStream) -> TokenStream { + let tag = self.tag; + match self.label { + Label::Optional => quote! { + #ident.as_ref().map_or(0, |msg| ::prost::encoding::message::encoded_len(#tag, msg)) + }, + Label::Required => quote! { + ::prost::encoding::message::encoded_len(#tag, &#ident) + }, + Label::Repeated => quote! 
{ + ::prost::encoding::message::encoded_len_repeated(#tag, &#ident) + }, + } + } + + pub fn clear(&self, ident: TokenStream) -> TokenStream { + match self.label { + Label::Optional => quote!(#ident = ::core::option::Option::None), + Label::Required => quote!(#ident.clear()), + Label::Repeated => quote!(#ident.clear()), + } + } +} diff --git a/prost-derive/src/field/mod.rs b/prost-derive/src/field/mod.rs new file mode 100644 index 0000000..09fef83 --- /dev/null +++ b/prost-derive/src/field/mod.rs @@ -0,0 +1,366 @@ +mod group; +mod map; +mod message; +mod oneof; +mod scalar; + +use std::fmt; +use std::slice; + +use anyhow::{bail, Error}; +use proc_macro2::TokenStream; +use quote::quote; +use syn::{Attribute, Ident, Lit, LitBool, Meta, MetaList, MetaNameValue, NestedMeta}; + +#[derive(Clone)] +pub enum Field { + /// A scalar field. + Scalar(scalar::Field), + /// A message field. + Message(message::Field), + /// A map field. + Map(map::Field), + /// A oneof field. + Oneof(oneof::Field), + /// A group field. + Group(group::Field), +} + +impl Field { + /// Creates a new `Field` from an iterator of field attributes. + /// + /// If the meta items are invalid, an error will be returned. + /// If the field should be ignored, `None` is returned. + pub fn new(attrs: Vec, inferred_tag: Option) -> Result, Error> { + let attrs = prost_attrs(attrs); + + // TODO: check for ignore attribute. + + let field = if let Some(field) = scalar::Field::new(&attrs, inferred_tag)? { + Field::Scalar(field) + } else if let Some(field) = message::Field::new(&attrs, inferred_tag)? { + Field::Message(field) + } else if let Some(field) = map::Field::new(&attrs, inferred_tag)? { + Field::Map(field) + } else if let Some(field) = oneof::Field::new(&attrs)? { + Field::Oneof(field) + } else if let Some(field) = group::Field::new(&attrs, inferred_tag)? 
{ + Field::Group(field) + } else { + bail!("no type attribute"); + }; + + Ok(Some(field)) + } + + /// Creates a new oneof `Field` from an iterator of field attributes. + /// + /// If the meta items are invalid, an error will be returned. + /// If the field should be ignored, `None` is returned. + pub fn new_oneof(attrs: Vec) -> Result, Error> { + let attrs = prost_attrs(attrs); + + // TODO: check for ignore attribute. + + let field = if let Some(field) = scalar::Field::new_oneof(&attrs)? { + Field::Scalar(field) + } else if let Some(field) = message::Field::new_oneof(&attrs)? { + Field::Message(field) + } else if let Some(field) = map::Field::new_oneof(&attrs)? { + Field::Map(field) + } else if let Some(field) = group::Field::new_oneof(&attrs)? { + Field::Group(field) + } else { + bail!("no type attribute for oneof field"); + }; + + Ok(Some(field)) + } + + pub fn tags(&self) -> Vec { + match *self { + Field::Scalar(ref scalar) => vec![scalar.tag], + Field::Message(ref message) => vec![message.tag], + Field::Map(ref map) => vec![map.tag], + Field::Oneof(ref oneof) => oneof.tags.clone(), + Field::Group(ref group) => vec![group.tag], + } + } + + /// Returns a statement which encodes the field. + pub fn encode(&self, ident: TokenStream) -> TokenStream { + match *self { + Field::Scalar(ref scalar) => scalar.encode(ident), + Field::Message(ref message) => message.encode(ident), + Field::Map(ref map) => map.encode(ident), + Field::Oneof(ref oneof) => oneof.encode(ident), + Field::Group(ref group) => group.encode(ident), + } + } + + /// Returns an expression which evaluates to the result of merging a decoded + /// value into the field. 
+ pub fn merge(&self, ident: TokenStream) -> TokenStream { + match *self { + Field::Scalar(ref scalar) => scalar.merge(ident), + Field::Message(ref message) => message.merge(ident), + Field::Map(ref map) => map.merge(ident), + Field::Oneof(ref oneof) => oneof.merge(ident), + Field::Group(ref group) => group.merge(ident), + } + } + + /// Returns an expression which evaluates to the encoded length of the field. + pub fn encoded_len(&self, ident: TokenStream) -> TokenStream { + match *self { + Field::Scalar(ref scalar) => scalar.encoded_len(ident), + Field::Map(ref map) => map.encoded_len(ident), + Field::Message(ref msg) => msg.encoded_len(ident), + Field::Oneof(ref oneof) => oneof.encoded_len(ident), + Field::Group(ref group) => group.encoded_len(ident), + } + } + + /// Returns a statement which clears the field. + pub fn clear(&self, ident: TokenStream) -> TokenStream { + match *self { + Field::Scalar(ref scalar) => scalar.clear(ident), + Field::Message(ref message) => message.clear(ident), + Field::Map(ref map) => map.clear(ident), + Field::Oneof(ref oneof) => oneof.clear(ident), + Field::Group(ref group) => group.clear(ident), + } + } + + pub fn default(&self) -> TokenStream { + match *self { + Field::Scalar(ref scalar) => scalar.default(), + _ => quote!(::core::default::Default::default()), + } + } + + /// Produces the fragment implementing debug for the given field. + pub fn debug(&self, ident: TokenStream) -> TokenStream { + match *self { + Field::Scalar(ref scalar) => { + let wrapper = scalar.debug(quote!(ScalarWrapper)); + quote! { + { + #wrapper + ScalarWrapper(&#ident) + } + } + } + Field::Map(ref map) => { + let wrapper = map.debug(quote!(MapWrapper)); + quote! 
{ + { + #wrapper + MapWrapper(&#ident) + } + } + } + _ => quote!(&#ident), + } + } + + pub fn methods(&self, ident: &Ident) -> Option { + match *self { + Field::Scalar(ref scalar) => scalar.methods(ident), + Field::Map(ref map) => map.methods(ident), + _ => None, + } + } +} + +#[derive(Clone, Copy, PartialEq, Eq)] +pub enum Label { + /// An optional field. + Optional, + /// A required field. + Required, + /// A repeated field. + Repeated, +} + +impl Label { + fn as_str(self) -> &'static str { + match self { + Label::Optional => "optional", + Label::Required => "required", + Label::Repeated => "repeated", + } + } + + fn variants() -> slice::Iter<'static, Label> { + const VARIANTS: &[Label] = &[Label::Optional, Label::Required, Label::Repeated]; + VARIANTS.iter() + } + + /// Parses a string into a field label. + /// If the string doesn't match a field label, `None` is returned. + fn from_attr(attr: &Meta) -> Option