-rw-r--r--  LICENSE | 21
-rw-r--r--  README.md | 48
-rw-r--r--  fetch.nim | 70
-rw-r--r--  nimpb_build.nimble | 16
-rw-r--r--  src/nimpb_build.nim | 148
-rw-r--r--  src/nimpb_buildpkg/descriptor_pb.nim | 368
-rw-r--r--  src/nimpb_buildpkg/gen.nim | 655
-rw-r--r--  src/nimpb_buildpkg/plugin.nim | 923
-rw-r--r--  src/nimpb_buildpkg/plugin_pb.nim | 160
-rw-r--r--  src/nimpb_buildpkg/protobuf/LICENSE | 42
-rw-r--r--  src/nimpb_buildpkg/protobuf/include/google/protobuf/any.proto | 149
-rw-r--r--  src/nimpb_buildpkg/protobuf/include/google/protobuf/api.proto | 210
-rw-r--r--  src/nimpb_buildpkg/protobuf/include/google/protobuf/compiler/plugin.proto | 167
-rw-r--r--  src/nimpb_buildpkg/protobuf/include/google/protobuf/descriptor.proto | 872
-rw-r--r--  src/nimpb_buildpkg/protobuf/include/google/protobuf/duration.proto | 117
-rw-r--r--  src/nimpb_buildpkg/protobuf/include/google/protobuf/empty.proto | 52
-rw-r--r--  src/nimpb_buildpkg/protobuf/include/google/protobuf/field_mask.proto | 252
-rw-r--r--  src/nimpb_buildpkg/protobuf/include/google/protobuf/source_context.proto | 48
-rw-r--r--  src/nimpb_buildpkg/protobuf/include/google/protobuf/struct.proto | 96
-rw-r--r--  src/nimpb_buildpkg/protobuf/include/google/protobuf/timestamp.proto | 133
-rw-r--r--  src/nimpb_buildpkg/protobuf/include/google/protobuf/type.proto | 187
-rw-r--r--  src/nimpb_buildpkg/protobuf/include/google/protobuf/wrappers.proto | 118
-rwxr-xr-x  src/nimpb_buildpkg/protobuf/protoc-linux-aarch_64 | bin 0 -> 4413084 bytes
-rwxr-xr-x  src/nimpb_buildpkg/protobuf/protoc-linux-x86_32 | bin 0 -> 4132368 bytes
-rwxr-xr-x  src/nimpb_buildpkg/protobuf/protoc-linux-x86_64 | bin 0 -> 4433736 bytes
-rwxr-xr-x  src/nimpb_buildpkg/protobuf/protoc-osx-x86_64 | bin 0 -> 7514608 bytes
-rwxr-xr-x  src/nimpb_buildpkg/protobuf/protoc-win32.exe | bin 0 -> 4035584 bytes
27 files changed, 4852 insertions, 0 deletions
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..f593051
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2018 Oskari Timperi
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..3760e6f
--- /dev/null
+++ b/README.md
@@ -0,0 +1,48 @@
+# nimpb_build
+
+**NOTE** nimpb_build is still experimental :-)
+
+A tool for generating Nim code for
+[nimpb](https://github.com/oswjk/nimpb) from .proto files. It uses a prebuilt,
+bundled protoc compiler and supports the following platforms:
+
+- Linux x86_32
+- Linux x86_64
+- Linux aarch_64
+- OSX x86_64
+- Windows
+
+nimpb_build is modeled somewhat after [prost-build](https://github.com/danburkert/prost).
+
+# Install with Nimble
+
+ $ nimble install https://github.com/oswjk/nimpb_build
+
+# Usage
+
+Using the tool is simple:
+
+ $ nimpb_build -I. --out=. my.proto
+
+It's almost like using protoc directly: the arguments are mostly passed
+straight through to protoc.
+
+You can declare nimpb_build as a dependency in your project's .nimble file and
+add a task for generating the code:
+
+ requires "nimpb_build"
+
+ task proto, "Process .proto files":
+ exec "nimpb_build -I. --out=. my.proto"
+
+# How it works
+
+nimpb_build includes the functionality to invoke the protoc compiler, and it
+also includes a built-in protoc plugin that protoc uses to generate the Nim
+code.
+
+nimpb_build executes protoc with the appropriate arguments and passes itself
+as a plugin via protoc's --plugin argument. It also sets the
+NIMPB_BUILD_PLUGIN=1 environment variable when launching protoc, so that when
+protoc in turn executes nimpb_build, the new instance knows to run in protoc
+plugin mode.
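For illustration, the protoc command that compileProtos constructs (see
src/nimpb_build.nim below) looks roughly like this; the paths are
placeholders, and the bundled protoc and its include directory live under
nimpb_buildpkg/protobuf:

    $ protoc --plugin=protoc-gen-nim=/path/to/nimpb_build \
        -I. -I/path/to/nimpb_buildpkg/protobuf/include \
        --nim_out=. my.proto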
diff --git a/fetch.nim b/fetch.nim
new file mode 100644
index 0000000..8e1e262
--- /dev/null
+++ b/fetch.nim
@@ -0,0 +1,70 @@
+import httpclient
+import os
+import osproc
+import strformat
+
+const
+ Version = "3.5.1"
+ BaseUrl = "https://github.com/google/protobuf/releases/download"
+ Systems = [
+ "linux-aarch_64",
+ "linux-x86_32",
+ "linux-x86_64",
+ "osx-x86_64",
+ "win32",
+ ]
+
+proc zipName(identifier: string): string =
+ &"protoc-{Version}-{identifier}.zip"
+
+proc exeSuffix(identifier: string): string =
+ result = ""
+ if identifier == "win32":
+ result = ".exe"
+
+proc compilerName(identifier: string): string =
+ &"protoc-{identifier}{exeSuffix(identifier)}"
+
+proc downloadFile(url, target: string) =
+    if not fileExists(target):
+        echo(&"downloading {url} -> {target}")
+        let client = newHttpClient()
+        downloadFile(client, url, target)
+
+proc downloadRelease(identifier: string) =
+ let
+ url = &"{BaseUrl}/v{Version}/{zipName(identifier)}"
+ target = zipName(identifier)
+ downloadFile(url, target)
+
+proc downloadSources() =
+ let url = &"https://github.com/google/protobuf/archive/v{Version}.zip"
+ downloadFile(url, &"v{Version}.zip")
+
+proc extractCompiler(identifier: string) =
+ echo(&"extracting compiler: {identifier}")
+ createDir("src/nimpb_buildpkg/protobuf")
+ let args = @["-j", "-o", zipName(identifier), &"bin/protoc{exeSuffix(identifier)}"]
+ discard execProcess("unzip", args, nil, {poStdErrToStdout, poUsePath})
+ moveFile(&"protoc{exeSuffix(identifier)}", &"src/nimpb_buildpkg/protobuf/{compilerName(identifier)}")
+
+proc extractIncludes() =
+ echo("extracting includes")
+ createDir("src/nimpb_buildpkg/protobuf")
+ let args = @["-o", zipName("linux-x86_64"), "include/*", "-d", "src/nimpb_buildpkg/protobuf"]
+ discard execProcess("unzip", args, nil, {poStdErrToStdout, poUsePath})
+
+proc extractLicense() =
+ echo("extracting LICENSE")
+ let args = @["-o", "-j", &"v{Version}.zip", &"protobuf-{Version}/LICENSE", "-d", "src/nimpb_buildpkg/protobuf"]
+ discard execProcess("unzip", args, nil, {poStdErrToStdout, poUsePath})
+
+for system in Systems:
+ downloadRelease(system)
+ extractCompiler(system)
+
+downloadSources()
+
+extractIncludes()
+extractLicense()
diff --git a/nimpb_build.nimble b/nimpb_build.nimble
new file mode 100644
index 0000000..9ce9ca9
--- /dev/null
+++ b/nimpb_build.nimble
@@ -0,0 +1,16 @@
+# Package
+
+version = "0.1.0"
+author = "Oskari Timperi"
+description = "A Protocol Buffers code generator for nimpb"
+license = "MIT"
+srcDir = "src"
+bin = @["nimpb_build"]
+
+# Dependencies
+
+requires "nim >= 0.18.0"
+requires "nimpb"
+
+task fetch, "fetch prebuilt protoc binaries":
+ exec "nim c -d:ssl -r fetch"
diff --git a/src/nimpb_build.nim b/src/nimpb_build.nim
new file mode 100644
index 0000000..660f374
--- /dev/null
+++ b/src/nimpb_build.nim
@@ -0,0 +1,148 @@
+import os
+import osproc
+import streams
+import strformat
+import strtabs
+import strutils
+
+from nimpb_buildpkg/plugin import pluginMain
+
+when defined(windows):
+ const compilerId = "win32"
+elif defined(linux):
+ when defined(i386):
+ const arch = "x86_32"
+ elif defined(amd64):
+ const arch = "x86_64"
+ elif defined(arm64):
+ const arch = "aarch_64"
+ else:
+ {.fatal:"unsupported architecture".}
+ const compilerId = "linux-" & arch
+elif defined(macosx):
+ when defined(amd64):
+ const arch = "x86_64"
+ else:
+ {.fatal:"unsupported architecture".}
+ const compilerId = "osx-" & arch
+else:
+ {.fatal:"unsupported platform".}
+
+when defined(windows):
+ const exeSuffix = ".exe"
+else:
+ const exeSuffix = ""
+
+proc findCompiler(): string =
+ let
+ compilerName = &"protoc-{compilerId}{exeSuffix}"
+ paths = @[
+ getAppDir() / "src" / "nimpb_buildpkg" / "protobuf",
+ getAppDir() / "nimpb_buildpkg" / "protobuf",
+ ]
+
+ for path in paths:
+ if fileExists(path / compilerName):
+ return path / compilerName
+
+ raise newException(Exception, &"{compilerName} not found!")
+
+proc builtinIncludeDir(compilerPath: string): string =
+ parentDir(compilerPath) / "include"
+
+template verboseEcho(x: untyped): untyped =
+ if verbose:
+ echo(x)
+
+proc compileProtos*(protos: openArray[string], outdir: string,
+ includes: openArray[string], verbose: bool) =
+ let command = findCompiler()
+ var baseArgs: seq[string] = @[]
+
+ add(baseArgs, &"--plugin=protoc-gen-nim={getAppFilename()}")
+
+ for incdir in includes:
+ verboseEcho(&"Adding include directory: {incdir}")
+ add(baseArgs, &"-I{incdir}")
+
+ add(baseArgs, &"-I{builtinIncludeDir(command)}")
+ verboseEcho(&"Adding include directory: {builtinIncludeDir(command)}")
+
+ add(baseArgs, &"--nim_out={outdir}")
+ verboseEcho(&"Output directory: {outdir}")
+
+ for proto in protos:
+ var args = baseArgs
+ add(args, proto)
+ var options = {poStdErrToStdOut}
+ if verbose:
+ incl(options, poEchoCmd)
+
+ let env = newStringTable("NIMPB_BUILD_PLUGIN", "1", modeCaseSensitive)
+
+ let process = startProcess(command, workingDir="", args=args, env=env,
+ options=options)
+ var outp = outputStream(process)
+ var outputData: string = ""
+ var line = newStringOfCap(120)
+ while true:
+ if outp.readLine(line):
+ add(outputData, line)
+ add(outputData, "\n")
+ elif not running(process):
+ break
+ var rc = peekExitCode(process)
+ close(process)
+
+ if rc != 0:
+ echo(outputData)
+ quit(QuitFailure)
+ else:
+ verboseEcho(outputData)
+
+
+proc usage() {.noreturn.} =
+ echo(&"""
+{getAppFilename()} --out=OUTDIR [-IPATH [-IPATH]...] PROTOFILE...
+
+ --out The output directory for the generated files
+ -I Add a path to the set of include paths
+""")
+ quit(QuitFailure)
+
+when isMainModule:
+ if getEnv("NIMPB_BUILD_PLUGIN", "") == "1":
+ pluginMain()
+ quit(QuitSuccess)
+
+ var includes: seq[string] = @[]
+ var protos: seq[string] = @[]
+ var outdir: string
+ var verbose = false
+
+ if paramCount() == 0:
+ usage()
+
+ for idx in 1..paramCount():
+ let param = paramStr(idx)
+
+ if param.startsWith("-I"):
+ add(includes, param[2..^1])
+ elif param.startsWith("--out="):
+ outdir = param[6..^1]
+ elif param == "--verbose":
+ verbose = true
+ elif param == "--help":
+ usage()
+ else:
+ add(protos, param)
+
+ if outdir == nil:
+ echo("error: --out is required")
+ quit(QuitFailure)
+
+ if len(protos) == 0:
+ echo("error: no input files")
+ quit(QuitFailure)
+
+ compileProtos(protos, outdir, includes, verbose)
diff --git a/src/nimpb_buildpkg/descriptor_pb.nim b/src/nimpb_buildpkg/descriptor_pb.nim
new file mode 100644
index 0000000..923885f
--- /dev/null
+++ b/src/nimpb_buildpkg/descriptor_pb.nim
@@ -0,0 +1,368 @@
+import intsets
+
+import gen
+import nimpb/nimpb
+
+const
+ FileDescriptorSetDesc = MessageDesc(
+ name: "FileDescriptorSet",
+ fields: @[
+ FieldDesc(
+ name: "files",
+ number: 1,
+ ftype: FieldType.Message,
+ label: FieldLabel.Repeated,
+ typeName: "FileDescriptorProto",
+ packed: false,
+ oneofIdx: -1,
+ )
+ ]
+ )
+
+ FileDescriptorProtoDesc = MessageDesc(
+ name: "FileDescriptorProto",
+ fields: @[
+ FieldDesc(
+ name: "name",
+ number: 1,
+ ftype: FieldType.String,
+ label: FieldLabel.Optional,
+ typeName: "",
+ packed: false,
+ oneofIdx: -1,
+ ),
+ FieldDesc(
+ name: "package",
+ number: 2,
+ ftype: FieldType.String,
+ label: FieldLabel.Optional,
+ typeName: "",
+ packed: false,
+ oneofIdx: -1,
+ ),
+ FieldDesc(
+ name: "dependency",
+ number: 3,
+ ftype: FieldType.String,
+ label: FieldLabel.Repeated,
+ typeName: "",
+ packed: false,
+ oneofIdx: -1,
+ ),
+ FieldDesc(
+ name: "message_type",
+ number: 4,
+ ftype: FieldType.Message,
+ label: FieldLabel.Repeated,
+ typeName: "DescriptorProto",
+ packed: false,
+ oneofIdx: -1,
+ ),
+ FieldDesc(
+ name: "enum_type",
+ number: 5,
+ ftype: FieldType.Message,
+ label: FieldLabel.Repeated,
+ typeName: "EnumDescriptorProto",
+ packed: false,
+ oneofIdx: -1,
+ ),
+ FieldDesc(
+ name: "syntax",
+ number: 12,
+ ftype: FieldType.String,
+ label: FieldLabel.Optional,
+ typeName: "",
+ packed: false,
+ oneofIdx: -1,
+ ),
+ ]
+ )
+
+ DescriptorProtoDesc = MessageDesc(
+ name: "DescriptorProto",
+ fields: @[
+ FieldDesc(
+ name: "name",
+ number: 1,
+ ftype: FieldType.String,
+ label: FieldLabel.Optional,
+ typeName: "",
+ packed: false,
+ oneofIdx: -1,
+ ),
+ FieldDesc(
+ name: "field",
+ number: 2,
+ ftype: FieldType.Message,
+ label: FieldLabel.Repeated,
+ typeName: "FieldDescriptorProto",
+ packed: false,
+ oneofIdx: -1,
+ ),
+ FieldDesc(
+ name: "nested_type",
+ number: 3,
+ ftype: FieldType.Message,
+ label: FieldLabel.Repeated,
+ typeName: "DescriptorProto",
+ packed: false,
+ oneofIdx: -1,
+ ),
+ FieldDesc(
+ name: "enum_type",
+ number: 4,
+ ftype: FieldType.Message,
+ label: FieldLabel.Repeated,
+ typeName: "EnumDescriptorProto",
+ packed: false,
+ oneofIdx: -1,
+ ),
+ FieldDesc(
+ name: "oneof_decl",
+ number: 8,
+ ftype: FieldType.Message,
+ label: FieldLabel.Repeated,
+ typeName: "OneofDescriptorProto",
+ packed: false,
+ oneofIdx: -1,
+ ),
+ FieldDesc(
+ name: "options",
+ number: 7,
+ ftype: FieldType.Message,
+ label: FieldLabel.Optional,
+ typeName: "MessageOptions",
+ packed: false,
+ oneofIdx: -1,
+ ),
+ ]
+ )
+
+ EnumDescriptorProtoDesc = MessageDesc(
+ name: "EnumDescriptorProto",
+ fields: @[
+ FieldDesc(
+ name: "name",
+ number: 1,
+ ftype: FieldType.String,
+ label: FieldLabel.Optional,
+ typeName: "",
+ packed: false,
+ oneofIdx: -1,
+ ),
+ FieldDesc(
+ name: "value",
+ number: 2,
+ ftype: FieldType.Message,
+ label: FieldLabel.Repeated,
+ typeName: "EnumValueDescriptorProto",
+ packed: false,
+ oneofIdx: -1,
+ ),
+ ]
+ )
+
+ EnumValueDescriptorProtoDesc = MessageDesc(
+ name: "EnumValueDescriptorProto",
+ fields: @[
+ FieldDesc(
+ name: "name",
+ number: 1,
+ ftype: FieldType.String,
+ label: FieldLabel.Optional,
+ typeName: "",
+ packed: false,
+ oneofIdx: -1,
+ ),
+ FieldDesc(
+ name: "number",
+ number: 2,
+ ftype: FieldType.Int32,
+ label: FieldLabel.Optional,
+ typeName: "",
+ packed: false,
+ oneofIdx: -1,
+ ),
+ ]
+ )
+
+ FieldDescriptorProtoDesc = MessageDesc(
+ name: "FieldDescriptorProto",
+ fields: @[
+ FieldDesc(
+ name: "name",
+ number: 1,
+ ftype: FieldType.String,
+ label: FieldLabel.Optional,
+ typeName: "",
+ packed: false,
+ oneofIdx: -1,
+ ),
+ FieldDesc(
+ name: "number",
+ number: 3,
+ ftype: FieldType.Int32,
+ label: FieldLabel.Optional,
+ typeName: "",
+ packed: false,
+ oneofIdx: -1,
+ ),
+ FieldDesc(
+ name: "label",
+ number: 4,
+ ftype: FieldType.Enum,
+ label: FieldLabel.Optional,
+ typeName: "FieldDescriptorProto_Label",
+ packed: false,
+ oneofIdx: -1,
+ ),
+ FieldDesc(
+ name: "type",
+ number: 5,
+ ftype: FieldType.Enum,
+ label: FieldLabel.Optional,
+ typeName: "FieldDescriptorProto_Type",
+ packed: false,
+ oneofIdx: -1,
+ ),
+ FieldDesc(
+ name: "type_name",
+ number: 6,
+ ftype: FieldType.String,
+ label: FieldLabel.Optional,
+ typeName: "",
+ packed: false,
+ oneofIdx: -1,
+ ),
+ FieldDesc(
+ name: "options",
+ number: 8,
+ ftype: FieldType.Message,
+ label: FieldLabel.Optional,
+ typeName: "FieldOptions",
+ packed: false,
+ oneofIdx: -1,
+ ),
+ FieldDesc(
+ name: "oneof_index",
+ number: 9,
+ ftype: FieldType.Int32,
+ label: FieldLabel.Optional,
+ typeName: "",
+ packed: false,
+ oneofIdx: -1,
+ ),
+ ]
+ )
+
+ FieldDescriptorProto_LabelDesc = EnumDesc(
+ name: "FieldDescriptorProto_Label",
+ values: @[
+ EnumValueDesc(name: "LABEL_OPTIONAL", number: 1),
+ EnumValueDesc(name: "LABEL_REQUIRED", number: 2),
+ EnumValueDesc(name: "LABEL_REPEATED", number: 3)
+ ]
+ )
+
+ FieldDescriptorProto_TypeDesc = EnumDesc(
+ name: "FieldDescriptorProto_Type",
+ values: @[
+ EnumValueDesc(name: "TYPE_DOUBLE", number: 1),
+ EnumValueDesc(name: "TYPE_FLOAT", number: 2),
+ EnumValueDesc(name: "TYPE_INT64", number: 3),
+ EnumValueDesc(name: "TYPE_UINT64", number: 4),
+ EnumValueDesc(name: "TYPE_INT32", number: 5),
+ EnumValueDesc(name: "TYPE_FIXED64", number: 6),
+ EnumValueDesc(name: "TYPE_FIXED32", number: 7),
+ EnumValueDesc(name: "TYPE_BOOL", number: 8),
+ EnumValueDesc(name: "TYPE_STRING", number: 9),
+ EnumValueDesc(name: "TYPE_GROUP", number: 10),
+ EnumValueDesc(name: "TYPE_MESSAGE", number: 11),
+ EnumValueDesc(name: "TYPE_BYTES", number: 12),
+ EnumValueDesc(name: "TYPE_UINT32", number: 13),
+ EnumValueDesc(name: "TYPE_ENUM", number: 14),
+ EnumValueDesc(name: "TYPE_SFIXED32", number: 15),
+ EnumValueDesc(name: "TYPE_SFIXED64", number: 16),
+ EnumValueDesc(name: "TYPE_SINT32", number: 17),
+ EnumValueDesc(name: "TYPE_SINT64", number: 18),
+ ]
+ )
+
+ MessageOptionsDesc = MessageDesc(
+ name: "MessageOptions",
+ fields: @[
+ FieldDesc(
+ name: "map_entry",
+ number: 7,
+ ftype: FieldType.Bool,
+ label: FieldLabel.Optional,
+ typeName: "",
+ packed: false,
+ oneofIdx: -1,
+ ),
+ ]
+ )
+
+ FieldOptionsDesc = MessageDesc(
+ name: "FieldOptions",
+ fields: @[
+ FieldDesc(
+ name: "packed",
+ number: 2,
+ ftype: FieldType.Bool,
+ label: FieldLabel.Optional,
+ typeName: "",
+ packed: false,
+ oneofIdx: -1,
+ ),
+ ]
+ )
+
+ OneofDescriptorProtoDesc = MessageDesc(
+ name: "OneofDescriptorProto",
+ fields: @[
+ FieldDesc(
+ name: "name",
+ number: 1,
+ ftype: FieldType.String,
+ label: FieldLabel.Optional,
+ typeName: "",
+ packed: false,
+ oneofIdx: -1,
+ ),
+ ]
+ )
+
+generateEnumType(FieldDescriptorProto_LabelDesc)
+generateEnumProcs(FieldDescriptorProto_LabelDesc)
+
+generateEnumType(FieldDescriptorProto_TypeDesc)
+generateEnumProcs(FieldDescriptorProto_TypeDesc)
+
+generateMessageType(EnumValueDescriptorProtoDesc)
+generateMessageProcs(EnumValueDescriptorProtoDesc)
+
+generateMessageType(EnumDescriptorProtoDesc)
+generateMessageProcs(EnumDescriptorProtoDesc)
+
+generateMessageType(FieldOptionsDesc)
+generateMessageProcs(FieldOptionsDesc)
+
+generateMessageType(FieldDescriptorProtoDesc)
+generateMessageProcs(FieldDescriptorProtoDesc)
+
+generateMessageType(OneofDescriptorProtoDesc)
+generateMessageProcs(OneofDescriptorProtoDesc)
+
+generateMessageType(MessageOptionsDesc)
+generateMessageProcs(MessageOptionsDesc)
+
+generateMessageType(DescriptorProtoDesc)
+generateMessageProcs(DescriptorProtoDesc)
+
+generateMessageType(FileDescriptorProtoDesc)
+generateMessageProcs(FileDescriptorProtoDesc)
+
+generateMessageType(FileDescriptorSetDesc)
+generateMessageProcs(FileDescriptorSetDesc)
diff --git a/src/nimpb_buildpkg/gen.nim b/src/nimpb_buildpkg/gen.nim
new file mode 100644
index 0000000..e954d03
--- /dev/null
+++ b/src/nimpb_buildpkg/gen.nim
@@ -0,0 +1,655 @@
+import macros
+import strutils
+
+import nimpb/nimpb
+
+type
+ MessageDesc* = object
+ name*: string
+ fields*: seq[FieldDesc]
+ oneofs*: seq[string]
+
+ FieldLabel* {.pure.} = enum
+ Optional = 1
+ Required
+ Repeated
+
+ FieldDesc* = object
+ name*: string
+ number*: int
+ ftype*: FieldType
+ label*: FieldLabel
+ typeName*: string
+ packed*: bool
+ oneofIdx*: int
+
+ EnumDesc* = object
+ name*: string
+ values*: seq[EnumValueDesc]
+
+ EnumValueDesc* = object
+ name*: string
+ number*: int
+
+proc findColonExpr(parent: NimNode, s: string): NimNode =
+ for child in parent:
+ if child.kind != nnkExprColonExpr:
+ continue
+
+ if $child[0] == s:
+ return child
+
+proc getMessageName(desc: NimNode): string =
+ let node = findColonExpr(desc, "name")
+ result = $node[1]
+
+iterator fields(desc: NimNode): NimNode =
+ let node = findColonExpr(desc, "fields")
+ for field in node[1]:
+ yield field
+
+proc isRepeated(field: NimNode): bool =
+ let node = findColonExpr(field, "label")
+ let value = FieldLabel(node[1].intVal)
+ result = value == FieldLabel.Repeated
+
+proc isPacked(field: NimNode): bool =
+ let node = findColonExpr(field, "packed")
+ result = bool(node[1].intVal)
+
+proc getFieldType(field: NimNode): FieldType =
+ let node = findColonExpr(field, "ftype")
+ result = FieldType(node[1].intVal)
+
+proc isMessage(field: NimNode): bool =
+ result = getFieldType(field) == FieldType.Message
+
+proc isEnum(field: NimNode): bool =
+ result = getFieldType(field) == FieldType.Enum
+
+proc getFieldTypeName(field: NimNode): string =
+ let node = findColonExpr(field, "typeName")
+ result = $node[1]
+
+proc getFieldTypeAsString(field: NimNode): string =
+ if isMessage(field) or isEnum(field):
+ result = getFieldTypeName(field)
+ else:
+ case getFieldType(field)
+ of FieldType.Double: result = "float64"
+ of FieldType.Float: result = "float32"
+ of FieldType.Int64: result = "int64"
+ of FieldType.UInt64: result = "uint64"
+ of FieldType.Int32: result = "int32"
+ of FieldType.Fixed64: result = "uint64"
+ of FieldType.Fixed32: result = "uint32"
+ of FieldType.Bool: result = "bool"
+ of FieldType.String: result = "string"
+ of FieldType.Bytes: result = "bytes"
+ of FieldType.UInt32: result = "uint32"
+ of FieldType.SFixed32: result = "int32"
+ of FieldType.SFixed64: result = "int64"
+ of FieldType.SInt32: result = "int32"
+ of FieldType.SInt64: result = "int64"
+ else: result = "AYBABTU"
+
+proc getFullFieldType(field: NimNode): NimNode =
+ result = ident(getFieldTypeAsString(field))
+ if isRepeated(field):
+ result = nnkBracketExpr.newTree(ident("seq"), result)
+
+proc getFieldName(field: NimNode): string =
+ let node = findColonExpr(field, "name")
+ result = $node[1]
+
+proc getFieldNumber(field: NimNode): int =
+ result = int(findColonExpr(field, "number")[1].intVal)
+
+proc defaultValue(field: NimNode): NimNode =
+ # TODO: check if there is a default value specified for the field
+
+ if isRepeated(field):
+ return nnkPrefix.newTree(newIdentNode("@"), nnkBracket.newTree())
+
+ case getFieldType(field)
+ of FieldType.Double: result = newLit(0.0'f64)
+ of FieldType.Float: result = newLit(0.0'f32)
+ of FieldType.Int64: result = newLit(0'i64)
+ of FieldType.UInt64: result = newLit(0'u64)
+ of FieldType.Int32: result = newLit(0'i32)
+ of FieldType.Fixed64: result = newLit(0'u64)
+ of FieldType.Fixed32: result = newLit(0'u32)
+ of FieldType.Bool: result = newLit(false)
+ of FieldType.String: result = newLit("")
+ of FieldType.Group: result = newLit("NOTIMPLEMENTED")
+ of FieldType.Message: result = newCall(ident("new" & getFieldTypeAsString(field)))
+ of FieldType.Bytes: result = newCall(ident("bytes"), newLit(""))
+ of FieldType.UInt32: result = newLit(0'u32)
+ of FieldType.Enum:
+ let
+ descId = ident(getFieldTypeAsString(field) & "Desc")
+ nameId = ident(getFieldTypeAsString(field))
+ result = quote do:
+ `nameId`(`descId`.values[0].number)
+    of FieldType.SFixed32: result = newLit(0'i32)
+    of FieldType.SFixed64: result = newLit(0'i64)
+ of FieldType.SInt32: result = newLit(0)
+ of FieldType.SInt64: result = newLit(0)
+
+proc wiretype(field: NimNode): WireType =
+ result = wiretype(getFieldType(field))
+
+# TODO: maybe not the best name for this
+proc getFieldNameAST(objname: NimNode, field: NimNode, oneof: string): NimNode =
+ result =
+ if oneof != "":
+ newDotExpr(newDotExpr(objname, ident(oneof)), ident(getFieldName(field)))
+ else:
+ newDotExpr(objname, ident(getFieldName(field)))
+
+proc fieldInitializer(objname: NimNode, field: NimNode, oneof: string): NimNode =
+ result = nnkAsgn.newTree(
+ getFieldNameAST(objname, field, oneof),
+ defaultValue(field)
+ )
+
+proc oneofIndex(field: NimNode): int =
+ let node = findColonExpr(field, "oneofIdx")
+ if node == nil:
+ result = -1
+ else:
+ result = int(node[1].intVal)
+
+proc oneofName(message, field: NimNode): string =
+ let index = oneofIndex(field)
+
+ if index == -1:
+ return ""
+
+ let oneofs = findColonExpr(message, "oneofs")[1]
+
+ result = $oneofs[index]
+
+iterator oneofFields(message: NimNode, index: int): NimNode =
+ if index != -1:
+ for field in fields(message):
+ if oneofIndex(field) == index:
+ yield field
+
+proc generateOneofFields*(desc: NimNode, typeSection: NimNode) =
+ let
+ oneofs = findColonExpr(desc, "oneofs")[1]
+ messageName = getMessageName(desc)
+
+ for index, oneof in oneofs:
+ let reclist = nnkRecList.newTree()
+
+ for field in oneofFields(desc, index):
+ let ftype = getFullFieldType(field)
+ let name = ident(getFieldName(field))
+
+ add(reclist, newIdentDefs(postfix(name, "*"), ftype))
+
+ let typedef = nnkTypeDef.newTree(
+ nnkPragmaExpr.newTree(
+ postfix(ident(messageName & $oneof), "*"),
+ nnkPragma.newTree(
+ ident("union")
+ )
+ ),
+ newEmptyNode(),
+ nnkObjectTy.newTree(
+ newEmptyNode(),
+ newEmptyNode(),
+ reclist
+ )
+ )
+
+ add(typeSection, typedef)
+
+macro generateMessageType*(desc: typed): typed =
+ let
+ impl = getImpl(symbol(desc))
+ typeSection = nnkTypeSection.newTree()
+ typedef = nnkTypeDef.newTree()
+ reclist = nnkRecList.newTree()
+ oneofs = findColonExpr(impl, "oneofs")[1]
+
+ let name = getMessageName(impl)
+
+ let typedefRef = nnkTypeDef.newTree(postfix(newIdentNode(name), "*"), newEmptyNode(),
+ nnkRefTy.newTree(newIdentNode(name & "Obj")))
+ add(typeSection, typedefRef)
+
+ add(typeSection, typedef)
+
+ add(typedef, postfix(ident(name & "Obj"), "*"))
+ add(typedef, newEmptyNode())
+ add(typedef, nnkObjectTy.newTree(newEmptyNode(), newEmptyNode(), reclist))
+
+ for field in fields(impl):
+ let ftype = getFullFieldType(field)
+ let name = ident(getFieldName(field))
+ if oneofIndex(field) == -1:
+ add(reclist, newIdentDefs(postfix(name, "*"), ftype))
+
+ for oneof in oneofs:
+ add(reclist, newIdentDefs(postfix(ident($oneof), "*"),
+ ident(name & $oneof)))
+
+ add(reclist, nnkIdentDefs.newTree(
+ ident("hasField"), ident("IntSet"), newEmptyNode()))
+
+ generateOneofFields(impl, typeSection)
+
+ result = newStmtList()
+ add(result, typeSection)
+
+ when defined(debug):
+ hint(repr(result))
+
+proc generateNewMessageProc(desc: NimNode): NimNode =
+ let
+ body = newStmtList(
+ newCall(ident("new"), ident("result"))
+ )
+ resultId = ident("result")
+
+ for field in fields(desc):
+ let oneofName = oneofName(desc, field)
+ add(body, fieldInitializer(resultId, field, oneofName))
+
+ add(body, newAssignment(newDotExpr(resultId, ident("hasField")),
+ newCall(ident("initIntSet"))))
+
+ result = newProc(postfix(ident("new" & getMessageName(desc)), "*"),
+ @[ident(getMessageName(desc))],
+ body)
+
+proc fieldProcName(prefix: string, field: NimNode): string =
+ result = prefix & capitalizeAscii(getFieldName(field))
+
+proc fieldProcIdent(prefix: string, field: NimNode): NimNode =
+ result = postfix(ident(fieldProcName(prefix, field)), "*")
+
+proc generateClearFieldProc(desc, field: NimNode): NimNode =
+ let
+ messageId = ident("message")
+ fname = getFieldNameAST(messageId, field, oneofName(desc, field))
+ defvalue = defaultValue(field)
+ hasField = newDotExpr(messageId, ident("hasField"))
+ number = getFieldNumber(field)
+ procName = fieldProcIdent("clear", field)
+ mtype = ident(getMessageName(desc))
+
+ result = quote do:
+ proc `procName`(`messageId`: `mtype`) =
+ `fname` = `defvalue`
+ excl(`hasfield`, `number`)
+
+ # When clearing a field that is contained in a oneof, we should also clear
+ # the other fields.
+ for sibling in oneofFields(desc, oneofIndex(field)):
+ if sibling == field:
+ continue
+ let
+ number = getFieldNumber(sibling)
+ exclNode = quote do:
+ excl(`hasField`, `number`)
+ add(body(result), exclNode)
+
+proc generateHasFieldProc(desc, field: NimNode): NimNode =
+ let
+ messageId = ident("message")
+ hasField = newDotExpr(messageId, ident("hasField"))
+ number = getFieldNumber(field)
+ mtype = ident(getMessageName(desc))
+ procName = fieldProcIdent("has", field)
+
+ result = quote do:
+ proc `procName`(`messageId`: `mtype`): bool =
+ contains(`hasfield`, `number`)
+
+proc generateSetFieldProc(desc, field: NimNode): NimNode =
+ let
+ messageId = ident("message")
+ hasField = newDotExpr(messageId, ident("hasField"))
+ number = getFieldNumber(field)
+ valueId = ident("value")
+ fname = getFieldNameAST(messageId, field, oneofName(desc, field))
+ procName = fieldProcIdent("set", field)
+ mtype = ident(getMessageName(desc))
+ ftype = getFullFieldType(field)
+
+ result = quote do:
+ proc `procName`(`messageId`: `mtype`, `valueId`: `ftype`) =
+ `fname` = `valueId`
+ incl(`hasfield`, `number`)
+
+ # When setting a field that is in a oneof, we need to unset the other fields
+ for sibling in oneofFields(desc, oneofIndex(field)):
+ if sibling == field:
+ continue
+ let
+ number = getFieldNumber(sibling)
+ exclNode = quote do:
+ excl(`hasField`, `number`)
+ add(body(result), exclNode)
+
+proc generateAddToFieldProc(desc, field: NimNode): NimNode =
+ let
+ procName = fieldProcIdent("add", field)
+ messageId = ident("message")
+ mtype = ident(getMessageName(desc))
+ valueId = ident("value")
+ ftype = ident(getFieldTypeAsString(field))
+ hasField = newDotExpr(messageId, ident("hasField"))
+ number = getFieldNumber(field)
+ fname = newDotExpr(messageId, ident(getFieldName(field)))
+
+ result = quote do:
+ proc `procName`(`messageId`: `mtype`, `valueId`: `ftype`) =
+ add(`fname`, `valueId`)
+ incl(`hasfield`, `number`)
+
+proc ident(wt: WireType): NimNode =
+ result = newDotExpr(ident("WireType"), ident($wt))
+
+proc genWriteField(message, field: NimNode): NimNode =
+ result = newStmtList()
+
+ let
+ number = getFieldNumber(field)
+ writer = ident("write" & getFieldTypeAsString(field))
+ messageId = ident("message")
+ fname = getFieldNameAST(messageId, field, oneofName(message, field))
+ wiretype = ident(wiretype(field))
+ sizeproc = ident("sizeOf" & getFieldTypeAsString(field))
+ hasproc = ident(fieldProcName("has", field))
+
+ if not isRepeated(field):
+ result.add quote do:
+ if `hasproc`(message):
+ writeTag(stream, `number`, `wiretype`)
+ `writer`(stream, `fname`)
+ if isMessage(field):
+ insert(result[0][0][1], 1, quote do:
+ writeVarint(stream, `sizeproc`(`fname`))
+ )
+ else:
+ let valueId = ident("value")
+ if isPacked(field):
+ result.add quote do:
+ writeTag(stream, `number`, WireType.LengthDelimited)
+ writeVarInt(stream, packedFieldSize(`fname`, `wiretype`))
+ for `valueId` in `fname`:
+ `writer`(stream, `valueId`)
+ else:
+ result.add quote do:
+ for `valueId` in `fname`:
+ writeTag(stream, `number`, `wiretype`)
+ `writer`(stream, `valueId`)
+ if isMessage(field):
+ insert(result[^1][^1], 1, quote do:
+ writeVarint(stream, `sizeproc`(`valueId`))
+ )
+
+proc generateWriteMessageProc(desc: NimNode): NimNode =
+ let
+ messageId = ident("message")
+ mtype = ident(getMessageName(desc))
+ procName = postfix(ident("write" & getMessageName(desc)), "*")
+ body = newStmtList()
+ stream = ident("stream")
+ sizeproc = postfix(ident("sizeOf" & getMessageName(desc)), "*")
+
+ for field in fields(desc):
+ add(body, genWriteField(desc, field))
+
+ result = quote do:
+ proc `sizeproc`(`messageId`: `mtype`): uint64
+
+ proc `procName`(`stream`: ProtobufStream, `messageId`: `mtype`) =
+ `body`
+
+proc generateReadMessageProc(desc: NimNode): NimNode =
+ let
+ procName = postfix(ident("read" & getMessageName(desc)), "*")
+ newproc = ident("new" & getMessageName(desc))
+ streamId = ident("stream")
+ mtype = ident(getMessageName(desc))
+ tagId = ident("tag")
+ wiretypeId = ident("wiretype")
+ resultId = ident("result")
+
+ result = quote do:
+ proc `procName`(`streamId`: ProtobufStream): `mtype` =
+ `resultId` = `newproc`()
+ while not atEnd(stream):
+ let
+ `tagId` = readTag(`streamId`)
+ `wiretypeId` = wireType(`tagId`)
+ case fieldNumber(`tagId`)
+ else:
+ skipField(`streamId`, `wiretypeId`)
+
+ let caseNode = body(result)[1][1][1]
+
+ # TODO: check wiretypes and fail if it doesn't match
+ for field in fields(desc):
+ let
+ number = getFieldNumber(field)
+ reader = ident("read" & getFieldTypeAsString(field))
+ setproc =
+ if isRepeated(field):
+ ident("add" & capitalizeAscii(getFieldName(field)))
+ else:
+ ident("set" & capitalizeAscii(getFieldName(field)))
+ if isRepeated(field):
+ if isNumeric(getFieldType(field)):
+ insert(caseNode, 1, nnkOfBranch.newTree(newLit(number), quote do:
+ if `wiretypeId` == WireType.LengthDelimited:
+ let
+ size = readVarint(stream)
+ start = getPosition(stream).uint64
+ var consumed = 0'u64
+ while consumed < size:
+ `setproc`(`resultId`, `reader`(stream))
+ consumed = getPosition(stream).uint64 - start
+ if consumed != size:
+ raise newException(Exception, "packed field size mismatch")
+ else:
+ `setproc`(`resultId`, `reader`(stream))
+ ))
+ elif isMessage(field):
+ insert(caseNode, 1, nnkOfBranch.newTree(newLit(number), quote do:
+ let size = readVarint(stream)
+ let data = readStr(stream, int(size))
+ let stream2 = newProtobufStream(newStringStream(data))
+ `setproc`(`resultId`, `reader`(stream2))
+ ))
+ else:
+ insert(caseNode, 1, nnkOfBranch.newTree(newLit(number), quote do:
+ `setproc`(`resultId`, `reader`(stream))
+ ))
+ else:
+ if isMessage(field):
+ insert(caseNode, 1, nnkOfBranch.newTree(newLit(number), quote do:
+ let size = readVarint(stream)
+ let data = readStr(stream, int(size))
+ let stream2 = newProtobufStream(newStringStream(data))
+ `setproc`(`resultId`, `reader`(stream2))
+ ))
+ else:
+ insert(caseNode, 1, nnkOfBranch.newTree(newLit(number), quote do:
+ `setproc`(`resultId`, `reader`(stream))
+ ))
+
+proc generateSizeOfMessageProc(desc: NimNode): NimNode =
+ let
+ name = getMessageName(desc)
+ body = newStmtList()
+ messageId = ident("message")
+ resultId = ident("result")
+ procName = postfix(ident("sizeOf" & getMessageName(desc)), "*")
+ mtype = ident(getMessageName(desc))
+
+ result = quote do:
+ proc `procName`(`messageId`: `mtype`): uint64 =
+ `resultId` = 0
+
+ let procBody = body(result)
+
+ for field in fields(desc):
+ let
+ hasproc = ident(fieldProcName("has", field))
+ sizeofproc = ident("sizeOf" & getFieldTypeAsString(field))
+ fname = getFieldNameAST(messageId, field, oneofName(desc, field))
+ number = getFieldNumber(field)
+ wiretype = ident(wiretype(field))
+
+ # TODO: packed
+ if isRepeated(field):
+ if isPacked(field):
+ procBody.add quote do:
+ if `hasproc`(`messageId`):
+ let
+ tagSize = sizeOfUint32(uint32(makeTag(`number`, WireType.LengthDelimited)))
+ dataSize = packedFieldSize(`fname`, `wiretype`)
+ sizeOfSize = sizeOfUint64(dataSize)
+                    `resultId` = `resultId` + tagSize + dataSize + sizeOfSize
+ else:
+ procBody.add quote do:
+ for value in `fname`:
+ let
+ sizeOfField = `sizeofproc`(value)
+ tagSize = sizeOfUint32(uint32(makeTag(`number`, `wiretype`)))
+ `resultId` = `resultId` +
+ sizeOfField +
+ sizeOfUint64(sizeOfField) +
+ tagSize
+ else:
+ let sizeOfFieldId = ident("sizeOfField")
+
+ procBody.add quote do:
+ if `hasproc`(`messageId`):
+ let
+ `sizeOfFieldId` = `sizeofproc`(`fname`)
+ tagSize = sizeOfUint32(uint32(makeTag(`number`, `wiretype`)))
+ `resultId` = `resultId` + sizeOfField + tagSize
+
+ if isMessage(field):
+ # For messages we need to include the size of the encoded size
+ let asgn = procBody[^1][0][1][1]
+ asgn[1] = infix(asgn[1], "+", newCall(ident("sizeOfUint64"),
+ sizeOfFieldId))
+
+proc generateSerializeProc(desc: NimNode): NimNode =
+ let
+ mtype = ident(getMessageName(desc))
+ procName = postfix(ident("serialize"), "*")
+ writer = ident("write" & getMessageName(desc))
+ resultId = ident("result")
+
+ result = quote do:
+ proc `procName`(message: `mtype`): string =
+ let
+ ss = newStringStream()
+ pbs = newProtobufStream(ss)
+ `writer`(pbs, message)
+ `resultId` = ss.data
+
+proc generateDeserializeProc(desc: NimNode): NimNode =
+ let
+ mtype = ident(getMessageName(desc))
+ procName = postfix(ident("new" & getMessageName(desc)), "*")
+ reader = ident("read" & getMessageName(desc))
+ resultId = ident("result")
+
+ result = quote do:
+ proc `procName`(data: string): `mtype` =
+ let
+ ss = newStringStream(data)
+ pbs = newProtobufStream(ss)
+ `resultId` = `reader`(pbs)
+
+macro generateMessageProcs*(x: typed): typed =
+ let
+ desc = getImpl(symbol(x))
+
+ result = newStmtList(
+ generateNewMessageProc(desc),
+ )
+
+ for field in fields(desc):
+ add(result, generateClearFieldProc(desc, field))
+ add(result, generateHasFieldProc(desc, field))
+ add(result, generateSetFieldProc(desc, field))
+
+ if isRepeated(field):
+ add(result, generateAddToFieldProc(desc, field))
+
+ add(result, generateWriteMessageProc(desc))
+ add(result, generateReadMessageProc(desc))
+ add(result, generateSizeOfMessageProc(desc))
+ add(result, generateSerializeProc(desc))
+ add(result, generateDeserializeProc(desc))
+
+ when defined(debug):
+ hint(repr(result))
+
+macro generateEnumType*(x: typed): typed =
+ let
+ impl = getImpl(symbol(x))
+ name = $findColonExpr(impl, "name")[1]
+ values = findColonExpr(impl, "values")[1]
+
+ let enumTy = nnkEnumTy.newTree(newEmptyNode())
+
+ for valueNode in values:
+ let
+ name = $findColonExpr(valueNode, "name")[1]
+ number = findColonExpr(valueNode, "number")[1]
+
+ add(enumTy, nnkEnumFieldDef.newTree(ident(name), number))
+
+ result = newStmtList(nnkTypeSection.newTree(
+ nnkTypeDef.newTree(
+ nnkPragmaExpr.newTree(
+ postfix(ident(name), "*"),
+ nnkPragma.newTree(ident("pure"))
+ ),
+ newEmptyNode(),
+ enumTy
+ )
+ ))
+
+ when defined(debug):
+ hint(repr(result))
+
+macro generateEnumProcs*(x: typed): typed =
+ let
+ impl = getImpl(symbol(x))
+ name = $findColonExpr(impl, "name")[1]
+ nameId = ident(name)
+ values = findColonExpr(impl, "values")[1]
+ readProc = postfix(ident("read" & name), "*")
+ writeProc = postfix(ident("write" & name), "*")
+ sizeProc = postfix(ident("sizeOf" & name), "*")
+ resultId = ident("result")
+
+ result = newStmtList()
+
+ add(result, quote do:
+ proc `readProc`(stream: ProtobufStream): `nameId` =
+ `resultId` = `nameId`(readUInt32(stream))
+
+ proc `writeProc`(stream: ProtobufStream, value: `nameId`) =
+ writeEnum(stream, value)
+
+ proc `sizeProc`(value: `nameId`): uint64 =
+ `resultId` = sizeOfUInt32(uint32(value))
+ )
+
+ when defined(debug):
+ hint(repr(result))
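For reference, descriptor_pb.nim earlier in this commit shows how these macros
are consumed: a hand-written MessageDesc constant is passed to
generateMessageType and generateMessageProcs. A minimal sketch of the same
pattern with a hypothetical Example message (only the macro, type, and enum
names are taken from this commit; the message itself is illustrative):

    import intsets

    import nimpb/nimpb

    import gen

    const
        ExampleDesc = MessageDesc(
            name: "Example",
            fields: @[
                FieldDesc(
                    name: "id",
                    number: 1,
                    ftype: FieldType.Int32,
                    label: FieldLabel.Optional,
                    typeName: "",
                    packed: false,
                    oneofIdx: -1,
                ),
            ]
        )

    # Expands into the Example/ExampleObj types plus newExample, setId, hasId,
    # clearId, readExample, writeExample, sizeOfExample and the serialize /
    # deserialize procs produced by generateMessageProcs.
    generateMessageType(ExampleDesc)
    generateMessageProcs(ExampleDesc)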
diff --git a/src/nimpb_buildpkg/plugin.nim b/src/nimpb_buildpkg/plugin.nim
new file mode 100644
index 0000000..bf50559
--- /dev/null
+++ b/src/nimpb_buildpkg/plugin.nim
@@ -0,0 +1,923 @@
+import algorithm
+import os
+import pegs
+import sequtils
+import sets
+import strformat
+import strutils
+import tables
+
+import descriptor_pb
+import plugin_pb
+
+import nimpb/nimpb
+
+import gen
+
+type
+ Names = distinct seq[string]
+
+ Enum = ref object
+ names: Names
+ values: seq[tuple[name: string, number: int]]
+
+ Field = ref object
+ number: int
+ name: string
+ label: FieldDescriptorProto_Label
+ ftype: FieldDescriptorProto_Type
+ typeName: string
+ packed: bool
+ oneof: Oneof
+ mapEntry: Message
+
+ Message = ref object
+ names: Names
+ fields: seq[Field]
+ oneofs: seq[Oneof]
+ mapEntry: bool
+
+ Oneof = ref object
+ name: string
+ fields: seq[Field]
+
+ ProcessedFile = ref object
+ name: string
+ data: string
+
+ ProtoFile = ref object
+ fdesc: FileDescriptorProto
+ enums: seq[Enum]
+ messages: seq[Message]
+ syntax: Syntax
+
+ Syntax {.pure.} = enum
+ Proto2
+ Proto3
+
+when defined(debug):
+ proc log(msg: string) =
+ stderr.write(msg)
+ stderr.write("\n")
+else:
+ proc log(msg: string) = discard
+
+proc initNamesFromTypeName(typename: string): Names =
+ if typename[0] != '.':
+ raise newException(Exception, "relative names not supported")
+ let parts = split(typename[1..^1], ".")
+ result = Names(parts)
+
+proc `$`(names: Names): string =
+ let n = seq[string](names)
+ result = join(n, "_")
+
+proc add(names: var Names, s: string) =
+ add(seq[string](names), s)
+
+proc `&`(names: Names, s: string): Names =
+ result = names
+ add(result, s)
+
+proc isRepeated(field: Field): bool =
+ result = field.label == FieldDescriptorProtoLabel.LabelRepeated
+
+proc isMessage(field: Field): bool =
+ result = field.ftype == FieldDescriptorProtoType.TypeMessage
+
+proc isEnum(field: Field): bool =
+ result = field.ftype == FieldDescriptorProtoType.TypeEnum
+
+proc isNumeric(field: Field): bool =
+ case field.ftype
+ of FieldDescriptorProtoType.TypeDouble, FieldDescriptorProtoType.TypeFloat,
+ FieldDescriptorProtoType.TypeInt64, FieldDescriptorProtoType.TypeUInt64,
+ FieldDescriptorProtoType.TypeInt32, FieldDescriptorProtoType.TypeFixed64,
+ FieldDescriptorProtoType.TypeFixed32, FieldDescriptorProtoType.TypeBool,
+ FieldDescriptorProtoType.TypeUInt32, FieldDescriptorProtoType.TypeEnum,
+ FieldDescriptorProtoType.TypeSFixed32, FieldDescriptorProtoType.TypeSFixed64,
+ FieldDescriptorProtoType.TypeSInt32, FieldDescriptorProtoType.TypeSInt64:
+ result = true
+ else: discard
+
+proc isMapEntry(message: Message): bool =
+ result = message.mapEntry
+
+proc isMapEntry(field: Field): bool =
+ result = field.mapEntry != nil
+
+proc nimTypeName(field: Field): string =
+ case field.ftype
+ of FieldDescriptorProtoType.TypeDouble: result = "float64"
+ of FieldDescriptorProtoType.TypeFloat: result = "float32"
+ of FieldDescriptorProtoType.TypeInt64: result = "int64"
+ of FieldDescriptorProtoType.TypeUInt64: result = "uint64"
+ of FieldDescriptorProtoType.TypeInt32: result = "int32"
+ of FieldDescriptorProtoType.TypeFixed64: result = "uint64"
+ of FieldDescriptorProtoType.TypeFixed32: result = "uint32"
+ of FieldDescriptorProtoType.TypeBool: result = "bool"
+ of FieldDescriptorProtoType.TypeString: result = "string"
+ of FieldDescriptorProtoType.TypeGroup: result = ""
+ of FieldDescriptorProtoType.TypeMessage: result = field.typeName
+ of FieldDescriptorProtoType.TypeBytes: result = "bytes"
+ of FieldDescriptorProtoType.TypeUInt32: result = "uint32"
+ of FieldDescriptorProtoType.TypeEnum: result = field.typeName
+ of FieldDescriptorProtoType.TypeSFixed32: result = "int32"
+ of FieldDescriptorProtoType.TypeSFixed64: result = "int64"
+ of FieldDescriptorProtoType.TypeSInt32: result = "int32"
+ of FieldDescriptorProtoType.TypeSInt64: result = "int64"
+
+proc mapKeyType(field: Field): string =
+ for f in field.mapEntry.fields:
+ if f.name == "key":
+ return f.nimTypeName
+
+proc mapValueType(field: Field): string =
+ for f in field.mapEntry.fields:
+ if f.name == "value":
+ return f.nimTypeName
+
+proc `$`(ft: FieldDescriptorProtoType): string =
+ case ft
+ of FieldDescriptorProtoType.TypeDouble: result = "Double"
+ of FieldDescriptorProtoType.TypeFloat: result = "Float"
+ of FieldDescriptorProtoType.TypeInt64: result = "Int64"
+ of FieldDescriptorProtoType.TypeUInt64: result = "UInt64"
+ of FieldDescriptorProtoType.TypeInt32: result = "Int32"
+ of FieldDescriptorProtoType.TypeFixed64: result = "Fixed64"
+ of FieldDescriptorProtoType.TypeFixed32: result = "Fixed32"
+ of FieldDescriptorProtoType.TypeBool: result = "Bool"
+ of FieldDescriptorProtoType.TypeString: result = "String"
+ of FieldDescriptorProtoType.TypeGroup: result = "Group"
+ of FieldDescriptorProtoType.TypeMessage: result = "Message"
+ of FieldDescriptorProtoType.TypeBytes: result = "Bytes"
+ of FieldDescriptorProtoType.TypeUInt32: result = "UInt32"
+ of FieldDescriptorProtoType.TypeEnum: result = "Enum"
+ of FieldDescriptorProtoType.TypeSFixed32: result = "SFixed32"
+ of FieldDescriptorProtoType.TypeSFixed64: result = "SFixed64"
+ of FieldDescriptorProtoType.TypeSInt32: result = "SInt32"
+ of FieldDescriptorProtoType.TypeSInt64: result = "SInt64"
+
+proc defaultValue(field: Field): string =
+ if isMapEntry(field):
+ return &"newTable[{field.mapKeyType}, {field.mapValueType}]()"
+ elif isRepeated(field):
+ return "@[]"
+
+ case field.ftype
+ of FieldDescriptorProtoType.TypeDouble: result = "0"
+ of FieldDescriptorProtoType.TypeFloat: result = "0"
+ of FieldDescriptorProtoType.TypeInt64: result = "0"
+ of FieldDescriptorProtoType.TypeUInt64: result = "0"
+ of FieldDescriptorProtoType.TypeInt32: result = "0"
+ of FieldDescriptorProtoType.TypeFixed64: result = "0"
+ of FieldDescriptorProtoType.TypeFixed32: result = "0"
+ of FieldDescriptorProtoType.TypeBool: result = "false"
+ of FieldDescriptorProtoType.TypeString: result = "\"\""
+ of FieldDescriptorProtoType.TypeGroup: result = ""
+ of FieldDescriptorProtoType.TypeMessage: result = "nil"
+ of FieldDescriptorProtoType.TypeBytes: result = "bytes(\"\")"
+ of FieldDescriptorProtoType.TypeUInt32: result = "0"
+ of FieldDescriptorProtoType.TypeEnum: result = &"{field.typeName}(0)"
+ of FieldDescriptorProtoType.TypeSFixed32: result = "0"
+ of FieldDescriptorProtoType.TypeSFixed64: result = "0"
+ of FieldDescriptorProtoType.TypeSInt32: result = "0"
+ of FieldDescriptorProtoType.TypeSInt64: result = "0"
+
+proc wiretypeStr(field: Field): string =
+ result = "WireType."
+ case field.ftype
+ of FieldDescriptorProtoType.TypeDouble: result &= "Fixed64"
+ of FieldDescriptorProtoType.TypeFloat: result &= "Fixed32"
+ of FieldDescriptorProtoType.TypeInt64: result &= "Varint"
+ of FieldDescriptorProtoType.TypeUInt64: result &= "Varint"
+ of FieldDescriptorProtoType.TypeInt32: result &= "Varint"
+ of FieldDescriptorProtoType.TypeFixed64: result &= "Fixed64"
+ of FieldDescriptorProtoType.TypeFixed32: result &= "Fixed32"
+ of FieldDescriptorProtoType.TypeBool: result &= "Varint"
+ of FieldDescriptorProtoType.TypeString: result &= "LengthDelimited"
+ of FieldDescriptorProtoType.TypeGroup: result &= ""
+ of FieldDescriptorProtoType.TypeMessage: result &= "LengthDelimited"
+ of FieldDescriptorProtoType.TypeBytes: result &= "LengthDelimited"
+ of FieldDescriptorProtoType.TypeUInt32: result &= "Varint"
+    of FieldDescriptorProtoType.TypeEnum: result &= "Varint"
+ of FieldDescriptorProtoType.TypeSFixed32: result &= "Fixed32"
+ of FieldDescriptorProtoType.TypeSFixed64: result &= "Fixed64"
+ of FieldDescriptorProtoType.TypeSInt32: result &= "Varint"
+ of FieldDescriptorProtoType.TypeSInt64: result &= "Varint"
+
+proc fieldTypeStr(field: Field): string =
+ result = "FieldType." & $field.ftype
+
+proc isKeyword(s: string): bool =
+ case s
+ of "addr", "and", "as", "asm", "bind", "block", "break", "case", "cast",
+ "concept", "const", "continue", "converter", "defer", "discard",
+ "distinct", "div", "do", "elif", "else", "end", "enum", "except",
+ "export", "finally", "for", "from", "func", "if", "import", "in",
+ "include", "interface", "is", "isnot", "iterator", "let", "macro",
+ "method", "mixin", "mod", "nil", "not", "notin", "object", "of", "or",
+ "out", "proc", "ptr", "raise", "ref", "return", "shl", "shr", "static",
+ "template", "try", "tuple", "type", "using", "var", "when", "while",
+ "xor", "yield":
+ result = true
+ else:
+ result = false
+
+proc writeProc(field: Field): string =
+ if isMapEntry(field):
+ result = &"write{field.typeName}KV"
+ elif isMessage(field):
+ result = "writeMessage"
+ elif isEnum(field):
+ result = "writeEnum"
+ else:
+ result = &"write{field.typeName}"
+
+proc readProc(field: Field): string =
+ if isMapEntry(field):
+ result = &"read{field.typeName}KV"
+ elif isEnum(field):
+ result = &"readEnum[{field.typeName}]"
+ else:
+ result = &"read{field.typeName}"
+
+proc sizeOfProc(field: Field): string =
+ if isMapEntry(field):
+ result = &"sizeOf{field.typeName}KV"
+ elif isEnum(field):
+ result = &"sizeOfEnum[{field.typeName}]"
+ else:
+ result = &"sizeOf{field.typeName}"
+
+proc newField(file: ProtoFile, message: Message, desc: FieldDescriptorProto): Field =
+ new(result)
+
+ result.name = desc.name
+ result.number = desc.number
+ result.label = desc.label
+ result.ftype = desc.type
+ result.typeName = ""
+ result.packed = false
+ result.mapEntry = nil
+
+ # Identifiers cannot start/end with underscore
+ removePrefix(result.name, '_')
+ removeSuffix(result.name, '_')
+
+ # Consecutive underscores are not allowed
+ result.name = replace(result.name, peg"'_' '_'+", "_")
+
+ if isKeyword(result.name):
+ result.name = "f" & result.name
+
+ if isRepeated(result) and isNumeric(result):
+ if hasOptions(desc):
+ if hasPacked(desc.options):
+ result.packed = desc.options.packed
+ else:
+ result.packed =
+ if file.syntax == Syntax.Proto2:
+ false
+ else:
+ true
+ else:
+ result.packed =
+ if file.syntax == Syntax.Proto2:
+ false
+ else:
+ true
+
+ if hasOneof_index(desc):
+ result.oneof = message.oneofs[desc.oneof_index]
+ add(result.oneof.fields, result)
+
+ if isMessage(result) or isEnum(result):
+ result.typeName = $initNamesFromTypeName(desc.type_name)
+ else:
+ result.typeName = $result.ftype
+
+ log(&"newField {result.name} {$result.ftype} {result.typeName} PACKED={result.packed} SYNTAX={file.syntax}")
+
+proc newOneof(name: string): Oneof =
+ new(result)
+ result.fields = @[]
+ result.name = name
+
+proc newMessage(file: ProtoFile, names: Names, desc: DescriptorProto): Message =
+ new(result)
+
+ result.names = names
+ result.fields = @[]
+ result.oneofs = @[]
+ result.mapEntry = false
+
+ if hasMapEntry(desc.options):
+ result.mapEntry = desc.options.mapEntry
+
+ log(&"newMessage {$result.names}")
+
+ for oneof in desc.oneof_decl:
+ add(result.oneofs, newOneof(oneof.name))
+
+ for field in desc.field:
+ add(result.fields, newField(file, result, field))
+
+proc fixMapEntry(file: ProtoFile, message: Message): bool =
+ for field in message.fields:
+ for msg in file.messages:
+ if $msg.names == field.typeName:
+ if msg.mapEntry:
+ log(&"fixing map {field.name} {msg.names}")
+ field.mapEntry = msg
+ result = true
+
+proc newEnum(names: Names, desc: EnumDescriptorProto): Enum =
+ new(result)
+
+ result.names = names & desc.name
+ result.values = @[]
+
+ log(&"newEnum {$result.names}")
+
+ for value in desc.value:
+ add(result.values, (value.name, int(value.number)))
+
+ type EnumValue = tuple[name: string, number: int]
+
+ sort(result.values, proc (x, y: EnumValue): int =
+ system.cmp(x.number, y.number)
+ )
+
+iterator messages(desc: DescriptorProto, names: Names): tuple[names: Names, desc: DescriptorProto] =
+ var stack: seq[tuple[names: Names, desc: DescriptorProto]] = @[]
+
+ for nested in desc.nested_type:
+ add(stack, (names, nested))
+
+ while len(stack) > 0:
+ let (names, submsg) = pop(stack)
+
+ let subnames = names & submsg.name
+ yield (subnames, submsg)
+
+ for desc in submsg.nested_type:
+ add(stack, (subnames, desc))
+
+iterator messages(fdesc: FileDescriptorProto, names: Names): tuple[names: Names, desc: DescriptorProto] =
+ for desc in fdesc.message_type:
+ let subnames = names & desc.name
+ yield (subnames, desc)
+
+ for x in messages(desc, subnames):
+ yield x
+
+proc quoteReserved(name: string): string =
+ case name
+ of "type": result = &"`{name}`"
+ else: result = name
+
+proc accessor(field: Field): string =
+ if field.oneof != nil:
+ result = &"{field.oneof.name}.{quoteReserved(field.name)}"
+ else:
+ result = quoteReserved(field.name)
+
+proc dependencies(field: Field): seq[string] =
+ result = @[]
+
+ if isMessage(field) or isEnum(field):
+ add(result, field.typeName)
+
+proc dependencies(message: Message): seq[string] =
+ result = @[]
+
+ for field in message.fields:
+ add(result, dependencies(field))
+
+proc toposort(graph: TableRef[string, HashSet[string]]): seq[string] =
+ type State = enum Unknown, Gray, Black
+
+ var
+ enter = toSeq(keys(graph))
+ state = newTable[string, State]()
+ order: seq[string] = @[]
+
+ proc dfs(node: string) =
+ state[node] = Gray
+ if node in graph:
+ for k in graph[node]:
+ let sk =
+ if k in state:
+ state[k]
+ else:
+ Unknown
+
+ if sk == Gray:
+ # cycle
+ continue
+ elif sk == Black:
+ continue
+
+ let idx = find(enter, k)
+ if idx != -1:
+ delete(enter, idx)
+
+ dfs(k)
+ insert(order, node, 0)
+ state[node] = Black
+
+ while len(enter) > 0:
+ dfs(pop(enter))
+
+ result = order
+
+iterator sortDependencies(messages: seq[Message]): Message =
+ let
+ deps = newTable[string, HashSet[string]]()
+ byname = newTable[string, Message]()
+
+ for message in messages:
+ deps[$message.names] = toSet(dependencies(message))
+ byname[$message.names] = message
+
+ let order = reversed(toposort(deps))
+
+ for name in order:
+ if name in byname:
+ yield byname[name]
+
+proc parseFile(name: string, fdesc: FileDescriptorProto): ProtoFile =
+ log(&"parsing {name}")
+
+ new(result)
+
+ result.fdesc = fdesc
+ result.messages = @[]
+ result.enums = @[]
+
+ if hasSyntax(fdesc):
+ if fdesc.syntax == "proto2":
+ result.syntax = Syntax.Proto2
+ elif fdesc.syntax == "proto3":
+ result.syntax = Syntax.Proto3
+ else:
+ raise newException(Exception, "unrecognized syntax: " & fdesc.syntax)
+ else:
+ result.syntax = Syntax.Proto2
+
+ let basename =
+ if hasPackage(fdesc):
+ Names(split(fdesc.package, "."))
+ else:
+ Names(@[])
+
+ for e in fdesc.enum_type:
+ add(result.enums, newEnum(basename, e))
+
+ for name, message in messages(fdesc, basename):
+ add(result.messages, newMessage(result, name, message))
+
+ for e in message.enum_type:
+ add(result.enums, newEnum(name, e))
+
+proc addLine(s: var string, line: string) =
+ if not isNilOrWhitespace(line):
+ s &= line
+ s &= "\n"
+
+iterator genType(e: Enum): string =
+ yield &"{e.names}* {{.pure.}} = enum"
+ for item in e.values:
+ let (name, number) = item
+ yield indent(&"{name} = {number}", 4)
+
+proc fullType(field: Field): string =
+ if isMapEntry(field):
+ result = &"TableRef[{field.mapKeyType}, {field.mapValueType}]"
+ else:
+ result = field.nimTypeName
+ if isRepeated(field):
+ result = &"seq[{result}]"
+
+proc mapKeyField(message: Message): Field =
+ for field in message.fields:
+ if field.name == "key":
+ return field
+
+proc mapValueField(message: Message): Field =
+ for field in message.fields:
+ if field.name == "value":
+ return field
+
+iterator genType(message: Message): string =
+ if not isMapEntry(message):
+ yield &"{message.names}* = ref {message.names}Obj"
+ yield &"{message.names}Obj* = object of RootObj"
+ yield indent(&"hasField: IntSet", 4)
+
+ for field in message.fields:
+ if isMapEntry(field):
+ yield indent(&"{field.name}: TableRef[{mapKeyType(field)}, {mapValueType(field)}]", 4)
+ elif field.oneof == nil:
+ yield indent(&"{quoteReserved(field.name)}: {field.fullType}", 4)
+
+ for oneof in message.oneofs:
+ yield indent(&"{oneof.name}: {message.names}_{oneof.name}_OneOf", 4)
+
+ for oneof in message.oneofs:
+ yield ""
+ yield &"{message.names}_{oneof.name}_OneOf* {{.union.}} = object"
+ for field in oneof.fields:
+ yield indent(&"{quoteReserved(field.name)}: {field.fullType}", 4)
+
+iterator genNewMessageProc(msg: Message): string =
+ yield &"proc new{msg.names}*(): {msg.names} ="
+ yield indent("new(result)", 4)
+ yield indent("result.hasField = initIntSet()", 4)
+ for field in msg.fields:
+ yield indent(&"result.{field.accessor} = {defaultValue(field)}", 4)
+ yield ""
+
+iterator oneofSiblings(field: Field): Field =
+ if field.oneof != nil:
+ for sibling in field.oneof.fields:
+ if sibling == field:
+ continue
+ yield sibling
+
+iterator genClearFieldProc(msg: Message, field: Field): string =
+ yield &"proc clear{field.name}*(message: {msg.names}) ="
+ yield indent(&"message.{field.accessor} = {defaultValue(field)}", 4)
+ var numbers: seq[int] = @[field.number]
+ for sibling in oneofSiblings(field):
+ add(numbers, sibling.number)
+ yield indent(&"excl(message.hasField, [{join(numbers, \", \")}])", 4)
+ yield ""
+
+iterator genHasFieldProc(msg: Message, field: Field): string =
+ yield &"proc has{field.name}*(message: {msg.names}): bool ="
+ var check = indent(&"result = contains(message.hasField, {field.number})", 4)
+ if isRepeated(field) or isMapEntry(field):
+ check = &"{check} or (len(message.{field.accessor}) > 0)"
+ yield check
+ yield ""
+
+iterator genSetFieldProc(msg: Message, field: Field): string =
+ yield &"proc set{field.name}*(message: {msg.names}, value: {field.fullType}) ="
+ yield indent(&"message.{field.accessor} = value", 4)
+ yield indent(&"incl(message.hasField, {field.number})", 4)
+ var numbers: seq[int] = @[]
+ for sibling in oneofSiblings(field):
+ add(numbers, sibling.number)
+ if len(numbers) > 0:
+ yield indent(&"excl(message.hasField, [{join(numbers, \", \")}])", 4)
+ yield ""
+
+iterator genAddToFieldProc(msg: Message, field: Field): string =
+ yield &"proc add{field.name}*(message: {msg.names}, value: {field.nimTypeName}) ="
+ yield indent(&"add(message.{field.name}, value)", 4)
+ yield indent(&"incl(message.hasField, {field.number})", 4)
+ yield ""
+
+iterator genFieldAccessorProcs(msg: Message, field: Field): string =
+ yield &"proc {quoteReserved(field.name)}*(message: {msg.names}): {field.fullType} {{.inline.}} ="
+ yield indent(&"message.{field.accessor}", 4)
+ yield ""
+
+ yield &"proc `{field.name}=`*(message: {msg.names}, value: {field.fullType}) {{.inline.}} ="
+ yield indent(&"set{field.name}(message, value)", 4)
+ yield ""
+
+iterator genWriteMapKVProc(msg: Message): string =
+ let
+ key = mapKeyField(msg)
+ value = mapValueField(msg)
+
+ yield &"proc write{msg.names}KV(stream: ProtobufStream, key: {key.fullType}, value: {value.fullType}) ="
+ yield indent(&"{key.writeProc}(stream, key, {key.number})", 4)
+ yield indent(&"{value.writeProc}(stream, value, {value.number})", 4)
+ yield ""
+
+iterator genWriteMessageProc(msg: Message): string =
+ yield &"proc write{msg.names}*(stream: ProtobufStream, message: {msg.names}) ="
+ for field in msg.fields:
+ if isMapEntry(field):
+ yield indent(&"for key, value in message.{field.name}:", 4)
+ yield indent(&"writeTag(stream, {field.number}, {wiretypeStr(field)})", 8)
+ yield indent(&"writeVarint(stream, {field.sizeOfProc}(key, value))", 8)
+ yield indent(&"{field.writeProc}(stream, key, value)", 8)
+ elif isRepeated(field):
+ if field.packed:
+ yield indent(&"if has{field.name}(message):", 4)
+ yield indent(&"writeTag(stream, {field.number}, WireType.LengthDelimited)", 8)
+ yield indent(&"writeVarint(stream, packedFieldSize(message.{field.name}, {field.fieldTypeStr}))", 8)
+ yield indent(&"for value in message.{field.name}:", 8)
+ yield indent(&"{field.writeProc}(stream, value)", 12)
+ else:
+ yield indent(&"for value in message.{field.name}:", 4)
+ yield indent(&"{field.writeProc}(stream, value, {field.number})", 8)
+ else:
+ yield indent(&"if has{field.name}(message):", 4)
+ yield indent(&"{field.writeProc}(stream, message.{field.accessor}, {field.number})", 8)
+
+ if len(msg.fields) == 0:
+ yield indent("discard", 4)
+
+ yield ""
+
+iterator genReadMapKVProc(msg: Message): string =
+ let
+ key = mapKeyField(msg)
+ value = mapValueField(msg)
+
+ yield &"proc read{msg.names}KV(stream: ProtobufStream, tbl: TableRef[{key.fullType}, {value.fullType}]) ="
+
+ yield indent(&"var", 4)
+ yield indent(&"key: {key.fullType}", 8)
+ yield indent("gotKey = false", 8)
+ yield indent(&"value: {value.fullType}", 8)
+ yield indent("gotValue = false", 8)
+ yield indent("while not atEnd(stream):", 4)
+ yield indent("let", 8)
+ yield indent("tag = readTag(stream)", 12)
+ yield indent("wireType = wireType(tag)", 12)
+ yield indent("case fieldNumber(tag)", 8)
+ yield indent(&"of {key.number}:", 8)
+ yield indent(&"key = {key.readProc}(stream)", 12)
+ yield indent("gotKey = true", 12)
+ yield indent(&"of {value.number}:", 8)
+ if isMessage(value):
+ yield indent("let", 12)
+ yield indent("size = readVarint(stream)", 16)
+ yield indent("data = safeReadStr(stream, int(size))", 16)
+ yield indent("pbs = newProtobufStream(newStringStream(data))", 16)
+ yield indent(&"value = {value.readProc}(pbs)", 12)
+ else:
+ yield indent(&"value = {value.readProc}(stream)", 12)
+ yield indent("gotValue = true", 12)
+ yield indent("else: skipField(stream, wireType)", 8)
+ yield indent("if not gotKey:", 4)
+ yield indent(&"raise newException(Exception, \"missing key\")", 8)
+ yield indent("if not gotValue:", 4)
+ yield indent(&"raise newException(Exception, \"missing value\")", 8)
+ yield indent("tbl[key] = value", 4)
+ yield ""
+
+iterator genReadMessageProc(msg: Message): string =
+ yield &"proc read{msg.names}*(stream: ProtobufStream): {msg.names} ="
+ yield indent(&"result = new{msg.names}()", 4)
+ if len(msg.fields) > 0:
+ yield indent("while not atEnd(stream):", 4)
+ yield indent("let", 8)
+ yield indent("tag = readTag(stream)", 12)
+ yield indent("wireType = wireType(tag)", 12)
+ yield indent("case fieldNumber(tag)", 8)
+ yield indent("of 0:", 8)
+ yield indent("raise newException(InvalidFieldNumberError, \"Invalid field number: 0\")", 12)
+ for field in msg.fields:
+ let
+ setter =
+ if isRepeated(field):
+ &"add{field.name}"
+ else:
+ &"set{field.name}"
+ yield indent(&"of {field.number}:", 8)
+ if isRepeated(field):
+ if isMapEntry(field):
+ yield indent(&"expectWireType(wireType, {field.wiretypeStr})", 12)
+ yield indent("let", 12)
+ yield indent("size = readVarint(stream)", 16)
+ yield indent("data = safeReadStr(stream, int(size))", 16)
+ yield indent("pbs = newProtobufStream(newStringStream(data))", 16)
+ yield indent(&"{field.readProc}(pbs, result.{field.name})", 12)
+ elif isNumeric(field):
+ yield indent(&"expectWireType(wireType, {field.wiretypeStr}, WireType.LengthDelimited)", 12)
+ yield indent("if wireType == WireType.LengthDelimited:", 12)
+ yield indent("let", 16)
+ yield indent("size = readVarint(stream)", 20)
+ yield indent("start = uint64(getPosition(stream))", 20)
+ yield indent("var consumed = 0'u64", 16)
+ yield indent("while consumed < size:", 16)
+ yield indent(&"{setter}(result, {field.readProc}(stream))", 20)
+ yield indent("consumed = uint64(getPosition(stream)) - start", 20)
+ yield indent("if consumed != size:", 16)
+ yield indent("raise newException(Exception, \"packed field size mismatch\")", 20)
+ yield indent("else:", 12)
+ yield indent(&"{setter}(result, {field.readProc}(stream))", 16)
+ elif isMessage(field):
+ yield indent(&"expectWireType(wireType, {field.wiretypeStr})", 12)
+ yield indent("let", 12)
+ yield indent("size = readVarint(stream)", 16)
+ yield indent("data = safeReadStr(stream, int(size))", 16)
+ yield indent("pbs = newProtobufStream(newStringStream(data))", 16)
+ yield indent(&"{setter}(result, {field.readProc}(pbs))", 12)
+ else:
+ yield indent(&"expectWireType(wireType, {field.wiretypeStr})", 12)
+ yield indent(&"{setter}(result, {field.readProc}(stream))", 12)
+ else:
+ yield indent(&"expectWireType(wireType, {field.wiretypeStr})", 12)
+ if isMessage(field):
+ yield indent("let", 12)
+ yield indent("size = readVarint(stream)", 16)
+ yield indent("data = safeReadStr(stream, int(size))", 16)
+ yield indent("pbs = newProtobufStream(newStringStream(data))", 16)
+ yield indent(&"{setter}(result, {field.readProc}(pbs))", 12)
+ else:
+ yield indent(&"{setter}(result, {field.readProc}(stream))", 12)
+ yield indent("else: skipField(stream, wireType)", 8)
+ yield ""
+
+iterator genSizeOfMapKVProc(message: Message): string =
+ let
+ key = mapKeyField(message)
+ value = mapValueField(message)
+
+ yield &"proc sizeOf{message.names}KV(key: {key.fullType}, value: {value.fullType}): uint64 ="
+
+ # Key (cannot be message or other complex field)
+ yield indent(&"result = result + sizeOfTag({key.number}, {key.wiretypeStr})", 4)
+ yield indent(&"result = result + {key.sizeOfProc}(key)", 4)
+
+ # Value
+ yield indent(&"result = result + sizeOfTag({value.number}, {value.wiretypeStr})", 4)
+ if isMessage(value):
+ yield indent(&"result = result + sizeOfLengthDelimited({value.sizeOfProc}(value))", 4)
+ else:
+ yield indent(&"result = result + {value.sizeOfProc}(value)", 4)
+
+ yield ""
+
+iterator genSizeOfMessageProc(msg: Message): string =
+ yield &"proc sizeOf{msg.names}*(message: {msg.names}): uint64 ="
+ for field in msg.fields:
+ if isMapEntry(field):
+ yield indent(&"if has{field.name}(message):", 4)
+ yield indent(&"var sizeOfKV = 0'u64", 8)
+ yield indent(&"for key, value in message.{field.name}:", 8)
+ yield indent(&"sizeOfKV = sizeOfKV + {field.sizeOfProc}(key, value)", 12)
+ yield indent(&"result = result + sizeOfTag({field.number}, {field.wiretypeStr})", 8)
+ yield indent(&"result = result + sizeOfLengthDelimited(sizeOfKV)", 8)
+ elif isRepeated(field):
+ if isNumeric(field):
+ yield indent(&"if has{field.name}(message):", 4)
+ yield indent(&"result = result + sizeOfTag({field.number}, WireType.LengthDelimited)", 8)
+ yield indent(&"result = result + sizeOfLengthDelimited(packedFieldSize(message.{field.name}, {field.fieldTypeStr}))", 8)
+ else:
+ yield indent(&"for value in message.{field.name}:", 4)
+ yield indent(&"result = result + sizeOfTag({field.number}, {field.wiretypeStr})", 8)
+ if isMessage(field):
+ yield indent(&"result = result + sizeOfLengthDelimited({field.sizeOfProc}(value))", 8)
+ else:
+ yield indent(&"result = result + {field.sizeOfProc}(value)", 8)
+ else:
+ yield indent(&"if has{field.name}(message):", 4)
+ yield indent(&"result = result + sizeOfTag({field.number}, {field.wiretypeStr})", 8)
+ if isMessage(field):
+ yield indent(&"result = result + sizeOfLengthDelimited({field.sizeOfProc}(message.{field.accessor}))", 8)
+ else:
+ yield indent(&"result = result + {field.sizeOfProc}(message.{field.accessor})", 8)
+
+ if len(msg.fields) == 0:
+ yield indent("result = 0", 4)
+
+ yield ""
+
+iterator genMessageProcForwards(msg: Message): string =
+ if not isMapEntry(msg):
+ yield &"proc new{msg.names}*(): {msg.names}"
+ yield &"proc write{msg.names}*(stream: ProtobufStream, message: {msg.names})"
+ yield &"proc read{msg.names}*(stream: ProtobufStream): {msg.names}"
+ yield &"proc sizeOf{msg.names}*(message: {msg.names}): uint64"
+ else:
+ let
+ key = mapKeyField(msg)
+ value = mapValueField(msg)
+
+ yield &"proc write{msg.names}KV(stream: ProtobufStream, key: {key.fullType}, value: {value.fullType})"
+ yield &"proc read{msg.names}KV(stream: ProtobufStream, tbl: TableRef[{key.fullType}, {value.fullType}])"
+ yield &"proc sizeOf{msg.names}KV(key: {key.fullType}, value: {value.fullType}): uint64"
+
+iterator genProcs(msg: Message): string =
+ if isMapEntry(msg):
+ for line in genSizeOfMapKVProc(msg): yield line
+ for line in genWriteMapKVProc(msg): yield line
+ for line in genReadMapKVProc(msg): yield line
+ else:
+ for line in genNewMessageProc(msg): yield line
+
+ for field in msg.fields:
+ for line in genClearFieldProc(msg, field): yield line
+ for line in genHasFieldProc(msg, field): yield line
+ for line in genSetFieldProc(msg, field): yield line
+
+ if isRepeated(field) and not isMapEntry(field):
+ for line in genAddToFieldProc(msg, field): yield line
+
+ for line in genFieldAccessorProcs(msg, field): yield line
+
+ for line in genSizeOfMessageProc(msg): yield line
+ for line in genWriteMessageProc(msg): yield line
+ for line in genReadMessageProc(msg): yield line
+
+ yield &"proc serialize*(message: {msg.names}): string ="
+ yield indent("let", 4)
+ yield indent("ss = newStringStream()", 8)
+ yield indent("pbs = newProtobufStream(ss)", 8)
+ yield indent(&"write{msg.names}(pbs, message)", 4)
+ yield indent("result = ss.data", 4)
+ yield ""
+
+ yield &"proc new{msg.names}*(data: string): {msg.names} ="
+ yield indent("let", 4)
+ yield indent("ss = newStringStream(data)", 8)
+ yield indent("pbs = newProtobufStream(ss)", 8)
+ yield indent(&"result = read{msg.names}(pbs)", 4)
+ yield ""
+
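Taken together, the iterators above define the whole public surface of a generated message:
a new{Message} constructor, per-field set/has/clear procs (plus an add proc for repeated
fields), dot-style accessors, serialize, and a string-taking constructor that parses. A
minimal usage sketch, assuming a hypothetical example_pb module generated from a message
Example with a string field name (1) and a repeated int32 field values (2); all names below
are made up for illustration:

    import example_pb   # hypothetical module produced by this generator

    let msg = newExample()
    setName(msg, "hello")           # also marks field 1 as present in hasField
    addValues(msg, 42)              # repeated fields get an add proc
    assert hasName(msg)

    let data = serialize(msg)       # writeExample into an in-memory stream
    let decoded = newExample(data)  # readExample from the given string
    assert decoded.name == "hello"

    clearName(decoded)              # reset to default, presence bit cleared
    assert not hasName(decoded)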
+proc processFile(filename: string, fdesc: FileDescriptorProto,
+ otherFiles: TableRef[string, ProtoFile]): ProcessedFile =
+ var (dir, name, _) = splitFile(filename)
+ var pbfilename = (dir / name) & "_pb.nim"
+
+ log(&"processing {filename}: {pbfilename}")
+
+ new(result)
+ result.name = pbfilename
+ result.data = ""
+
+ let parsed = parseFile(filename, fdesc)
+
+ var hasMaps = false
+ for message in parsed.messages:
+ let tmp = fixMapEntry(parsed, message)
+ if tmp:
+ hasMaps = true
+
+ addLine(result.data, "# Generated by protoc_gen_nim. Do not edit!")
+ addLine(result.data, "")
+ addLine(result.data, "import intsets")
+ if hasMaps:
+ addLine(result.data, "import tables")
+ addLine(result.data, "export tables")
+ addLine(result.data, "")
+ addLine(result.data, "import nimpb/nimpb")
+ addLine(result.data, "")
+
+ for dep in fdesc.dependency:
+ var (dir, depname, _) = splitFile(dep)
+
+ if dir == "google/protobuf":
+ dir = "nimpb/wkt"
+
+ var deppbname = (dir / depname) & "_pb"
+ addLine(result.data, &"import {deppbname}")
+
+ if hasDependency(fdesc):
+ addLine(result.data, "")
+
+ addLine(result.data, "type")
+
+ for e in parsed.enums:
+ for line in genType(e): addLine(result.data, indent(line, 4))
+
+ for message in parsed.messages:
+ for line in genType(message): addLine(result.data, indent(line, 4))
+
+ addLine(result.data, "")
+
+ for message in sortDependencies(parsed.messages):
+ for line in genMessageProcForwards(message):
+ addLine(result.data, line)
+ addLine(result.data, "")
+
+ for message in sortDependencies(parsed.messages):
+ for line in genProcs(message):
+ addLine(result.data, line)
+ addLine(result.data, "")
+
+proc generateCode(request: CodeGeneratorRequest, response: CodeGeneratorResponse) =
+ let otherFiles = newTable[string, ProtoFile]()
+
+ for file in request.proto_file:
+ add(otherFiles, file.name, parseFile(file.name, file))
+
+ for filename in request.file_to_generate:
+ for fdesc in request.proto_file:
+ if fdesc.name == filename:
+ let results = processFile(filename, fdesc, otherFiles)
+ let f = newCodeGeneratorResponse_File()
+ setName(f, results.name)
+ setContent(f, results.data)
+ addFile(response, f)
+
+proc pluginMain*() =
+ let pbsi = newProtobufStream(newFileStream(stdin))
+ let pbso = newProtobufStream(newFileStream(stdout))
+
+ let request = readCodeGeneratorRequest(pbsi)
+ let response = newCodeGeneratorResponse()
+
+ generateCode(request, response)
+
+ writeCodeGeneratorResponse(pbso, response)
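(Per the plugin.proto comments bundled below, a plugin named protoc-gen-nim is picked up by
protoc when the --nim_out flag is given; pluginMain above implements exactly that
request-in/response-out exchange over stdin and stdout.)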
diff --git a/src/nimpb_buildpkg/plugin_pb.nim b/src/nimpb_buildpkg/plugin_pb.nim
new file mode 100644
index 0000000..fa9fc94
--- /dev/null
+++ b/src/nimpb_buildpkg/plugin_pb.nim
@@ -0,0 +1,160 @@
+import intsets
+
+import gen
+import nimpb/nimpb
+
+import descriptor_pb
+
+const
+ VersionDesc = MessageDesc(
+ name: "Version",
+ fields: @[
+ FieldDesc(
+ name: "major",
+ number: 1,
+ ftype: FieldType.Int32,
+ label: FieldLabel.Optional,
+ typeName: "",
+ packed: false,
+ oneofIdx: -1,
+ ),
+ FieldDesc(
+ name: "minor",
+ number: 2,
+ ftype: FieldType.Int32,
+ label: FieldLabel.Optional,
+ typeName: "",
+ packed: false,
+ oneofIdx: -1,
+ ),
+ FieldDesc(
+ name: "patch",
+ number: 3,
+ ftype: FieldType.Int32,
+ label: FieldLabel.Optional,
+ typeName: "",
+ packed: false,
+ oneofIdx: -1,
+ ),
+ FieldDesc(
+ name: "suffix",
+ number: 4,
+ ftype: FieldType.String,
+ label: FieldLabel.Optional,
+ typeName: "",
+ packed: false,
+ oneofIdx: -1,
+ )
+ ]
+ )
+
+ CodeGeneratorRequestDesc = MessageDesc(
+ name: "CodeGeneratorRequest",
+ fields: @[
+ FieldDesc(
+ name: "file_to_generate",
+ number: 1,
+ ftype: FieldType.String,
+ label: FieldLabel.Repeated,
+ typeName: "",
+ packed: false,
+ oneofIdx: -1,
+ ),
+ FieldDesc(
+ name: "parameter",
+ number: 2,
+ ftype: FieldType.String,
+ label: FieldLabel.Optional,
+ typeName: "",
+ packed: false,
+ oneofIdx: -1,
+ ),
+ FieldDesc(
+ name: "proto_file",
+ number: 15,
+ ftype: FieldType.Message,
+ label: FieldLabel.Repeated,
+ typeName: "FileDescriptorProto",
+ packed: false,
+ oneofIdx: -1,
+ ),
+ FieldDesc(
+ name: "compiler_version",
+ number: 3,
+ ftype: FieldType.Message,
+ label: FieldLabel.Optional,
+ typeName: "Version",
+ packed: false,
+ oneofIdx: -1,
+ )
+ ]
+ )
+
+ CodeGeneratorResponseDesc = MessageDesc(
+ name: "CodeGeneratorResponse",
+ fields: @[
+ FieldDesc(
+ name: "error",
+ number: 1,
+ ftype: FieldType.String,
+ label: FieldLabel.Optional,
+ typeName: "",
+ packed: false,
+ oneofIdx: -1,
+ ),
+ FieldDesc(
+ name: "file",
+ number: 15,
+ ftype: FieldType.Message,
+ label: FieldLabel.Repeated,
+ typeName: "CodeGeneratorResponse_File",
+ packed: false,
+ oneofIdx: -1,
+ ),
+ ]
+ )
+
+ CodeGeneratorResponse_FileDesc = MessageDesc(
+ name: "CodeGeneratorResponse_File",
+ fields: @[
+ FieldDesc(
+ name: "name",
+ number: 1,
+ ftype: FieldType.String,
+ label: FieldLabel.Optional,
+ typeName: "",
+ packed: false,
+ oneofIdx: -1,
+ ),
+ FieldDesc(
+ name: "insertion_point",
+ number: 2,
+ ftype: FieldType.String,
+ label: FieldLabel.Optional,
+ typeName: "",
+ packed: false,
+ oneofIdx: -1,
+ ),
+ FieldDesc(
+ name: "content",
+ number: 15,
+ ftype: FieldType.String,
+ label: FieldLabel.Optional,
+ typeName: "",
+ packed: false,
+ oneofIdx: -1,
+ ),
+ ]
+ )
+
+generateMessageType(VersionDesc)
+generateMessageProcs(VersionDesc)
+
+generateMessageType(CodeGeneratorRequestDesc)
+generateMessageProcs(CodeGeneratorRequestDesc)
+
+generateMessageType(CodeGeneratorResponse_FileDesc)
+generateMessageProcs(CodeGeneratorResponse_FileDesc)
+
+generateMessageType(CodeGeneratorResponseDesc)
+generateMessageProcs(CodeGeneratorResponseDesc)
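The same descriptor-driven bootstrapping works for any additional message: declare a
MessageDesc constant and run it through generateMessageType and generateMessageProcs from
gen. A sketch with a made-up message (not part of the plugin protocol), assuming the same
imports as this file:

    const
        GreetingDesc = MessageDesc(      # hypothetical, for illustration only
            name: "Greeting",
            fields: @[
                FieldDesc(
                    name: "text",
                    number: 1,
                    ftype: FieldType.String,
                    label: FieldLabel.Optional,
                    typeName: "",
                    packed: false,
                    oneofIdx: -1,
                )
            ]
        )

    generateMessageType(GreetingDesc)
    generateMessageProcs(GreetingDesc)

    # Expands to the same API shape as the generated *_pb modules:
    # newGreeting(), setText(...), hasText(...), serialize(...), ...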
diff --git a/src/nimpb_buildpkg/protobuf/LICENSE b/src/nimpb_buildpkg/protobuf/LICENSE
new file mode 100644
index 0000000..f028c82
--- /dev/null
+++ b/src/nimpb_buildpkg/protobuf/LICENSE
@@ -0,0 +1,42 @@
+This license applies to all parts of Protocol Buffers except the following:
+
+ - Atomicops support for generic gcc, located in
+ src/google/protobuf/stubs/atomicops_internals_generic_gcc.h.
+ This file is copyrighted by Red Hat Inc.
+
+ - Atomicops support for AIX/POWER, located in
+ src/google/protobuf/stubs/atomicops_internals_power.h.
+ This file is copyrighted by Bloomberg Finance LP.
+
+Copyright 2014, Google Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Code generated by the Protocol Buffer compiler is owned by the owner
+of the input file used when generating it. This code is not
+standalone and requires a support library to be linked with it. This
+support library is itself covered by the above license.
diff --git a/src/nimpb_buildpkg/protobuf/include/google/protobuf/any.proto b/src/nimpb_buildpkg/protobuf/include/google/protobuf/any.proto
new file mode 100644
index 0000000..c748667
--- /dev/null
+++ b/src/nimpb_buildpkg/protobuf/include/google/protobuf/any.proto
@@ -0,0 +1,149 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option go_package = "github.com/golang/protobuf/ptypes/any";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "AnyProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+
+// `Any` contains an arbitrary serialized protocol buffer message along with a
+// URL that describes the type of the serialized message.
+//
+// Protobuf library provides support to pack/unpack Any values in the form
+// of utility functions or additional generated methods of the Any type.
+//
+// Example 1: Pack and unpack a message in C++.
+//
+// Foo foo = ...;
+// Any any;
+// any.PackFrom(foo);
+// ...
+// if (any.UnpackTo(&foo)) {
+// ...
+// }
+//
+// Example 2: Pack and unpack a message in Java.
+//
+// Foo foo = ...;
+// Any any = Any.pack(foo);
+// ...
+// if (any.is(Foo.class)) {
+// foo = any.unpack(Foo.class);
+// }
+//
+// Example 3: Pack and unpack a message in Python.
+//
+// foo = Foo(...)
+// any = Any()
+// any.Pack(foo)
+// ...
+// if any.Is(Foo.DESCRIPTOR):
+// any.Unpack(foo)
+// ...
+//
+// Example 4: Pack and unpack a message in Go
+//
+// foo := &pb.Foo{...}
+// any, err := ptypes.MarshalAny(foo)
+// ...
+// foo := &pb.Foo{}
+// if err := ptypes.UnmarshalAny(any, foo); err != nil {
+// ...
+// }
+//
+// The pack methods provided by protobuf library will by default use
+// 'type.googleapis.com/full.type.name' as the type URL and the unpack
+// methods only use the fully qualified type name after the last '/'
+// in the type URL, for example "foo.bar.com/x/y.z" will yield type
+// name "y.z".
+//
+//
+// JSON
+// ====
+// The JSON representation of an `Any` value uses the regular
+// representation of the deserialized, embedded message, with an
+// additional field `@type` which contains the type URL. Example:
+//
+// package google.profile;
+// message Person {
+// string first_name = 1;
+// string last_name = 2;
+// }
+//
+// {
+// "@type": "type.googleapis.com/google.profile.Person",
+// "firstName": <string>,
+// "lastName": <string>
+// }
+//
+// If the embedded message type is well-known and has a custom JSON
+// representation, that representation will be embedded adding a field
+// `value` which holds the custom JSON in addition to the `@type`
+// field. Example (for message [google.protobuf.Duration][]):
+//
+// {
+// "@type": "type.googleapis.com/google.protobuf.Duration",
+// "value": "1.212s"
+// }
+//
+message Any {
+ // A URL/resource name whose content describes the type of the
+ // serialized protocol buffer message.
+ //
+ // For URLs which use the scheme `http`, `https`, or no scheme, the
+ // following restrictions and interpretations apply:
+ //
+ // * If no scheme is provided, `https` is assumed.
+ // * The last segment of the URL's path must represent the fully
+ // qualified name of the type (as in `path/google.protobuf.Duration`).
+ // The name should be in a canonical form (e.g., leading "." is
+ // not accepted).
+ // * An HTTP GET on the URL must yield a [google.protobuf.Type][]
+ // value in binary format, or produce an error.
+ // * Applications are allowed to cache lookup results based on the
+ // URL, or have them precompiled into a binary to avoid any
+ // lookup. Therefore, binary compatibility needs to be preserved
+ // on changes to types. (Use versioned type names to manage
+ // breaking changes.)
+ //
+ // Schemes other than `http`, `https` (or the empty scheme) might be
+ // used with implementation specific semantics.
+ //
+ string type_url = 1;
+
+ // Must be a valid serialized protocol buffer of the above specified type.
+ bytes value = 2;
+}
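This diff does not show dedicated pack/unpack helpers for nimpb, but the same effect can be
had with the generated accessors. A rough sketch, assuming the well-known types are generated
into nimpb/wkt (as the import rewriting in plugin.nim suggests), that bytes fields map to Nim
strings, and using a hypothetical person_pb module for the google.profile.Person example
above:

    import nimpb/wkt/any_pb   # assumed location of the generated Any module
    import person_pb          # hypothetical generated module

    let person = newPerson()
    setFirstName(person, "Ada")

    # Pack: type_url is "type.googleapis.com/" plus the fully qualified name,
    # value holds the serialized message bytes.
    let packed = newAny()
    setTypeUrl(packed, "type.googleapis.com/google.profile.Person")
    setValue(packed, serialize(person))

    # Unpack: check the type name after the last '/', then parse value.
    if packed.typeUrl == "type.googleapis.com/google.profile.Person":
        let unpacked = newPerson(packed.value)
        assert unpacked.firstName == "Ada"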
diff --git a/src/nimpb_buildpkg/protobuf/include/google/protobuf/api.proto b/src/nimpb_buildpkg/protobuf/include/google/protobuf/api.proto
new file mode 100644
index 0000000..f37ee2f
--- /dev/null
+++ b/src/nimpb_buildpkg/protobuf/include/google/protobuf/api.proto
@@ -0,0 +1,210 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+import "google/protobuf/source_context.proto";
+import "google/protobuf/type.proto";
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "ApiProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+option go_package = "google.golang.org/genproto/protobuf/api;api";
+
+// Api is a light-weight descriptor for an API Interface.
+//
+// Interfaces are also described as "protocol buffer services" in some contexts,
+// such as by the "service" keyword in a .proto file, but they are different
+// from API Services, which represent a concrete implementation of an interface
+// as opposed to simply a description of methods and bindings. They are also
+// sometimes simply referred to as "APIs" in other contexts, such as the name of
+// this message itself. See https://cloud.google.com/apis/design/glossary for
+// detailed terminology.
+message Api {
+
+ // The fully qualified name of this interface, including package name
+ // followed by the interface's simple name.
+ string name = 1;
+
+ // The methods of this interface, in unspecified order.
+ repeated Method methods = 2;
+
+ // Any metadata attached to the interface.
+ repeated Option options = 3;
+
+ // A version string for this interface. If specified, must have the form
+ // `major-version.minor-version`, as in `1.10`. If the minor version is
+ // omitted, it defaults to zero. If the entire version field is empty, the
+ // major version is derived from the package name, as outlined below. If the
+ // field is not empty, the version in the package name will be verified to be
+ // consistent with what is provided here.
+ //
+ // The versioning schema uses [semantic
+ // versioning](http://semver.org) where the major version number
+ // indicates a breaking change and the minor version an additive,
+ // non-breaking change. Both version numbers are signals to users
+ // what to expect from different versions, and should be carefully
+ // chosen based on the product plan.
+ //
+ // The major version is also reflected in the package name of the
+ // interface, which must end in `v<major-version>`, as in
+ // `google.feature.v1`. For major versions 0 and 1, the suffix can
+ // be omitted. Zero major versions must only be used for
+ // experimental, non-GA interfaces.
+ //
+ //
+ string version = 4;
+
+ // Source context for the protocol buffer service represented by this
+ // message.
+ SourceContext source_context = 5;
+
+ // Included interfaces. See [Mixin][].
+ repeated Mixin mixins = 6;
+
+ // The source syntax of the service.
+ Syntax syntax = 7;
+}
+
+// Method represents a method of an API interface.
+message Method {
+
+ // The simple name of this method.
+ string name = 1;
+
+ // A URL of the input message type.
+ string request_type_url = 2;
+
+ // If true, the request is streamed.
+ bool request_streaming = 3;
+
+ // The URL of the output message type.
+ string response_type_url = 4;
+
+ // If true, the response is streamed.
+ bool response_streaming = 5;
+
+ // Any metadata attached to the method.
+ repeated Option options = 6;
+
+ // The source syntax of this method.
+ Syntax syntax = 7;
+}
+
+// Declares an API Interface to be included in this interface. The including
+// interface must redeclare all the methods from the included interface, but
+// documentation and options are inherited as follows:
+//
+// - If after comment and whitespace stripping, the documentation
+// string of the redeclared method is empty, it will be inherited
+// from the original method.
+//
+// - Each annotation belonging to the service config (http,
+// visibility) which is not set in the redeclared method will be
+// inherited.
+//
+// - If an http annotation is inherited, the path pattern will be
+// modified as follows. Any version prefix will be replaced by the
+// version of the including interface plus the [root][] path if
+// specified.
+//
+// Example of a simple mixin:
+//
+// package google.acl.v1;
+// service AccessControl {
+// // Get the underlying ACL object.
+// rpc GetAcl(GetAclRequest) returns (Acl) {
+// option (google.api.http).get = "/v1/{resource=**}:getAcl";
+// }
+// }
+//
+// package google.storage.v2;
+// service Storage {
+// rpc GetAcl(GetAclRequest) returns (Acl);
+//
+// // Get a data record.
+// rpc GetData(GetDataRequest) returns (Data) {
+// option (google.api.http).get = "/v2/{resource=**}";
+// }
+// }
+//
+// Example of a mixin configuration:
+//
+// apis:
+// - name: google.storage.v2.Storage
+// mixins:
+// - name: google.acl.v1.AccessControl
+//
+// The mixin construct implies that all methods in `AccessControl` are
+// also declared with the same name and request/response types in
+// `Storage`. A documentation generator or annotation processor will
+// see the effective `Storage.GetAcl` method after inheriting
+// documentation and annotations as follows:
+//
+// service Storage {
+// // Get the underlying ACL object.
+// rpc GetAcl(GetAclRequest) returns (Acl) {
+// option (google.api.http).get = "/v2/{resource=**}:getAcl";
+// }
+// ...
+// }
+//
+// Note how the version in the path pattern changed from `v1` to `v2`.
+//
+// If the `root` field in the mixin is specified, it should be a
+// relative path under which inherited HTTP paths are placed. Example:
+//
+// apis:
+// - name: google.storage.v2.Storage
+// mixins:
+// - name: google.acl.v1.AccessControl
+// root: acls
+//
+// This implies the following inherited HTTP annotation:
+//
+// service Storage {
+// // Get the underlying ACL object.
+// rpc GetAcl(GetAclRequest) returns (Acl) {
+// option (google.api.http).get = "/v2/acls/{resource=**}:getAcl";
+// }
+// ...
+// }
+message Mixin {
+ // The fully qualified name of the interface which is included.
+ string name = 1;
+
+ // If non-empty specifies a path under which inherited HTTP paths
+ // are rooted.
+ string root = 2;
+}
diff --git a/src/nimpb_buildpkg/protobuf/include/google/protobuf/compiler/plugin.proto b/src/nimpb_buildpkg/protobuf/include/google/protobuf/compiler/plugin.proto
new file mode 100644
index 0000000..5b55745
--- /dev/null
+++ b/src/nimpb_buildpkg/protobuf/include/google/protobuf/compiler/plugin.proto
@@ -0,0 +1,167 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Author: kenton@google.com (Kenton Varda)
+//
+// WARNING: The plugin interface is currently EXPERIMENTAL and is subject to
+// change.
+//
+// protoc (aka the Protocol Compiler) can be extended via plugins. A plugin is
+// just a program that reads a CodeGeneratorRequest from stdin and writes a
+// CodeGeneratorResponse to stdout.
+//
+// Plugins written using C++ can use google/protobuf/compiler/plugin.h instead
+// of dealing with the raw protocol defined here.
+//
+// A plugin executable needs only to be placed somewhere in the path. The
+// plugin should be named "protoc-gen-$NAME", and will then be used when the
+// flag "--${NAME}_out" is passed to protoc.
+
+syntax = "proto2";
+package google.protobuf.compiler;
+option java_package = "com.google.protobuf.compiler";
+option java_outer_classname = "PluginProtos";
+
+option go_package = "github.com/golang/protobuf/protoc-gen-go/plugin;plugin_go";
+
+import "google/protobuf/descriptor.proto";
+
+// The version number of protocol compiler.
+message Version {
+ optional int32 major = 1;
+ optional int32 minor = 2;
+ optional int32 patch = 3;
+ // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should
+ // be empty for mainline stable releases.
+ optional string suffix = 4;
+}
+
+// An encoded CodeGeneratorRequest is written to the plugin's stdin.
+message CodeGeneratorRequest {
+ // The .proto files that were explicitly listed on the command-line. The
+ // code generator should generate code only for these files. Each file's
+ // descriptor will be included in proto_file, below.
+ repeated string file_to_generate = 1;
+
+ // The generator parameter passed on the command-line.
+ optional string parameter = 2;
+
+ // FileDescriptorProtos for all files in files_to_generate and everything
+ // they import. The files will appear in topological order, so each file
+ // appears before any file that imports it.
+ //
+ // protoc guarantees that all proto_files will be written after
+ // the fields above, even though this is not technically guaranteed by the
+ // protobuf wire format. This theoretically could allow a plugin to stream
+ // in the FileDescriptorProtos and handle them one by one rather than read
+ // the entire set into memory at once. However, as of this writing, this
+ // is not similarly optimized on protoc's end -- it will store all fields in
+ // memory at once before sending them to the plugin.
+ //
+ // Type names of fields and extensions in the FileDescriptorProto are always
+ // fully qualified.
+ repeated FileDescriptorProto proto_file = 15;
+
+ // The version number of protocol compiler.
+ optional Version compiler_version = 3;
+
+}
+
+// The plugin writes an encoded CodeGeneratorResponse to stdout.
+message CodeGeneratorResponse {
+ // Error message. If non-empty, code generation failed. The plugin process
+ // should exit with status code zero even if it reports an error in this way.
+ //
+ // This should be used to indicate errors in .proto files which prevent the
+ // code generator from generating correct code. Errors which indicate a
+ // problem in protoc itself -- such as the input CodeGeneratorRequest being
+ // unparseable -- should be reported by writing a message to stderr and
+ // exiting with a non-zero status code.
+ optional string error = 1;
+
+ // Represents a single generated file.
+ message File {
+ // The file name, relative to the output directory. The name must not
+ // contain "." or ".." components and must be relative, not be absolute (so,
+ // the file cannot lie outside the output directory). "/" must be used as
+ // the path separator, not "\".
+ //
+ // If the name is omitted, the content will be appended to the previous
+ // file. This allows the generator to break large files into small chunks,
+ // and allows the generated text to be streamed back to protoc so that large
+ // files need not reside completely in memory at one time. Note that as of
+ // this writing protoc does not optimize for this -- it will read the entire
+ // CodeGeneratorResponse before writing files to disk.
+ optional string name = 1;
+
+ // If non-empty, indicates that the named file should already exist, and the
+ // content here is to be inserted into that file at a defined insertion
+ // point. This feature allows a code generator to extend the output
+ // produced by another code generator. The original generator may provide
+ // insertion points by placing special annotations in the file that look
+ // like:
+ // @@protoc_insertion_point(NAME)
+ // The annotation can have arbitrary text before and after it on the line,
+ // which allows it to be placed in a comment. NAME should be replaced with
+ // an identifier naming the point -- this is what other generators will use
+ // as the insertion_point. Code inserted at this point will be placed
+ // immediately above the line containing the insertion point (thus multiple
+ // insertions to the same point will come out in the order they were added).
+ // The double-@ is intended to make it unlikely that the generated code
+ // could contain things that look like insertion points by accident.
+ //
+ // For example, the C++ code generator places the following line in the
+ // .pb.h files that it generates:
+ // // @@protoc_insertion_point(namespace_scope)
+ // This line appears within the scope of the file's package namespace, but
+ // outside of any particular class. Another plugin can then specify the
+ // insertion_point "namespace_scope" to generate additional classes or
+ // other declarations that should be placed in this scope.
+ //
+ // Note that if the line containing the insertion point begins with
+ // whitespace, the same whitespace will be added to every line of the
+ // inserted text. This is useful for languages like Python, where
+ // indentation matters. In these languages, the insertion point comment
+ // should be indented the same amount as any inserted code will need to be
+ // in order to work correctly in that context.
+ //
+ // The code generator that generates the initial file and the one which
+ // inserts into it must both run as part of a single invocation of protoc.
+ // Code generators are executed in the order in which they appear on the
+ // command line.
+ //
+ // If |insertion_point| is present, |name| must also be present.
+ optional string insertion_point = 2;
+
+ // The file contents.
+ optional string content = 15;
+ }
+ repeated File file = 15;
+}
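The error-reporting convention described above (use CodeGeneratorResponse.error for problems
in the .proto input, and still exit with status zero) maps directly onto the Nim types in
plugin_pb.nim. A hedged sketch of a plugin main body; setError is assumed to exist per the
generator's set{field} naming pattern (it is not called verbatim anywhere in this diff), and
the module names follow the files above:

    import streams

    import nimpb/nimpb
    import plugin_pb

    proc runPlugin() =
        let pbsIn = newProtobufStream(newFileStream(stdin))
        let pbsOut = newProtobufStream(newFileStream(stdout))

        let request = readCodeGeneratorRequest(pbsIn)
        let response = newCodeGeneratorResponse()

        try:
            discard request   # code generation would populate response here
        except Exception:
            # Problems in the .proto input are reported through the error field;
            # protoc prints it and fails, but the plugin itself still exits with 0.
            # setError is assumed from the generator's naming convention.
            setError(response, getCurrentExceptionMsg())

        writeCodeGeneratorResponse(pbsOut, response)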
diff --git a/src/nimpb_buildpkg/protobuf/include/google/protobuf/descriptor.proto b/src/nimpb_buildpkg/protobuf/include/google/protobuf/descriptor.proto
new file mode 100644
index 0000000..8697a50
--- /dev/null
+++ b/src/nimpb_buildpkg/protobuf/include/google/protobuf/descriptor.proto
@@ -0,0 +1,872 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Author: kenton@google.com (Kenton Varda)
+// Based on original Protocol Buffers design by
+// Sanjay Ghemawat, Jeff Dean, and others.
+//
+// The messages in this file describe the definitions found in .proto files.
+// A valid .proto file can be translated directly to a FileDescriptorProto
+// without any other information (e.g. without reading its imports).
+
+
+syntax = "proto2";
+
+package google.protobuf;
+option go_package = "github.com/golang/protobuf/protoc-gen-go/descriptor;descriptor";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "DescriptorProtos";
+option csharp_namespace = "Google.Protobuf.Reflection";
+option objc_class_prefix = "GPB";
+option cc_enable_arenas = true;
+
+// descriptor.proto must be optimized for speed because reflection-based
+// algorithms don't work during bootstrapping.
+option optimize_for = SPEED;
+
+// The protocol compiler can output a FileDescriptorSet containing the .proto
+// files it parses.
+message FileDescriptorSet {
+ repeated FileDescriptorProto file = 1;
+}
+
+// Describes a complete .proto file.
+message FileDescriptorProto {
+ optional string name = 1; // file name, relative to root of source tree
+ optional string package = 2; // e.g. "foo", "foo.bar", etc.
+
+ // Names of files imported by this file.
+ repeated string dependency = 3;
+ // Indexes of the public imported files in the dependency list above.
+ repeated int32 public_dependency = 10;
+ // Indexes of the weak imported files in the dependency list.
+ // For Google-internal migration only. Do not use.
+ repeated int32 weak_dependency = 11;
+
+ // All top-level definitions in this file.
+ repeated DescriptorProto message_type = 4;
+ repeated EnumDescriptorProto enum_type = 5;
+ repeated ServiceDescriptorProto service = 6;
+ repeated FieldDescriptorProto extension = 7;
+
+ optional FileOptions options = 8;
+
+ // This field contains optional information about the original source code.
+ // You may safely remove this entire field without harming runtime
+ // functionality of the descriptors -- the information is needed only by
+ // development tools.
+ optional SourceCodeInfo source_code_info = 9;
+
+ // The syntax of the proto file.
+ // The supported values are "proto2" and "proto3".
+ optional string syntax = 12;
+}
+
+// Describes a message type.
+message DescriptorProto {
+ optional string name = 1;
+
+ repeated FieldDescriptorProto field = 2;
+ repeated FieldDescriptorProto extension = 6;
+
+ repeated DescriptorProto nested_type = 3;
+ repeated EnumDescriptorProto enum_type = 4;
+
+ message ExtensionRange {
+ optional int32 start = 1;
+ optional int32 end = 2;
+
+ optional ExtensionRangeOptions options = 3;
+ }
+ repeated ExtensionRange extension_range = 5;
+
+ repeated OneofDescriptorProto oneof_decl = 8;
+
+ optional MessageOptions options = 7;
+
+ // Range of reserved tag numbers. Reserved tag numbers may not be used by
+ // fields or extension ranges in the same message. Reserved ranges may
+ // not overlap.
+ message ReservedRange {
+ optional int32 start = 1; // Inclusive.
+ optional int32 end = 2; // Exclusive.
+ }
+ repeated ReservedRange reserved_range = 9;
+ // Reserved field names, which may not be used by fields in the same message.
+ // A given name may only be reserved once.
+ repeated string reserved_name = 10;
+}
+
+message ExtensionRangeOptions {
+ // The parser stores options it doesn't recognize here. See above.
+ repeated UninterpretedOption uninterpreted_option = 999;
+
+ // Clients can define custom options in extensions of this message. See above.
+ extensions 1000 to max;
+}
+
+// Describes a field within a message.
+message FieldDescriptorProto {
+ enum Type {
+ // 0 is reserved for errors.
+ // Order is weird for historical reasons.
+ TYPE_DOUBLE = 1;
+ TYPE_FLOAT = 2;
+ // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if
+ // negative values are likely.
+ TYPE_INT64 = 3;
+ TYPE_UINT64 = 4;
+ // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if
+ // negative values are likely.
+ TYPE_INT32 = 5;
+ TYPE_FIXED64 = 6;
+ TYPE_FIXED32 = 7;
+ TYPE_BOOL = 8;
+ TYPE_STRING = 9;
+ // Tag-delimited aggregate.
+ // Group type is deprecated and not supported in proto3. However, Proto3
+ // implementations should still be able to parse the group wire format and
+ // treat group fields as unknown fields.
+ TYPE_GROUP = 10;
+ TYPE_MESSAGE = 11; // Length-delimited aggregate.
+
+ // New in version 2.
+ TYPE_BYTES = 12;
+ TYPE_UINT32 = 13;
+ TYPE_ENUM = 14;
+ TYPE_SFIXED32 = 15;
+ TYPE_SFIXED64 = 16;
+ TYPE_SINT32 = 17; // Uses ZigZag encoding.
+ TYPE_SINT64 = 18; // Uses ZigZag encoding.
+ };
+
+ enum Label {
+ // 0 is reserved for errors
+ LABEL_OPTIONAL = 1;
+ LABEL_REQUIRED = 2;
+ LABEL_REPEATED = 3;
+ };
+
+ optional string name = 1;
+ optional int32 number = 3;
+ optional Label label = 4;
+
+ // If type_name is set, this need not be set. If both this and type_name
+ // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP.
+ optional Type type = 5;
+
+ // For message and enum types, this is the name of the type. If the name
+ // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping
+ // rules are used to find the type (i.e. first the nested types within this
+ // message are searched, then within the parent, on up to the root
+ // namespace).
+ optional string type_name = 6;
+
+ // For extensions, this is the name of the type being extended. It is
+ // resolved in the same manner as type_name.
+ optional string extendee = 2;
+
+ // For numeric types, contains the original text representation of the value.
+ // For booleans, "true" or "false".
+ // For strings, contains the default text contents (not escaped in any way).
+ // For bytes, contains the C escaped value. All bytes >= 128 are escaped.
+ // TODO(kenton): Base-64 encode?
+ optional string default_value = 7;
+
+ // If set, gives the index of a oneof in the containing type's oneof_decl
+ // list. This field is a member of that oneof.
+ optional int32 oneof_index = 9;
+
+ // JSON name of this field. The value is set by protocol compiler. If the
+ // user has set a "json_name" option on this field, that option's value
+ // will be used. Otherwise, it's deduced from the field's name by converting
+ // it to camelCase.
+ optional string json_name = 10;
+
+ optional FieldOptions options = 8;
+}
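The ZigZag notes in the Type enum above are what make TYPE_SINT32/TYPE_SINT64 cheap for
negative numbers: instead of a 10-byte two's-complement varint, the value is first mapped to
a small unsigned integer. An illustrative Nim sketch of the 32-bit mapping (not code taken
from nimpb):

    # 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, ... small magnitudes stay small.
    # Illustrative only, not taken from the nimpb runtime.
    proc zigzagEncode32(n: int32): uint32 =
        if n >= 0:
            uint32(n) * 2'u32
        else:
            uint32(-(n + 1)) * 2'u32 + 1'u32

    proc zigzagDecode32(u: uint32): int32 =
        if (u and 1'u32) == 0'u32:
            int32(u shr 1)
        else:
            -int32(u shr 1) - 1

    assert zigzagEncode32(-1'i32) == 1'u32
    assert zigzagDecode32(high(uint32)) == low(int32)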
+
+// Describes a oneof.
+message OneofDescriptorProto {
+ optional string name = 1;
+ optional OneofOptions options = 2;
+}
+
+// Describes an enum type.
+message EnumDescriptorProto {
+ optional string name = 1;
+
+ repeated EnumValueDescriptorProto value = 2;
+
+ optional EnumOptions options = 3;
+
+ // Range of reserved numeric values. Reserved values may not be used by
+ // entries in the same enum. Reserved ranges may not overlap.
+ //
+ // Note that this is distinct from DescriptorProto.ReservedRange in that it
+ // is inclusive such that it can appropriately represent the entire int32
+ // domain.
+ message EnumReservedRange {
+ optional int32 start = 1; // Inclusive.
+ optional int32 end = 2; // Inclusive.
+ }
+
+ // Range of reserved numeric values. Reserved numeric values may not be used
+ // by enum values in the same enum declaration. Reserved ranges may not
+ // overlap.
+ repeated EnumReservedRange reserved_range = 4;
+
+ // Reserved enum value names, which may not be reused. A given name may only
+ // be reserved once.
+ repeated string reserved_name = 5;
+}
+
+// Describes a value within an enum.
+message EnumValueDescriptorProto {
+ optional string name = 1;
+ optional int32 number = 2;
+
+ optional EnumValueOptions options = 3;
+}
+
+// Describes a service.
+message ServiceDescriptorProto {
+ optional string name = 1;
+ repeated MethodDescriptorProto method = 2;
+
+ optional ServiceOptions options = 3;
+}
+
+// Describes a method of a service.
+message MethodDescriptorProto {
+ optional string name = 1;
+
+ // Input and output type names. These are resolved in the same way as
+ // FieldDescriptorProto.type_name, but must refer to a message type.
+ optional string input_type = 2;
+ optional string output_type = 3;
+
+ optional MethodOptions options = 4;
+
+ // Identifies if client streams multiple client messages
+ optional bool client_streaming = 5 [default=false];
+ // Identifies if server streams multiple server messages
+ optional bool server_streaming = 6 [default=false];
+}
+
+
+// ===================================================================
+// Options
+
+// Each of the definitions above may have "options" attached. These are
+// just annotations which may cause code to be generated slightly differently
+// or may contain hints for code that manipulates protocol messages.
+//
+// Clients may define custom options as extensions of the *Options messages.
+// These extensions may not yet be known at parsing time, so the parser cannot
+// store the values in them. Instead it stores them in a field in the *Options
+// message called uninterpreted_option. This field must have the same name
+// across all *Options messages. We then use this field to populate the
+// extensions when we build a descriptor, at which point all protos have been
+// parsed and so all extensions are known.
+//
+// Extension numbers for custom options may be chosen as follows:
+// * For options which will only be used within a single application or
+// organization, or for experimental options, use field numbers 50000
+// through 99999. It is up to you to ensure that you do not use the
+// same number for multiple options.
+// * For options which will be published and used publicly by multiple
+// independent entities, e-mail protobuf-global-extension-registry@google.com
+// to reserve extension numbers. Simply provide your project name (e.g.
+// Objective-C plugin) and your project website (if available) -- there's no
+// need to explain how you intend to use them. Usually you only need one
+// extension number. You can declare multiple options with only one extension
+// number by putting them in a sub-message. See the Custom Options section of
+// the docs for examples:
+// https://developers.google.com/protocol-buffers/docs/proto#options
+// If this turns out to be popular, a web service will be set up
+// to automatically assign option numbers.
+
+
+message FileOptions {
+
+ // Sets the Java package where classes generated from this .proto will be
+ // placed. By default, the proto package is used, but this is often
+ // inappropriate because proto packages do not normally start with backwards
+ // domain names.
+ optional string java_package = 1;
+
+
+ // If set, all the classes from the .proto file are wrapped in a single
+ // outer class with the given name. This applies to both Proto1
+ // (equivalent to the old "--one_java_file" option) and Proto2 (where
+ // a .proto always translates to a single class, but you may want to
+ // explicitly choose the class name).
+ optional string java_outer_classname = 8;
+
+ // If set true, then the Java code generator will generate a separate .java
+ // file for each top-level message, enum, and service defined in the .proto
+ // file. Thus, these types will *not* be nested inside the outer class
+ // named by java_outer_classname. However, the outer class will still be
+ // generated to contain the file's getDescriptor() method as well as any
+ // top-level extensions defined in the file.
+ optional bool java_multiple_files = 10 [default=false];
+
+ // This option does nothing.
+ optional bool java_generate_equals_and_hash = 20 [deprecated=true];
+
+ // If set true, then the Java2 code generator will generate code that
+ // throws an exception whenever an attempt is made to assign a non-UTF-8
+ // byte sequence to a string field.
+ // Message reflection will do the same.
+ // However, an extension field still accepts non-UTF-8 byte sequences.
+  // This option has no effect when used with the lite runtime.
+ optional bool java_string_check_utf8 = 27 [default=false];
+
+
+ // Generated classes can be optimized for speed or code size.
+ enum OptimizeMode {
+ SPEED = 1; // Generate complete code for parsing, serialization,
+ // etc.
+ CODE_SIZE = 2; // Use ReflectionOps to implement these methods.
+ LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime.
+ }
+ optional OptimizeMode optimize_for = 9 [default=SPEED];
+
+ // Sets the Go package where structs generated from this .proto will be
+ // placed. If omitted, the Go package will be derived from the following:
+ // - The basename of the package import path, if provided.
+ // - Otherwise, the package statement in the .proto file, if present.
+ // - Otherwise, the basename of the .proto file, without extension.
+ optional string go_package = 11;
+
+
+
+ // Should generic services be generated in each language? "Generic" services
+ // are not specific to any particular RPC system. They are generated by the
+ // main code generators in each language (without additional plugins).
+ // Generic services were the only kind of service generation supported by
+ // early versions of google.protobuf.
+ //
+ // Generic services are now considered deprecated in favor of using plugins
+ // that generate code specific to your particular RPC system. Therefore,
+ // these default to false. Old code which depends on generic services should
+ // explicitly set them to true.
+ optional bool cc_generic_services = 16 [default=false];
+ optional bool java_generic_services = 17 [default=false];
+ optional bool py_generic_services = 18 [default=false];
+ optional bool php_generic_services = 42 [default=false];
+
+ // Is this file deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for everything in the file, or it will be completely ignored; in the very
+ // least, this is a formalization for deprecating files.
+ optional bool deprecated = 23 [default=false];
+
+ // Enables the use of arenas for the proto messages in this file. This applies
+ // only to generated classes for C++.
+ optional bool cc_enable_arenas = 31 [default=false];
+
+
+ // Sets the objective c class prefix which is prepended to all objective c
+ // generated classes from this .proto. There is no default.
+ optional string objc_class_prefix = 36;
+
+ // Namespace for generated classes; defaults to the package.
+ optional string csharp_namespace = 37;
+
+ // By default Swift generators will take the proto package and CamelCase it
+ // replacing '.' with underscore and use that to prefix the types/symbols
+  // defined. When this option is provided, they will use this value instead
+ // to prefix the types/symbols defined.
+ optional string swift_prefix = 39;
+
+ // Sets the php class prefix which is prepended to all php generated classes
+ // from this .proto. Default is empty.
+ optional string php_class_prefix = 40;
+
+ // Use this option to change the namespace of php generated classes. Default
+ // is empty. When this option is empty, the package name will be used for
+ // determining the namespace.
+ optional string php_namespace = 41;
+
+ // The parser stores options it doesn't recognize here.
+ // See the documentation for the "Options" section above.
+ repeated UninterpretedOption uninterpreted_option = 999;
+
+ // Clients can define custom options in extensions of this message.
+ // See the documentation for the "Options" section above.
+ extensions 1000 to max;
+
+ reserved 38;
+}
+
+message MessageOptions {
+ // Set true to use the old proto1 MessageSet wire format for extensions.
+ // This is provided for backwards-compatibility with the MessageSet wire
+ // format. You should not use this for any other reason: It's less
+ // efficient, has fewer features, and is more complicated.
+ //
+ // The message must be defined exactly as follows:
+ // message Foo {
+ // option message_set_wire_format = true;
+ // extensions 4 to max;
+ // }
+ // Note that the message cannot have any defined fields; MessageSets only
+ // have extensions.
+ //
+ // All extensions of your type must be singular messages; e.g. they cannot
+ // be int32s, enums, or repeated messages.
+ //
+ // Because this is an option, the above two restrictions are not enforced by
+ // the protocol compiler.
+ optional bool message_set_wire_format = 1 [default=false];
+
+ // Disables the generation of the standard "descriptor()" accessor, which can
+ // conflict with a field of the same name. This is meant to make migration
+ // from proto1 easier; new code should avoid fields named "descriptor".
+ optional bool no_standard_descriptor_accessor = 2 [default=false];
+
+ // Is this message deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for the message, or it will be completely ignored; at the very least,
+ // this is a formalization for deprecating messages.
+ optional bool deprecated = 3 [default=false];
+
+ // Whether the message is an automatically generated map entry type for the
+ // maps field.
+ //
+ // For maps fields:
+ // map<KeyType, ValueType> map_field = 1;
+ // The parsed descriptor looks like:
+ // message MapFieldEntry {
+ // option map_entry = true;
+ // optional KeyType key = 1;
+ // optional ValueType value = 2;
+ // }
+ // repeated MapFieldEntry map_field = 1;
+ //
+ // Implementations may choose not to generate the map_entry=true message, but
+ // use a native map in the target language to hold the keys and values.
+ // The reflection APIs in such implementations still need to work as
+ // if the field is a repeated message field.
+ //
+ // NOTE: Do not set the option in .proto files. Always use the maps syntax
+ // instead. The option should only be implicitly set by the proto compiler
+ // parser.
+ optional bool map_entry = 7;
+
+ reserved 8; // javalite_serializable
+ reserved 9; // javanano_as_lite
+
+ // The parser stores options it doesn't recognize here. See above.
+ repeated UninterpretedOption uninterpreted_option = 999;
+
+ // Clients can define custom options in extensions of this message. See above.
+ extensions 1000 to max;
+}
+
+message FieldOptions {
+ // The ctype option instructs the C++ code generator to use a different
+ // representation of the field than it normally would. See the specific
+ // options below. This option is not yet implemented in the open source
+ // release -- sorry, we'll try to include it in a future version!
+ optional CType ctype = 1 [default = STRING];
+ enum CType {
+ // Default mode.
+ STRING = 0;
+
+ CORD = 1;
+
+ STRING_PIECE = 2;
+ }
+ // The packed option can be enabled for repeated primitive fields to enable
+ // a more efficient representation on the wire. Rather than repeatedly
+ // writing the tag and type for each element, the entire array is encoded as
+ // a single length-delimited blob. In proto3, only explicitly setting it to
+ // false will avoid using packed encoding.
+ optional bool packed = 2;
+
+ // The jstype option determines the JavaScript type used for values of the
+ // field. The option is permitted only for 64 bit integral and fixed types
+ // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING
+ // is represented as a JavaScript string, which avoids loss of precision that
+ // can happen when a large value is converted to a floating point JavaScript
+ // number.
+ // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to
+ // use the JavaScript "number" type. The behavior of the default option
+ // JS_NORMAL is implementation dependent.
+ //
+ // This option is an enum to permit additional types to be added, e.g.
+ // goog.math.Integer.
+ optional JSType jstype = 6 [default = JS_NORMAL];
+ enum JSType {
+ // Use the default type.
+ JS_NORMAL = 0;
+
+ // Use JavaScript strings.
+ JS_STRING = 1;
+
+ // Use JavaScript numbers.
+ JS_NUMBER = 2;
+ }
+
+ // Should this field be parsed lazily? Lazy applies only to message-type
+ // fields. It means that when the outer message is initially parsed, the
+ // inner message's contents will not be parsed but instead stored in encoded
+ // form. The inner message will actually be parsed when it is first accessed.
+ //
+ // This is only a hint. Implementations are free to choose whether to use
+ // eager or lazy parsing regardless of the value of this option. However,
+ // setting this option true suggests that the protocol author believes that
+ // using lazy parsing on this field is worth the additional bookkeeping
+ // overhead typically needed to implement it.
+ //
+ // This option does not affect the public interface of any generated code;
+ // all method signatures remain the same. Furthermore, thread-safety of the
+ // interface is not affected by this option; const methods remain safe to
+ // call from multiple threads concurrently, while non-const methods continue
+ // to require exclusive access.
+ //
+ //
+ // Note that implementations may choose not to check required fields within
+ // a lazy sub-message. That is, calling IsInitialized() on the outer message
+ // may return true even if the inner message has missing required fields.
+ // This is necessary because otherwise the inner message would have to be
+ // parsed in order to perform the check, defeating the purpose of lazy
+ // parsing. An implementation which chooses not to check required fields
+ // must be consistent about it. That is, for any particular sub-message, the
+ // implementation must either *always* check its required fields, or *never*
+ // check its required fields, regardless of whether or not the message has
+ // been parsed.
+ optional bool lazy = 5 [default=false];
+
+ // Is this field deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for accessors, or it will be completely ignored; at the very least, this
+ // is a formalization for deprecating fields.
+ optional bool deprecated = 3 [default=false];
+
+ // For Google-internal migration only. Do not use.
+ optional bool weak = 10 [default=false];
+
+
+ // The parser stores options it doesn't recognize here. See above.
+ repeated UninterpretedOption uninterpreted_option = 999;
+
+ // Clients can define custom options in extensions of this message. See above.
+ extensions 1000 to max;
+
+ reserved 4; // removed jtype
+}
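
To make the packed option comment above concrete: a packed repeated field writes a single tag and length followed by the raw element encodings, instead of one tag per element. The sketch below is a minimal Python illustration (the helper names are invented for this note, not taken from any library); it encodes field number 4 with the values [3, 270, 86942], which yields the wire bytes 22 06 03 8e 02 9e a7 05.

    # Sketch: packed encoding of a repeated varint field (field number 4).
    def encode_varint(value: int) -> bytes:
        out = bytearray()
        while True:
            byte = value & 0x7F
            value >>= 7
            if value:
                out.append(byte | 0x80)   # more bytes follow
            else:
                out.append(byte)
                return bytes(out)

    def encode_packed_field(field_number: int, values) -> bytes:
        payload = b"".join(encode_varint(v) for v in values)
        tag = encode_varint((field_number << 3) | 2)   # wire type 2 = length-delimited
        return tag + encode_varint(len(payload)) + payload

    print(encode_packed_field(4, [3, 270, 86942]).hex())   # 2206038e029ea705
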
+
+message OneofOptions {
+ // The parser stores options it doesn't recognize here. See above.
+ repeated UninterpretedOption uninterpreted_option = 999;
+
+ // Clients can define custom options in extensions of this message. See above.
+ extensions 1000 to max;
+}
+
+message EnumOptions {
+
+ // Set this option to true to allow mapping different tag names to the same
+ // value.
+ optional bool allow_alias = 2;
+
+ // Is this enum deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for the enum, or it will be completely ignored; at the very least, this
+ // is a formalization for deprecating enums.
+ optional bool deprecated = 3 [default=false];
+
+ reserved 5; // javanano_as_lite
+
+ // The parser stores options it doesn't recognize here. See above.
+ repeated UninterpretedOption uninterpreted_option = 999;
+
+ // Clients can define custom options in extensions of this message. See above.
+ extensions 1000 to max;
+}
+
+message EnumValueOptions {
+ // Is this enum value deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for the enum value, or it will be completely ignored; at the very least,
+ // this is a formalization for deprecating enum values.
+ optional bool deprecated = 1 [default=false];
+
+ // The parser stores options it doesn't recognize here. See above.
+ repeated UninterpretedOption uninterpreted_option = 999;
+
+ // Clients can define custom options in extensions of this message. See above.
+ extensions 1000 to max;
+}
+
+message ServiceOptions {
+
+ // Note: Field numbers 1 through 32 are reserved for Google's internal RPC
+ // framework. We apologize for hoarding these numbers to ourselves, but
+ // we were already using them long before we decided to release Protocol
+ // Buffers.
+
+ // Is this service deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for the service, or it will be completely ignored; at the very least,
+ // this is a formalization for deprecating services.
+ optional bool deprecated = 33 [default=false];
+
+ // The parser stores options it doesn't recognize here. See above.
+ repeated UninterpretedOption uninterpreted_option = 999;
+
+ // Clients can define custom options in extensions of this message. See above.
+ extensions 1000 to max;
+}
+
+message MethodOptions {
+
+ // Note: Field numbers 1 through 32 are reserved for Google's internal RPC
+ // framework. We apologize for hoarding these numbers to ourselves, but
+ // we were already using them long before we decided to release Protocol
+ // Buffers.
+
+ // Is this method deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for the method, or it will be completely ignored; at the very least,
+ // this is a formalization for deprecating methods.
+ optional bool deprecated = 33 [default=false];
+
+ // Is this method side-effect-free (or safe in HTTP parlance), or idempotent,
+ // or neither? HTTP-based RPC implementations may choose the GET verb for
+ // safe methods, and the PUT verb for idempotent methods, instead of the
+ // default POST.
+ enum IdempotencyLevel {
+ IDEMPOTENCY_UNKNOWN = 0;
+ NO_SIDE_EFFECTS = 1; // implies idempotent
+ IDEMPOTENT = 2; // idempotent, but may have side effects
+ }
+ optional IdempotencyLevel idempotency_level =
+ 34 [default=IDEMPOTENCY_UNKNOWN];
+
+ // The parser stores options it doesn't recognize here. See above.
+ repeated UninterpretedOption uninterpreted_option = 999;
+
+ // Clients can define custom options in extensions of this message. See above.
+ extensions 1000 to max;
+}
+
+
+// A message representing an option the parser does not recognize. This only
+// appears in options protos created by the compiler::Parser class.
+// DescriptorPool resolves these when building Descriptor objects. Therefore,
+// options protos in descriptor objects (e.g. returned by Descriptor::options(),
+// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions
+// in them.
+message UninterpretedOption {
+ // The name of the uninterpreted option. Each string represents a segment in
+ // a dot-separated name. is_extension is true iff a segment represents an
+ // extension (denoted with parentheses in options specs in .proto files).
+ // E.g., { ["foo", false], ["bar.baz", true], ["qux", false] } represents
+ // "foo.(bar.baz).qux".
+ message NamePart {
+ required string name_part = 1;
+ required bool is_extension = 2;
+ }
+ repeated NamePart name = 2;
+
+ // The value of the uninterpreted option, in whatever type the tokenizer
+ // identified it as during parsing. Exactly one of these should be set.
+ optional string identifier_value = 3;
+ optional uint64 positive_int_value = 4;
+ optional int64 negative_int_value = 5;
+ optional double double_value = 6;
+ optional bytes string_value = 7;
+ optional string aggregate_value = 8;
+}
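
To make the NamePart encoding above concrete, here is a small hypothetical Python sketch that rebuilds the dotted option name from (name_part, is_extension) pairs, following the example given in the comment:

    # Sketch: rebuild "foo.(bar.baz).qux" from UninterpretedOption.NamePart entries.
    def option_name(parts):
        # parts: iterable of (name_part, is_extension) tuples
        return ".".join(f"({name})" if is_ext else name for name, is_ext in parts)

    print(option_name([("foo", False), ("bar.baz", True), ("qux", False)]))
    # -> foo.(bar.baz).qux
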
+
+// ===================================================================
+// Optional source code info
+
+// Encapsulates information about the original source file from which a
+// FileDescriptorProto was generated.
+message SourceCodeInfo {
+ // A Location identifies a piece of source code in a .proto file which
+ // corresponds to a particular definition. This information is intended
+ // to be useful to IDEs, code indexers, documentation generators, and similar
+ // tools.
+ //
+ // For example, say we have a file like:
+ // message Foo {
+ // optional string foo = 1;
+ // }
+ // Let's look at just the field definition:
+ // optional string foo = 1;
+ // ^ ^^ ^^ ^ ^^^
+ // a bc de f ghi
+ // We have the following locations:
+ // span path represents
+ // [a,i) [ 4, 0, 2, 0 ] The whole field definition.
+ // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional).
+ // [c,d) [ 4, 0, 2, 0, 5 ] The type (string).
+ // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo).
+ // [g,h) [ 4, 0, 2, 0, 3 ] The number (1).
+ //
+ // Notes:
+ // - A location may refer to a repeated field itself (i.e. not to any
+ // particular index within it). This is used whenever a set of elements are
+ // logically enclosed in a single code segment. For example, an entire
+ // extend block (possibly containing multiple extension definitions) will
+ // have an outer location whose path refers to the "extensions" repeated
+ // field without an index.
+ // - Multiple locations may have the same path. This happens when a single
+ // logical declaration is spread out across multiple places. The most
+ // obvious example is the "extend" block again -- there may be multiple
+ // extend blocks in the same scope, each of which will have the same path.
+ // - A location's span is not always a subset of its parent's span. For
+ // example, the "extendee" of an extension declaration appears at the
+ // beginning of the "extend" block and is shared by all extensions within
+ // the block.
+ // - Just because a location's span is a subset of some other location's span
+ // does not mean that it is a descendant. For example, a "group" defines
+ // both a type and a field in a single declaration. Thus, the locations
+ // corresponding to the type and field and their components will overlap.
+ // - Code which tries to interpret locations should probably be designed to
+ // ignore those that it doesn't understand, as more types of locations could
+ // be recorded in the future.
+ repeated Location location = 1;
+ message Location {
+ // Identifies which part of the FileDescriptorProto was defined at this
+ // location.
+ //
+ // Each element is a field number or an index. They form a path from
+ // the root FileDescriptorProto to the place where the definition appears. For
+ // example, this path:
+ // [ 4, 3, 2, 7, 1 ]
+ // refers to:
+ // file.message_type(3) // 4, 3
+ // .field(7) // 2, 7
+ // .name() // 1
+ // This is because FileDescriptorProto.message_type has field number 4:
+ // repeated DescriptorProto message_type = 4;
+ // and DescriptorProto.field has field number 2:
+ // repeated FieldDescriptorProto field = 2;
+ // and FieldDescriptorProto.name has field number 1:
+ // optional string name = 1;
+ //
+ // Thus, the above path gives the location of a field name. If we removed
+ // the last element:
+ // [ 4, 3, 2, 7 ]
+ // this path refers to the whole field declaration (from the beginning
+ // of the label to the terminating semicolon).
+ repeated int32 path = 1 [packed=true];
+
+ // Always has exactly three or four elements: start line, start column,
+ // end line (optional, otherwise assumed same as start line), end column.
+ // These are packed into a single field for efficiency. Note that line
+ // and column numbers are zero-based -- typically you will want to add
+ // 1 to each before displaying to a user.
+ repeated int32 span = 2 [packed=true];
+
+ // If this SourceCodeInfo represents a complete declaration, these are any
+ // comments appearing before and after the declaration which appear to be
+ // attached to the declaration.
+ //
+ // A series of line comments appearing on consecutive lines, with no other
+ // tokens appearing on those lines, will be treated as a single comment.
+ //
+ // leading_detached_comments will keep paragraphs of comments that appear
+ // before (but not connected to) the current element. Each paragraph,
+ // separated by empty lines, will be one comment element in the repeated
+ // field.
+ //
+ // Only the comment content is provided; comment markers (e.g. //) are
+ // stripped out. For block comments, leading whitespace and an asterisk
+ // will be stripped from the beginning of each line other than the first.
+ // Newlines are included in the output.
+ //
+ // Examples:
+ //
+ // optional int32 foo = 1; // Comment attached to foo.
+ // // Comment attached to bar.
+ // optional int32 bar = 2;
+ //
+ // optional string baz = 3;
+ // // Comment attached to baz.
+ // // Another line attached to baz.
+ //
+ // // Comment attached to qux.
+ // //
+ // // Another line attached to qux.
+ // optional double qux = 4;
+ //
+ // // Detached comment for corge. This is not leading or trailing comments
+ // // to qux or corge because there are blank lines separating it from
+ // // both.
+ //
+ // // Detached comment for corge paragraph 2.
+ //
+ // optional string corge = 5;
+ // /* Block comment attached
+ // * to corge. Leading asterisks
+ // * will be removed. */
+ // /* Block comment attached to
+ // * grault. */
+ // optional int32 grault = 6;
+ //
+ // // ignored detached comments.
+ optional string leading_comments = 3;
+ optional string trailing_comments = 4;
+ repeated string leading_detached_comments = 6;
+ }
+}
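
As a rough illustration of the path semantics documented above (message_type = 4, field = 2, name = 1, with a repeated field number followed by an index), the Python sketch below walks such a path over a descriptor mocked up as nested dicts and lists. Real tooling would walk FileDescriptorProto messages; the mock is only there to keep the example self-contained.

    # Sketch: resolve a SourceCodeInfo.Location path such as [4, 3, 2, 7, 1].
    def resolve_path(descriptor, path):
        node = descriptor
        i = 0
        while i < len(path):
            node = node[path[i]]            # descend into the field with this number
            i += 1
            if isinstance(node, list) and i < len(path):
                node = node[path[i]]        # repeated field: next element is an index
                i += 1
        return node

    field_7 = {1: "my_field"}                      # FieldDescriptorProto.name = 1
    msg_3 = {1: "Foo", 2: [{}] * 7 + [field_7]}    # DescriptorProto.field = 2
    file_proto = {4: [{}, {}, {}, msg_3]}          # FileDescriptorProto.message_type = 4
    print(resolve_path(file_proto, [4, 3, 2, 7, 1]))   # -> my_field
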
+
+// Describes the relationship between generated code and its original source
+// file. A GeneratedCodeInfo message is associated with only one generated
+// source file, but may contain references to different source .proto files.
+message GeneratedCodeInfo {
+ // An Annotation connects some span of text in generated code to an element
+ // of its generating .proto file.
+ repeated Annotation annotation = 1;
+ message Annotation {
+ // Identifies the element in the original source .proto file. This field
+ // is formatted the same as SourceCodeInfo.Location.path.
+ repeated int32 path = 1 [packed=true];
+
+ // Identifies the filesystem path to the original source .proto.
+ optional string source_file = 2;
+
+ // Identifies the starting offset in bytes in the generated code
+ // that relates to the identified object.
+ optional int32 begin = 3;
+
+ // Identifies the ending offset in bytes in the generated code that
+ // relates to the identified offset. The end offset should be one past
+ // the last relevant byte (so the length of the text = end - begin).
+ optional int32 end = 4;
+ }
+}
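
A small sketch of how the begin/end offsets above are meant to be consumed: the annotated span is generated[begin:end] over the bytes of the generated file, since end is one past the last relevant byte. The file content and annotation values below are made up purely for illustration.

    # Sketch: slice out the generated text covered by a GeneratedCodeInfo.Annotation.
    generated = b"type Foo struct { Name string }"
    annotation = {"source_file": "foo.proto", "path": [4, 0], "begin": 5, "end": 8}
    span = generated[annotation["begin"]:annotation["end"]]
    print(span.decode())   # -> Foo   (length == end - begin)
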
diff --git a/src/nimpb_buildpkg/protobuf/include/google/protobuf/duration.proto b/src/nimpb_buildpkg/protobuf/include/google/protobuf/duration.proto
new file mode 100644
index 0000000..975fce4
--- /dev/null
+++ b/src/nimpb_buildpkg/protobuf/include/google/protobuf/duration.proto
@@ -0,0 +1,117 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option cc_enable_arenas = true;
+option go_package = "github.com/golang/protobuf/ptypes/duration";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "DurationProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+
+// A Duration represents a signed, fixed-length span of time represented
+// as a count of seconds and fractions of seconds at nanosecond
+// resolution. It is independent of any calendar and concepts like "day"
+// or "month". It is related to Timestamp in that the difference between
+// two Timestamp values is a Duration and it can be added or subtracted
+// from a Timestamp. Range is approximately +-10,000 years.
+//
+// # Examples
+//
+// Example 1: Compute Duration from two Timestamps in pseudo code.
+//
+// Timestamp start = ...;
+// Timestamp end = ...;
+// Duration duration = ...;
+//
+// duration.seconds = end.seconds - start.seconds;
+// duration.nanos = end.nanos - start.nanos;
+//
+// if (duration.seconds < 0 && duration.nanos > 0) {
+// duration.seconds += 1;
+// duration.nanos -= 1000000000;
+// } else if (duration.seconds > 0 && duration.nanos < 0) {
+// duration.seconds -= 1;
+// duration.nanos += 1000000000;
+// }
+//
+// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
+//
+// Timestamp start = ...;
+// Duration duration = ...;
+// Timestamp end = ...;
+//
+// end.seconds = start.seconds + duration.seconds;
+// end.nanos = start.nanos + duration.nanos;
+//
+// if (end.nanos < 0) {
+// end.seconds -= 1;
+// end.nanos += 1000000000;
+// } else if (end.nanos >= 1000000000) {
+// end.seconds += 1;
+// end.nanos -= 1000000000;
+// }
+//
+// Example 3: Compute Duration from datetime.timedelta in Python.
+//
+// td = datetime.timedelta(days=3, minutes=10)
+// duration = Duration()
+// duration.FromTimedelta(td)
+//
+// # JSON Mapping
+//
+// In JSON format, the Duration type is encoded as a string rather than an
+// object, where the string ends in the suffix "s" (indicating seconds) and
+// is preceded by the number of seconds, with nanoseconds expressed as
+// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
+// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
+// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
+// microsecond should be expressed in JSON format as "3.000001s".
+//
+//
+message Duration {
+
+ // Signed seconds of the span of time. Must be from -315,576,000,000
+ // to +315,576,000,000 inclusive. Note: these bounds are computed from:
+ // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
+ int64 seconds = 1;
+
+ // Signed fractions of a second at nanosecond resolution of the span
+ // of time. Durations less than one second are represented with a 0
+ // `seconds` field and a positive or negative `nanos` field. For durations
+ // of one second or more, a non-zero value for the `nanos` field must be
+ // of the same sign as the `seconds` field. Must be from -999,999,999
+ // to +999,999,999 inclusive.
+ int32 nanos = 2;
+}
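
To tie the pseudo code and the JSON mapping above together, here is a small Python sketch (plain Python, not the protobuf runtime API) that computes a normalized Duration from two (seconds, nanos) timestamps and renders it in the string form described above, keeping 0, 3, 6 or 9 fractional digits:

    NANOS_PER_SECOND = 1_000_000_000

    # start/end are (seconds, nanos) pairs; returns a normalized (seconds, nanos).
    def duration_between(start, end):
        seconds = end[0] - start[0]
        nanos = end[1] - start[1]
        if seconds < 0 and nanos > 0:
            seconds += 1
            nanos -= NANOS_PER_SECOND
        elif seconds > 0 and nanos < 0:
            seconds -= 1
            nanos += NANOS_PER_SECOND
        return seconds, nanos

    def to_json_string(seconds, nanos):
        if nanos == 0:
            return f"{seconds}s"
        frac = f"{abs(nanos):09d}"
        while frac.endswith("000"):        # trim to 3, 6 or 9 fractional digits
            frac = frac[:-3]
        sign = "-" if (seconds == 0 and nanos < 0) else ""
        return f"{sign}{seconds}.{frac}s"

    secs, nanos = duration_between((10, 500_000_000), (13, 500_001_000))
    print(secs, nanos)                     # 3 1000
    print(to_json_string(secs, nanos))     # 3.000001s
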
diff --git a/src/nimpb_buildpkg/protobuf/include/google/protobuf/empty.proto b/src/nimpb_buildpkg/protobuf/include/google/protobuf/empty.proto
new file mode 100644
index 0000000..03cacd2
--- /dev/null
+++ b/src/nimpb_buildpkg/protobuf/include/google/protobuf/empty.proto
@@ -0,0 +1,52 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option go_package = "github.com/golang/protobuf/ptypes/empty";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "EmptyProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+option cc_enable_arenas = true;
+
+// A generic empty message that you can re-use to avoid defining duplicated
+// empty messages in your APIs. A typical example is to use it as the request
+// or the response type of an API method. For instance:
+//
+// service Foo {
+// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
+// }
+//
+// The JSON representation for `Empty` is empty JSON object `{}`.
+message Empty {}
diff --git a/src/nimpb_buildpkg/protobuf/include/google/protobuf/field_mask.proto b/src/nimpb_buildpkg/protobuf/include/google/protobuf/field_mask.proto
new file mode 100644
index 0000000..eb96ba0
--- /dev/null
+++ b/src/nimpb_buildpkg/protobuf/include/google/protobuf/field_mask.proto
@@ -0,0 +1,252 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "FieldMaskProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+option go_package = "google.golang.org/genproto/protobuf/field_mask;field_mask";
+
+// `FieldMask` represents a set of symbolic field paths, for example:
+//
+// paths: "f.a"
+// paths: "f.b.d"
+//
+// Here `f` represents a field in some root message, `a` and `b`
+// fields in the message found in `f`, and `d` a field found in the
+// message in `f.b`.
+//
+// Field masks are used to specify a subset of fields that should be
+// returned by a get operation or modified by an update operation.
+// Field masks also have a custom JSON encoding (see below).
+//
+// # Field Masks in Projections
+//
+// When used in the context of a projection, a response message or
+// sub-message is filtered by the API to only contain those fields as
+// specified in the mask. For example, if the mask in the previous
+// example is applied to a response message as follows:
+//
+// f {
+// a : 22
+// b {
+// d : 1
+// x : 2
+// }
+// y : 13
+// }
+// z: 8
+//
+// The result will not contain specific values for fields x,y and z
+// (their value will be set to the default, and omitted in proto text
+// output):
+//
+//
+// f {
+// a : 22
+// b {
+// d : 1
+// }
+// }
+//
+// A repeated field is not allowed except at the last position of a
+// paths string.
+//
+// If a FieldMask object is not present in a get operation, the
+// operation applies to all fields (as if a FieldMask of all fields
+// had been specified).
+//
+// Note that a field mask does not necessarily apply to the
+// top-level response message. In case of a REST get operation, the
+// field mask applies directly to the response, but in case of a REST
+// list operation, the mask instead applies to each individual message
+// in the returned resource list. In case of a REST custom method,
+// other definitions may be used. Where the mask applies will be
+// clearly documented together with its declaration in the API. In
+// any case, the effect on the returned resource/resources is required
+// behavior for APIs.
+//
+// # Field Masks in Update Operations
+//
+// A field mask in update operations specifies which fields of the
+// targeted resource are going to be updated. The API is required
+// to only change the values of the fields as specified in the mask
+// and leave the others untouched. If a resource is passed in to
+// describe the updated values, the API ignores the values of all
+// fields not covered by the mask.
+//
+// If a repeated field is specified for an update operation, the existing
+// repeated values in the target resource will be overwritten by the new values.
+// Note that a repeated field is only allowed in the last position of a `paths`
+// string.
+//
+// If a sub-message is specified in the last position of the field mask for an
+// update operation, then the existing sub-message in the target resource is
+// overwritten. Given the target message:
+//
+// f {
+// b {
+// d : 1
+// x : 2
+// }
+// c : 1
+// }
+//
+// And an update message:
+//
+// f {
+// b {
+// d : 10
+// }
+// }
+//
+// then if the field mask is:
+//
+// paths: "f.b"
+//
+// then the result will be:
+//
+// f {
+// b {
+// d : 10
+// }
+// c : 1
+// }
+//
+// However, if the update mask was:
+//
+// paths: "f.b.d"
+//
+// then the result would be:
+//
+// f {
+// b {
+// d : 10
+// x : 2
+// }
+// c : 1
+// }
+//
+// In order to reset a field's value to the default, the field must
+// be in the mask and set to the default value in the provided resource.
+// Hence, in order to reset all fields of a resource, provide a default
+// instance of the resource and set all fields in the mask, or do
+// not provide a mask as described below.
+//
+// If a field mask is not present on update, the operation applies to
+// all fields (as if a field mask of all fields has been specified).
+// Note that in the presence of schema evolution, this may mean that
+// fields the client does not know and has therefore not filled into
+// the request will be reset to their default. If this is unwanted
+// behavior, a specific service may require a client to always specify
+// a field mask, producing an error if not.
+//
+// As with get operations, the location of the resource which
+// describes the updated values in the request message depends on the
+// operation kind. In any case, the effect of the field mask is
+// required to be honored by the API.
+//
+// ## Considerations for HTTP REST
+//
+// The HTTP kind of an update operation which uses a field mask must
+// be set to PATCH instead of PUT in order to satisfy HTTP semantics
+// (PUT must only be used for full updates).
+//
+// # JSON Encoding of Field Masks
+//
+// In JSON, a field mask is encoded as a single string where paths are
+// separated by a comma. Field names in each path are converted
+// to/from lower-camel naming conventions.
+//
+// As an example, consider the following message declarations:
+//
+// message Profile {
+// User user = 1;
+// Photo photo = 2;
+// }
+// message User {
+// string display_name = 1;
+// string address = 2;
+// }
+//
+// In proto a field mask for `Profile` may look as such:
+//
+// mask {
+// paths: "user.display_name"
+// paths: "photo"
+// }
+//
+// In JSON, the same mask is represented as below:
+//
+// {
+// mask: "user.displayName,photo"
+// }
+//
+// # Field Masks and Oneof Fields
+//
+// Field masks treat fields in oneofs just as regular fields. Consider the
+// following message:
+//
+// message SampleMessage {
+// oneof test_oneof {
+// string name = 4;
+// SubMessage sub_message = 9;
+// }
+// }
+//
+// The field mask can be:
+//
+// mask {
+// paths: "name"
+// }
+//
+// Or:
+//
+// mask {
+// paths: "sub_message"
+// }
+//
+// Note that oneof type names ("test_oneof" in this case) cannot be used in
+// paths.
+//
+// ## Field Mask Verification
+//
+// The implementation of all the API methods which have any FieldMask type
+// field in the request should verify the included field paths, and return an
+// `INVALID_ARGUMENT` error if any path is duplicated or unmappable.
+message FieldMask {
+ // The set of field mask paths.
+ repeated string paths = 1;
+}
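
A minimal sketch of the JSON encoding described above: the paths are joined with commas and each snake_case segment is converted to lowerCamelCase, as in the Profile example. The helpers below are illustrative Python, not the canonical implementation.

    # Sketch: JSON form of a FieldMask ("user.display_name" -> "user.displayName").
    def to_lower_camel(segment: str) -> str:
        head, *rest = segment.split("_")
        return head + "".join(part.capitalize() for part in rest)

    def field_mask_to_json(paths) -> str:
        return ",".join(
            ".".join(to_lower_camel(seg) for seg in path.split("."))
            for path in paths
        )

    print(field_mask_to_json(["user.display_name", "photo"]))
    # -> user.displayName,photo
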
diff --git a/src/nimpb_buildpkg/protobuf/include/google/protobuf/source_context.proto b/src/nimpb_buildpkg/protobuf/include/google/protobuf/source_context.proto
new file mode 100644
index 0000000..f3b2c96
--- /dev/null
+++ b/src/nimpb_buildpkg/protobuf/include/google/protobuf/source_context.proto
@@ -0,0 +1,48 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "SourceContextProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+option go_package = "google.golang.org/genproto/protobuf/source_context;source_context";
+
+// `SourceContext` represents information about the source of a
+// protobuf element, like the file in which it is defined.
+message SourceContext {
+ // The path-qualified name of the .proto file that contained the associated
+ // protobuf element. For example: `"google/protobuf/source_context.proto"`.
+ string file_name = 1;
+}
diff --git a/src/nimpb_buildpkg/protobuf/include/google/protobuf/struct.proto b/src/nimpb_buildpkg/protobuf/include/google/protobuf/struct.proto
new file mode 100644
index 0000000..7d7808e
--- /dev/null
+++ b/src/nimpb_buildpkg/protobuf/include/google/protobuf/struct.proto
@@ -0,0 +1,96 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option cc_enable_arenas = true;
+option go_package = "github.com/golang/protobuf/ptypes/struct;structpb";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "StructProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+
+
+// `Struct` represents a structured data value, consisting of fields
+// which map to dynamically typed values. In some languages, `Struct`
+// might be supported by a native representation. For example, in
+// scripting languages like JS a struct is represented as an
+// object. The details of that representation are described together
+// with the proto support for the language.
+//
+// The JSON representation for `Struct` is JSON object.
+message Struct {
+ // Unordered map of dynamically typed values.
+ map<string, Value> fields = 1;
+}
+
+// `Value` represents a dynamically typed value which can be either
+// null, a number, a string, a boolean, a recursive struct value, or a
+// list of values. A producer of value is expected to set one of these
+// variants; absence of any variant indicates an error.
+//
+// The JSON representation for `Value` is JSON value.
+message Value {
+ // The kind of value.
+ oneof kind {
+ // Represents a null value.
+ NullValue null_value = 1;
+ // Represents a double value.
+ double number_value = 2;
+ // Represents a string value.
+ string string_value = 3;
+ // Represents a boolean value.
+ bool bool_value = 4;
+ // Represents a structured value.
+ Struct struct_value = 5;
+ // Represents a repeated `Value`.
+ ListValue list_value = 6;
+ }
+}
+
+// `NullValue` is a singleton enumeration to represent the null value for the
+// `Value` type union.
+//
+// The JSON representation for `NullValue` is JSON `null`.
+enum NullValue {
+ // Null value.
+ NULL_VALUE = 0;
+}
+
+// `ListValue` is a wrapper around a repeated field of values.
+//
+// The JSON representation for `ListValue` is JSON array.
+message ListValue {
+ // Repeated field of dynamically typed values.
+ repeated Value values = 1;
+}
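
For illustration, here is a hypothetical Python sketch of the Value oneof mapping above: plain JSON-like Python values are sorted into the kind each one would occupy. The nested dicts only mirror the message shapes; this is not the generated API.

    # Sketch: map a plain JSON-like value onto the Value oneof kinds above.
    def to_value(obj):
        if obj is None:
            return {"null_value": 0}       # NullValue.NULL_VALUE
        if isinstance(obj, bool):          # check bool before number: bool is an int subclass
            return {"bool_value": obj}
        if isinstance(obj, (int, float)):
            return {"number_value": float(obj)}
        if isinstance(obj, str):
            return {"string_value": obj}
        if isinstance(obj, list):
            return {"list_value": {"values": [to_value(v) for v in obj]}}
        if isinstance(obj, dict):
            return {"struct_value": {"fields": {k: to_value(v) for k, v in obj.items()}}}
        raise TypeError(f"unsupported type: {type(obj)!r}")

    print(to_value({"name": "nimpb", "ok": True, "tags": ["proto", 3]}))
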
diff --git a/src/nimpb_buildpkg/protobuf/include/google/protobuf/timestamp.proto b/src/nimpb_buildpkg/protobuf/include/google/protobuf/timestamp.proto
new file mode 100644
index 0000000..06750ab
--- /dev/null
+++ b/src/nimpb_buildpkg/protobuf/include/google/protobuf/timestamp.proto
@@ -0,0 +1,133 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option cc_enable_arenas = true;
+option go_package = "github.com/golang/protobuf/ptypes/timestamp";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "TimestampProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+
+// A Timestamp represents a point in time independent of any time zone
+// or calendar, represented as seconds and fractions of seconds at
+// nanosecond resolution in UTC Epoch time. It is encoded using the
+// Proleptic Gregorian Calendar which extends the Gregorian calendar
+// backwards to year one. It is encoded assuming all minutes are 60
+// seconds long, i.e. leap seconds are "smeared" so that no leap second
+// table is needed for interpretation. Range is from
+// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
+// By restricting to that range, we ensure that we can convert to
+// and from RFC 3339 date strings.
+// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
+//
+// # Examples
+//
+// Example 1: Compute Timestamp from POSIX `time()`.
+//
+// Timestamp timestamp;
+// timestamp.set_seconds(time(NULL));
+// timestamp.set_nanos(0);
+//
+// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
+//
+// struct timeval tv;
+// gettimeofday(&tv, NULL);
+//
+// Timestamp timestamp;
+// timestamp.set_seconds(tv.tv_sec);
+// timestamp.set_nanos(tv.tv_usec * 1000);
+//
+// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
+//
+// FILETIME ft;
+// GetSystemTimeAsFileTime(&ft);
+// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
+//
+// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
+// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
+// Timestamp timestamp;
+// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
+// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
+//
+// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
+//
+// long millis = System.currentTimeMillis();
+//
+// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
+// .setNanos((int) ((millis % 1000) * 1000000)).build();
+//
+//
+// Example 5: Compute Timestamp from current time in Python.
+//
+// timestamp = Timestamp()
+// timestamp.GetCurrentTime()
+//
+// # JSON Mapping
+//
+// In JSON format, the Timestamp type is encoded as a string in the
+// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
+// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
+// where {year} is always expressed using four digits while {month}, {day},
+// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
+// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
+// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
+// is required, though only UTC (as indicated by "Z") is presently supported.
+//
+// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
+// 01:30 UTC on January 15, 2017.
+//
+// In JavaScript, one can convert a Date object to this format using the
+// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
+// method. In Python, a standard `datetime.datetime` object can be converted
+// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
+// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
+// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
+// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--)
+// to obtain a formatter capable of generating timestamps in this format.
+//
+//
+message Timestamp {
+
+ // Represents seconds of UTC time since Unix epoch
+ // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
+ // 9999-12-31T23:59:59Z inclusive.
+ int64 seconds = 1;
+
+ // Non-negative fractions of a second at nanosecond resolution. Negative
+ // second values with fractions must still have non-negative nanos values
+ // that count forward in time. Must be from 0 to 999,999,999
+ // inclusive.
+ int32 nanos = 2;
+}
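
To connect the POSIX examples with the JSON mapping above, here is a short Python sketch (standard library only, not the protobuf runtime) that derives the seconds/nanos fields from the current time and prints an RFC 3339 string of the kind described:

    import time
    from datetime import datetime, timezone

    ns = time.time_ns()                        # nanoseconds since the Unix epoch
    seconds, nanos = divmod(ns, 1_000_000_000)

    dt = datetime.fromtimestamp(seconds, tz=timezone.utc)
    print(seconds, nanos)
    print(dt.strftime("%Y-%m-%dT%H:%M:%S") + f".{nanos:09d}Z")
    # e.g. 2017-01-15T01:30:15.010000000Z
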
diff --git a/src/nimpb_buildpkg/protobuf/include/google/protobuf/type.proto b/src/nimpb_buildpkg/protobuf/include/google/protobuf/type.proto
new file mode 100644
index 0000000..624c15e
--- /dev/null
+++ b/src/nimpb_buildpkg/protobuf/include/google/protobuf/type.proto
@@ -0,0 +1,187 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+import "google/protobuf/any.proto";
+import "google/protobuf/source_context.proto";
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option cc_enable_arenas = true;
+option java_package = "com.google.protobuf";
+option java_outer_classname = "TypeProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+option go_package = "google.golang.org/genproto/protobuf/ptype;ptype";
+
+// A protocol buffer message type.
+message Type {
+ // The fully qualified message name.
+ string name = 1;
+ // The list of fields.
+ repeated Field fields = 2;
+ // The list of types appearing in `oneof` definitions in this type.
+ repeated string oneofs = 3;
+ // The protocol buffer options.
+ repeated Option options = 4;
+ // The source context.
+ SourceContext source_context = 5;
+ // The source syntax.
+ Syntax syntax = 6;
+}
+
+// A single field of a message type.
+message Field {
+ // Basic field types.
+ enum Kind {
+ // Field type unknown.
+ TYPE_UNKNOWN = 0;
+ // Field type double.
+ TYPE_DOUBLE = 1;
+ // Field type float.
+ TYPE_FLOAT = 2;
+ // Field type int64.
+ TYPE_INT64 = 3;
+ // Field type uint64.
+ TYPE_UINT64 = 4;
+ // Field type int32.
+ TYPE_INT32 = 5;
+ // Field type fixed64.
+ TYPE_FIXED64 = 6;
+ // Field type fixed32.
+ TYPE_FIXED32 = 7;
+ // Field type bool.
+ TYPE_BOOL = 8;
+ // Field type string.
+ TYPE_STRING = 9;
+ // Field type group. Proto2 syntax only, and deprecated.
+ TYPE_GROUP = 10;
+ // Field type message.
+ TYPE_MESSAGE = 11;
+ // Field type bytes.
+ TYPE_BYTES = 12;
+ // Field type uint32.
+ TYPE_UINT32 = 13;
+ // Field type enum.
+ TYPE_ENUM = 14;
+ // Field type sfixed32.
+ TYPE_SFIXED32 = 15;
+ // Field type sfixed64.
+ TYPE_SFIXED64 = 16;
+ // Field type sint32.
+ TYPE_SINT32 = 17;
+ // Field type sint64.
+ TYPE_SINT64 = 18;
+ };
+
+ // Whether a field is optional, required, or repeated.
+ enum Cardinality {
+ // For fields with unknown cardinality.
+ CARDINALITY_UNKNOWN = 0;
+ // For optional fields.
+ CARDINALITY_OPTIONAL = 1;
+ // For required fields. Proto2 syntax only.
+ CARDINALITY_REQUIRED = 2;
+ // For repeated fields.
+ CARDINALITY_REPEATED = 3;
+ };
+
+ // The field type.
+ Kind kind = 1;
+ // The field cardinality.
+ Cardinality cardinality = 2;
+ // The field number.
+ int32 number = 3;
+ // The field name.
+ string name = 4;
+ // The field type URL, without the scheme, for message or enumeration
+ // types. Example: `"type.googleapis.com/google.protobuf.Timestamp"`.
+ string type_url = 6;
+ // The index of the field type in `Type.oneofs`, for message or enumeration
+ // types. The first type has index 1; zero means the type is not in the list.
+ int32 oneof_index = 7;
+ // Whether to use alternative packed wire representation.
+ bool packed = 8;
+ // The protocol buffer options.
+ repeated Option options = 9;
+ // The field JSON name.
+ string json_name = 10;
+ // The string value of the default value of this field. Proto2 syntax only.
+ string default_value = 11;
+}
+
+// Enum type definition.
+message Enum {
+ // Enum type name.
+ string name = 1;
+ // Enum value definitions.
+ repeated EnumValue enumvalue = 2;
+ // Protocol buffer options.
+ repeated Option options = 3;
+ // The source context.
+ SourceContext source_context = 4;
+ // The source syntax.
+ Syntax syntax = 5;
+}
+
+// Enum value definition.
+message EnumValue {
+ // Enum value name.
+ string name = 1;
+ // Enum value number.
+ int32 number = 2;
+ // Protocol buffer options.
+ repeated Option options = 3;
+}
+
+// A protocol buffer option, which can be attached to a message, field,
+// enumeration, etc.
+message Option {
+ // The option's name. For protobuf built-in options (options defined in
+ // descriptor.proto), this is the short name. For example, `"map_entry"`.
+ // For custom options, it should be the fully-qualified name. For example,
+ // `"google.api.http"`.
+ string name = 1;
+ // The option's value packed in an Any message. If the value is a primitive,
+ // the corresponding wrapper type defined in google/protobuf/wrappers.proto
+ // should be used. If the value is an enum, it should be stored as an int32
+ // value using the google.protobuf.Int32Value type.
+ Any value = 2;
+}
+
+// The syntax in which a protocol buffer element is defined.
+enum Syntax {
+ // Syntax `proto2`.
+ SYNTAX_PROTO2 = 0;
+ // Syntax `proto3`.
+ SYNTAX_PROTO3 = 1;
+}
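
One detail from Field above that is easy to miss: oneof_index is 1-based into Type.oneofs, and 0 means the field is not part of any oneof. A tiny illustrative sketch:

    # Sketch: resolve Field.oneof_index against Type.oneofs (1-based; 0 = none).
    def oneof_name(type_oneofs, oneof_index):
        return None if oneof_index == 0 else type_oneofs[oneof_index - 1]

    print(oneof_name(["test_oneof"], 1))   # -> test_oneof
    print(oneof_name(["test_oneof"], 0))   # -> None
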
diff --git a/src/nimpb_buildpkg/protobuf/include/google/protobuf/wrappers.proto b/src/nimpb_buildpkg/protobuf/include/google/protobuf/wrappers.proto
new file mode 100644
index 0000000..0194763
--- /dev/null
+++ b/src/nimpb_buildpkg/protobuf/include/google/protobuf/wrappers.proto
@@ -0,0 +1,118 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Wrappers for primitive (non-message) types. These types are useful
+// for embedding primitives in the `google.protobuf.Any` type and for places
+// where we need to distinguish between the absence of a primitive
+// typed field and its default value.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option cc_enable_arenas = true;
+option go_package = "github.com/golang/protobuf/ptypes/wrappers";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "WrappersProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+
+// Wrapper message for `double`.
+//
+// The JSON representation for `DoubleValue` is JSON number.
+message DoubleValue {
+ // The double value.
+ double value = 1;
+}
+
+// Wrapper message for `float`.
+//
+// The JSON representation for `FloatValue` is JSON number.
+message FloatValue {
+ // The float value.
+ float value = 1;
+}
+
+// Wrapper message for `int64`.
+//
+// The JSON representation for `Int64Value` is JSON string.
+message Int64Value {
+ // The int64 value.
+ int64 value = 1;
+}
+
+// Wrapper message for `uint64`.
+//
+// The JSON representation for `UInt64Value` is JSON string.
+message UInt64Value {
+ // The uint64 value.
+ uint64 value = 1;
+}
+
+// Wrapper message for `int32`.
+//
+// The JSON representation for `Int32Value` is JSON number.
+message Int32Value {
+ // The int32 value.
+ int32 value = 1;
+}
+
+// Wrapper message for `uint32`.
+//
+// The JSON representation for `UInt32Value` is JSON number.
+message UInt32Value {
+ // The uint32 value.
+ uint32 value = 1;
+}
+
+// Wrapper message for `bool`.
+//
+// The JSON representation for `BoolValue` is JSON `true` and `false`.
+message BoolValue {
+ // The bool value.
+ bool value = 1;
+}
+
+// Wrapper message for `string`.
+//
+// The JSON representation for `StringValue` is JSON string.
+message StringValue {
+ // The string value.
+ string value = 1;
+}
+
+// Wrapper message for `bytes`.
+//
+// The JSON representation for `BytesValue` is JSON string.
+message BytesValue {
+ // The bytes value.
+ bytes value = 1;
+}
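
As a quick illustration of why these wrappers exist: in proto3 a plain scalar that equals its default value is indistinguishable from one that was never set, while a wrapper field (like an optional in a host language) keeps that distinction. The sketch below mimics the difference in plain Python; it is not generated code.

    from typing import Optional

    # Plain proto3 scalar: 0 and "never set" collapse into the same thing.
    plain_timeout: int = 0

    # Wrapper-style field (Int32Value): None means absent, 0 means explicitly zero.
    wrapped_timeout: Optional[int] = None

    def effective_timeout(value: Optional[int], default: int = 30) -> int:
        return default if value is None else value

    print(effective_timeout(wrapped_timeout))   # 30 -> fall back to the default
    print(effective_timeout(0))                 # 0  -> caller really asked for zero
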
diff --git a/src/nimpb_buildpkg/protobuf/protoc-linux-aarch_64 b/src/nimpb_buildpkg/protobuf/protoc-linux-aarch_64
new file mode 100755
index 0000000..be19f78
--- /dev/null
+++ b/src/nimpb_buildpkg/protobuf/protoc-linux-aarch_64
Binary files differ
diff --git a/src/nimpb_buildpkg/protobuf/protoc-linux-x86_32 b/src/nimpb_buildpkg/protobuf/protoc-linux-x86_32
new file mode 100755
index 0000000..96516b5
--- /dev/null
+++ b/src/nimpb_buildpkg/protobuf/protoc-linux-x86_32
Binary files differ
diff --git a/src/nimpb_buildpkg/protobuf/protoc-linux-x86_64 b/src/nimpb_buildpkg/protobuf/protoc-linux-x86_64
new file mode 100755
index 0000000..07689c0
--- /dev/null
+++ b/src/nimpb_buildpkg/protobuf/protoc-linux-x86_64
Binary files differ
diff --git a/src/nimpb_buildpkg/protobuf/protoc-osx-x86_64 b/src/nimpb_buildpkg/protobuf/protoc-osx-x86_64
new file mode 100755
index 0000000..3996f4d
--- /dev/null
+++ b/src/nimpb_buildpkg/protobuf/protoc-osx-x86_64
Binary files differ
diff --git a/src/nimpb_buildpkg/protobuf/protoc-win32.exe b/src/nimpb_buildpkg/protobuf/protoc-win32.exe
new file mode 100755
index 0000000..16bc9c3
--- /dev/null
+++ b/src/nimpb_buildpkg/protobuf/protoc-win32.exe
Binary files differ