workspace

This commit is contained in:
nora 2024-04-14 22:03:39 +02:00
parent 43747f9cda
commit eea39e1f16
22 changed files with 166 additions and 107 deletions

25
terustform/Cargo.toml Normal file
View file

@ -0,0 +1,25 @@
[package]
name = "terustform"
version = "0.1.0"
edition = "2021"
# Dependency roles:
# - tonic/prost: gRPC server + protobuf runtime for the Terraform plugin protocol
# - rcgen/rustls/base64: ephemeral mTLS certificate for the go-plugin handshake
# - rmp/serde/serde_json: msgpack/JSON encodings used by DynamicValue payloads
# - tokio/tokio-stream/tokio-util: async runtime, unix-socket stream, CancellationToken
[dependencies]
base64 = "0.22.0"
eyre = "0.6.12"
prost = "0.12.4"
rcgen = "0.13.1"
rmp = "0.8.12"
rustls = { version = "0.23.4", default-features = false, features = ["ring", "logging", "std", "tls12"] }
serde = "1.0.197"
serde_json = "1.0.115"
tempfile = "3.10.1"
time = "0.3.35"
tokio = { version = "1.37.0", features = ["full"] }
tokio-stream = { version = "0.1.15", features = ["net"] }
tokio-util = "0.7.10"
tonic = { version = "0.11.0", features = ["tls"] }
tracing = "0.1.40"
tracing-subscriber = "0.3.18"
# Code generation for the .proto files; see build.rs.
[build-dependencies]
tonic-build = "0.11.0"

5
terustform/build.rs Normal file
View file

@ -0,0 +1,5 @@
fn main() -> Result<(), Box<dyn std::error::Error>> {
tonic_build::compile_protos("proto/tfplugin6.6.proto")?;
tonic_build::compile_protos("proto/controller.proto")?;
Ok(())
}

View file

@ -0,0 +1,12 @@
syntax = "proto3";
package plugin;
option go_package = "./plugin";
// Empty is go-plugin's placeholder request/response payload. It mirrors the
// upstream go-plugin contract exactly; do not swap it for per-method messages
// or google.protobuf.Empty, as that would change the wire/type contract.
message Empty {
}
// The GRPCController is responsible for telling the plugin server to shutdown.
service GRPCController {
    // Shutdown asks the plugin process to terminate gracefully.
    rpc Shutdown(Empty) returns (Empty);
}

View file

@ -0,0 +1,604 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
// Terraform Plugin RPC protocol version 6.6
//
// This file defines version 6.6 of the RPC protocol. To implement a plugin
// against this protocol, copy this definition into your own codebase and
// use protoc to generate stubs for your target language.
//
// This file will not be updated. Any minor versions of protocol 6 to follow
// should copy this file and modify the copy while maintaining backwards
// compatibility. Breaking changes, if any are required, will come
// in a subsequent major version with its own separate proto definition.
//
// Note that only the proto files included in a release tag of Terraform are
// official protocol releases. Proto files taken from other commits may include
// incomplete changes or features that did not make it into a final release.
// In all reasonable cases, plugin developers should take the proto file from
// the tag of the most recent release of Terraform, and not from the main
// branch or any other development branch.
//
syntax = "proto3";
option go_package = "github.com/hashicorp/terraform/internal/tfplugin6";
package tfplugin6;
// DynamicValue is an opaque encoding of terraform data, with the field name
// indicating the encoding scheme used.
message DynamicValue {
    bytes msgpack = 1;
    bytes json = 2;
}
// Diagnostic reports a warning or error back to Terraform, optionally pointing
// at the attribute that caused it.
message Diagnostic {
    enum Severity {
        INVALID = 0;
        ERROR = 1;
        WARNING = 2;
    }
    Severity severity = 1;
    string summary = 2;
    string detail = 3;
    AttributePath attribute = 4;
}
// FunctionError reports an error from a provider-contributed function call.
message FunctionError {
    string text = 1;
    // The optional function_argument records the index position of the
    // argument which caused the error.
    optional int64 function_argument = 2;
}
// AttributePath is a path of steps into a (possibly nested) attribute value.
message AttributePath {
    message Step {
        oneof selector {
            // Set "attribute_name" to represent looking up an attribute
            // in the current object value.
            string attribute_name = 1;
            // Set "element_key_*" to represent looking up an element in
            // an indexable collection type.
            string element_key_string = 2;
            int64 element_key_int = 3;
        }
    }
    repeated Step steps = 1;
}
message StopProvider {
    message Request {
    }
    message Response {
        // NOTE: "Error" is not lower_snake_case, but this file is a verbatim
        // copy of the published protocol, so the name must stay as-is.
        string Error = 1;
    }
}
// RawState holds the stored state for a resource to be upgraded by the
// provider. It can be in one of two formats, the current json encoded format
// in bytes, or the legacy flatmap format as a map of strings.
message RawState {
    bytes json = 1;
    map<string, string> flatmap = 2;
}
// StringKind indicates how a description string should be rendered.
enum StringKind {
    PLAIN = 0;
    MARKDOWN = 1;
}
// Schema is the configuration schema for a Resource or Provider.
message Schema {
    message Block {
        int64 version = 1;
        repeated Attribute attributes = 2;
        repeated NestedBlock block_types = 3;
        string description = 4;
        StringKind description_kind = 5;
        bool deprecated = 6;
    }
    message Attribute {
        string name = 1;
        // type is a cty type constraint, serialized as JSON bytes.
        bytes type = 2;
        Object nested_type = 10;
        string description = 3;
        bool required = 4;
        bool optional = 5;
        bool computed = 6;
        bool sensitive = 7;
        StringKind description_kind = 8;
        bool deprecated = 9;
    }
    message NestedBlock {
        enum NestingMode {
            INVALID = 0;
            SINGLE = 1;
            LIST = 2;
            SET = 3;
            MAP = 4;
            GROUP = 5;
        }
        string type_name = 1;
        Block block = 2;
        NestingMode nesting = 3;
        int64 min_items = 4;
        int64 max_items = 5;
    }
    message Object {
        enum NestingMode {
            INVALID = 0;
            SINGLE = 1;
            LIST = 2;
            SET = 3;
            MAP = 4;
        }
        repeated Attribute attributes = 1;
        NestingMode nesting = 3;
        // MinItems and MaxItems were never used in the protocol, and have no
        // effect on validation.
        int64 min_items = 4 [deprecated = true];
        int64 max_items = 5 [deprecated = true];
    }
    // The version of the schema.
    // Schemas are versioned, so that providers can upgrade a saved resource
    // state when the schema is changed.
    int64 version = 1;
    // Block is the top level configuration block for this schema.
    Block block = 2;
}
// Function describes the definition of a provider-contributed function.
message Function {
    // parameters is the ordered list of positional function parameters.
    repeated Parameter parameters = 1;
    // variadic_parameter is an optional final parameter which accepts
    // zero or more argument values, in which Terraform will send an
    // ordered list of the parameter type.
    Parameter variadic_parameter = 2;
    // Return is the function return parameter.
    Return return = 3;
    // summary is the human-readable shortened documentation for the function.
    string summary = 4;
    // description is human-readable documentation for the function.
    string description = 5;
    // description_kind is the formatting of the description.
    StringKind description_kind = 6;
    // deprecation_message is human-readable documentation if the
    // function is deprecated.
    string deprecation_message = 7;
    message Parameter {
        // name is the human-readable display name for the parameter.
        string name = 1;
        // type is the type constraint for the parameter.
        bytes type = 2;
        // allow_null_value when enabled denotes that a null argument value can
        // be passed to the provider. When disabled, Terraform returns an error
        // if the argument value is null.
        bool allow_null_value = 3;
        // allow_unknown_values when enabled denotes that only wholly known
        // argument values will be passed to the provider. When disabled,
        // Terraform skips the function call entirely and assumes an unknown
        // value result from the function.
        bool allow_unknown_values = 4;
        // description is human-readable documentation for the parameter.
        string description = 5;
        // description_kind is the formatting of the description.
        StringKind description_kind = 6;
    }
    message Return {
        // type is the type constraint for the function result.
        bytes type = 1;
    }
}
// ServerCapabilities allows providers to communicate extra information
// regarding supported protocol features. This is used to indicate
// availability of certain forward-compatible changes which may be optional
// in a major protocol version, but cannot be tested for directly.
message ServerCapabilities {
    // The plan_destroy capability signals that a provider expects a call
    // to PlanResourceChange when a resource is going to be destroyed.
    bool plan_destroy = 1;
    // The get_provider_schema_optional capability indicates that this
    // provider does not require calling GetProviderSchema to operate
    // normally, and the caller can used a cached copy of the provider's
    // schema.
    bool get_provider_schema_optional = 2;
    // The move_resource_state capability signals that a provider supports the
    // MoveResourceState RPC.
    bool move_resource_state = 3;
}
// Deferred is a message that indicates that change is deferred for a reason.
message Deferred {
    // Reason is the reason for deferring the change.
    enum Reason {
        // UNKNOWN is the default value, and should not be used.
        UNKNOWN = 0;
        // RESOURCE_CONFIG_UNKNOWN is used when the config is partially unknown and the real
        // values need to be known before the change can be planned.
        RESOURCE_CONFIG_UNKNOWN = 1;
        // PROVIDER_CONFIG_UNKNOWN is used when parts of the provider configuration
        // are unknown, e.g. the provider configuration is only known after the apply is done.
        PROVIDER_CONFIG_UNKNOWN = 2;
        // ABSENT_PREREQ is used when a hard dependency has not been satisfied.
        ABSENT_PREREQ = 3;
    }
    // reason is the reason for deferring the change.
    // Note: unknown future Reason values still round-trip as ints in proto3;
    // receivers should treat unrecognized values gracefully.
    Reason reason = 1;
}
service Provider {
    //////// Information about what a provider supports/expects
    // GetMetadata returns upfront information about server capabilities and
    // supported resource types without requiring the server to instantiate all
    // schema information, which may be memory intensive. This RPC is optional,
    // where clients may receive an unimplemented RPC error. Clients should
    // ignore the error and call the GetProviderSchema RPC as a fallback.
    rpc GetMetadata(GetMetadata.Request) returns (GetMetadata.Response);
    // GetSchema returns schema information for the provider, data resources,
    // and managed resources.
    rpc GetProviderSchema(GetProviderSchema.Request) returns (GetProviderSchema.Response);
    // Validate* RPCs check practitioner-supplied configuration against the
    // corresponding schema before any plan/apply work happens.
    rpc ValidateProviderConfig(ValidateProviderConfig.Request) returns (ValidateProviderConfig.Response);
    rpc ValidateResourceConfig(ValidateResourceConfig.Request) returns (ValidateResourceConfig.Response);
    rpc ValidateDataResourceConfig(ValidateDataResourceConfig.Request) returns (ValidateDataResourceConfig.Response);
    rpc UpgradeResourceState(UpgradeResourceState.Request) returns (UpgradeResourceState.Response);
    //////// One-time initialization, called before other functions below
    rpc ConfigureProvider(ConfigureProvider.Request) returns (ConfigureProvider.Response);
    //////// Managed Resource Lifecycle
    rpc ReadResource(ReadResource.Request) returns (ReadResource.Response);
    rpc PlanResourceChange(PlanResourceChange.Request) returns (PlanResourceChange.Response);
    rpc ApplyResourceChange(ApplyResourceChange.Request) returns (ApplyResourceChange.Response);
    rpc ImportResourceState(ImportResourceState.Request) returns (ImportResourceState.Response);
    rpc MoveResourceState(MoveResourceState.Request) returns (MoveResourceState.Response);
    rpc ReadDataSource(ReadDataSource.Request) returns (ReadDataSource.Response);
    // GetFunctions returns the definitions of all functions.
    rpc GetFunctions(GetFunctions.Request) returns (GetFunctions.Response);
    //////// Provider-contributed Functions
    rpc CallFunction(CallFunction.Request) returns (CallFunction.Response);
    //////// Graceful Shutdown
    rpc StopProvider(StopProvider.Request) returns (StopProvider.Response);
}
message GetMetadata {
    message Request {
    }
    message Response {
        ServerCapabilities server_capabilities = 1;
        repeated Diagnostic diagnostics = 2;
        repeated DataSourceMetadata data_sources = 3;
        repeated ResourceMetadata resources = 4;
        // functions returns metadata for any functions.
        repeated FunctionMetadata functions = 5;
    }
    message FunctionMetadata {
        // name is the function name.
        string name = 1;
    }
    message DataSourceMetadata {
        string type_name = 1;
    }
    message ResourceMetadata {
        string type_name = 1;
    }
}
message GetProviderSchema {
    message Request {
    }
    message Response {
        Schema provider = 1;
        map<string, Schema> resource_schemas = 2;
        map<string, Schema> data_source_schemas = 3;
        map<string, Function> functions = 7;
        repeated Diagnostic diagnostics = 4;
        Schema provider_meta = 5;
        ServerCapabilities server_capabilities = 6;
    }
}
message ValidateProviderConfig {
    message Request {
        DynamicValue config = 1;
    }
    message Response {
        // NOTE(review): field number 1 is intentionally skipped here —
        // presumably removed in an earlier protocol revision; do not reuse it.
        repeated Diagnostic diagnostics = 2;
    }
}
message UpgradeResourceState {
    // Request is the message that is sent to the provider during the
    // UpgradeResourceState RPC.
    //
    // This message intentionally does not include configuration data as any
    // configuration-based or configuration-conditional changes should occur
    // during the PlanResourceChange RPC. Additionally, the configuration is
    // not guaranteed to exist (in the case of resource destruction), be wholly
    // known, nor match the given prior state, which could lead to unexpected
    // provider behaviors for practitioners.
    message Request {
        string type_name = 1;
        // version is the schema_version number recorded in the state file
        int64 version = 2;
        // raw_state is the raw states as stored for the resource. Core does
        // not have access to the schema of prior_version, so it's the
        // provider's responsibility to interpret this value using the
        // appropriate older schema. The raw_state will be the json encoded
        // state, or a legacy flat-mapped format.
        RawState raw_state = 3;
    }
    message Response {
        // new_state is a msgpack-encoded data structure that, when interpreted with
        // the _current_ schema for this resource type, is functionally equivalent to
        // that which was given in prior_state_raw.
        DynamicValue upgraded_state = 1;
        // diagnostics describes any errors encountered during migration that could not
        // be safely resolved, and warnings about any possibly-risky assumptions made
        // in the upgrade process.
        repeated Diagnostic diagnostics = 2;
    }
}
message ValidateResourceConfig {
    message Request {
        string type_name = 1;
        DynamicValue config = 2;
    }
    message Response {
        repeated Diagnostic diagnostics = 1;
    }
}
message ValidateDataResourceConfig {
    message Request {
        string type_name = 1;
        DynamicValue config = 2;
    }
    message Response {
        repeated Diagnostic diagnostics = 1;
    }
}
// ConfigureProvider carries the practitioner's provider block configuration.
message ConfigureProvider {
    message Request {
        string terraform_version = 1;
        DynamicValue config = 2;
    }
    message Response {
        repeated Diagnostic diagnostics = 1;
    }
}
message ReadResource {
    // Request is the message that is sent to the provider during the
    // ReadResource RPC.
    //
    // This message intentionally does not include configuration data as any
    // configuration-based or configuration-conditional changes should occur
    // during the PlanResourceChange RPC. Additionally, the configuration is
    // not guaranteed to be wholly known nor match the given prior state, which
    // could lead to unexpected provider behaviors for practitioners.
    message Request {
        string type_name = 1;
        DynamicValue current_state = 2;
        bytes private = 3;
        DynamicValue provider_meta = 4;
        // deferral_allowed signals that the provider is allowed to defer the
        // changes. If set the caller needs to handle the deferred response.
        bool deferral_allowed = 5;
    }
    message Response {
        DynamicValue new_state = 1;
        repeated Diagnostic diagnostics = 2;
        bytes private = 3;
        // deferred is set if the provider is deferring the change. If set the caller
        // needs to handle the deferral.
        Deferred deferred = 4;
    }
}
message PlanResourceChange {
    message Request {
        string type_name = 1;
        DynamicValue prior_state = 2;
        DynamicValue proposed_new_state = 3;
        DynamicValue config = 4;
        bytes prior_private = 5;
        DynamicValue provider_meta = 6;
        // deferral_allowed signals that the provider is allowed to defer the
        // changes. If set the caller needs to handle the deferred response.
        bool deferral_allowed = 7;
    }
    message Response {
        DynamicValue planned_state = 1;
        repeated AttributePath requires_replace = 2;
        bytes planned_private = 3;
        repeated Diagnostic diagnostics = 4;
        // This may be set only by the helper/schema "SDK" in the main Terraform
        // repository, to request that Terraform Core >=0.12 permit additional
        // inconsistencies that can result from the legacy SDK type system
        // and its imprecise mapping to the >=0.12 type system.
        // The change in behavior implied by this flag makes sense only for the
        // specific details of the legacy SDK type system, and are not a general
        // mechanism to avoid proper type handling in providers.
        //
        // ==== DO NOT USE THIS ====
        // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ====
        // ==== DO NOT USE THIS ====
        bool legacy_type_system = 5;
        // deferred is set if the provider is deferring the change. If set the caller
        // needs to handle the deferral.
        Deferred deferred = 6;
    }
}
message ApplyResourceChange {
    message Request {
        string type_name = 1;
        DynamicValue prior_state = 2;
        DynamicValue planned_state = 3;
        DynamicValue config = 4;
        bytes planned_private = 5;
        DynamicValue provider_meta = 6;
    }
    message Response {
        DynamicValue new_state = 1;
        bytes private = 2;
        repeated Diagnostic diagnostics = 3;
        // This may be set only by the helper/schema "SDK" in the main Terraform
        // repository, to request that Terraform Core >=0.12 permit additional
        // inconsistencies that can result from the legacy SDK type system
        // and its imprecise mapping to the >=0.12 type system.
        // The change in behavior implied by this flag makes sense only for the
        // specific details of the legacy SDK type system, and are not a general
        // mechanism to avoid proper type handling in providers.
        //
        // ==== DO NOT USE THIS ====
        // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ====
        // ==== DO NOT USE THIS ====
        bool legacy_type_system = 4;
    }
}
message ImportResourceState {
    message Request {
        string type_name = 1;
        string id = 2;
        // deferral_allowed signals that the provider is allowed to defer the
        // changes. If set the caller needs to handle the deferred response.
        bool deferral_allowed = 3;
    }
    message ImportedResource {
        string type_name = 1;
        DynamicValue state = 2;
        bytes private = 3;
    }
    message Response {
        repeated ImportedResource imported_resources = 1;
        repeated Diagnostic diagnostics = 2;
        // deferred is set if the provider is deferring the change. If set the caller
        // needs to handle the deferral.
        Deferred deferred = 3;
    }
}
message MoveResourceState {
    message Request {
        // The address of the provider the resource is being moved from.
        string source_provider_address = 1;
        // The resource type that the resource is being moved from.
        string source_type_name = 2;
        // The schema version of the resource type that the resource is being
        // moved from.
        int64 source_schema_version = 3;
        // The raw state of the resource being moved. Only the json field is
        // populated, as there should be no legacy providers using the flatmap
        // format that support newly introduced RPCs.
        RawState source_state = 4;
        // The resource type that the resource is being moved to.
        string target_type_name = 5;
        // The private state of the resource being moved.
        bytes source_private = 6;
    }
    message Response {
        // The state of the resource after it has been moved.
        DynamicValue target_state = 1;
        // Any diagnostics that occurred during the move.
        repeated Diagnostic diagnostics = 2;
        // The private state of the resource after it has been moved.
        bytes target_private = 3;
    }
}
message ReadDataSource {
    message Request {
        string type_name = 1;
        DynamicValue config = 2;
        DynamicValue provider_meta = 3;
        // deferral_allowed signals that the provider is allowed to defer the
        // changes. If set the caller needs to handle the deferred response.
        bool deferral_allowed = 4;
    }
    message Response {
        DynamicValue state = 1;
        repeated Diagnostic diagnostics = 2;
        // deferred is set if the provider is deferring the change. If set the caller
        // needs to handle the deferral.
        Deferred deferred = 3;
    }
}
message GetFunctions {
    message Request {}
    message Response {
        // functions is a mapping of function names to definitions.
        map<string, Function> functions = 1;
        // diagnostics is any warnings or errors.
        repeated Diagnostic diagnostics = 2;
    }
}
// CallFunction invokes a provider-contributed function by name.
message CallFunction {
    message Request {
        string name = 1;
        repeated DynamicValue arguments = 2;
    }
    message Response {
        DynamicValue result = 1;
        FunctionError error = 2;
    }
}

29
terustform/src/cert.rs Normal file
View file

@ -0,0 +1,29 @@
use eyre::{Context, Result};
use rcgen::{DistinguishedName, DnType, IsCa, KeyUsagePurpose};
use time::Duration;
/// Generates a self-signed ECDSA P-256 server certificate for the go-plugin
/// mTLS handshake, mirroring the parameters go-plugin's own mtls.go uses.
///
/// Returns the tonic server identity (cert + private key as PEM) together with
/// the raw rcgen certificate (whose DER bytes are base64-printed during the
/// handshake).
pub fn generate_cert() -> Result<(tonic::transport::Identity, rcgen::Certificate)> {
    // https://github.com/hashicorp/go-plugin/blob/8d2aaa458971cba97c3bfec1b0380322e024b514/mtls.go#L20
    let keypair = rcgen::KeyPair::generate_for(&rcgen::PKCS_ECDSA_P256_SHA256)
        .wrap_err("failed to generate keypair")?;
    let mut params =
        rcgen::CertificateParams::new(["localhost".to_owned()]).wrap_err("creating cert params")?;
    // Key usages copied from go-plugin; KeyCertSign is needed because the
    // cert acts as its own (self-signed) CA.
    params.key_usages = vec![
        KeyUsagePurpose::DigitalSignature,
        KeyUsagePurpose::KeyEncipherment,
        KeyUsagePurpose::KeyAgreement,
        KeyUsagePurpose::KeyCertSign,
    ];
    params.is_ca = IsCa::Ca(rcgen::BasicConstraints::Unconstrained);
    // Backdate not_before by 30s to tolerate clock skew between the plugin
    // and the client.
    params.not_before = time::OffsetDateTime::now_utc().saturating_add(Duration::seconds(-30));
    // NOTE(review): go-plugin's mtls.go uses 262980 *hours* (~30 years);
    // 262980 *seconds* is only ~3 days. Likely fine for an ephemeral plugin
    // process, but confirm the unit was intended.
    params.not_after = time::OffsetDateTime::now_utc().saturating_add(Duration::seconds(262980));
    let mut dn = DistinguishedName::new();
    dn.push(DnType::OrganizationName, "HashiCorp");
    dn.push(DnType::CommonName, "localhost");
    params.distinguished_name = dn;
    let cert = params.self_signed(&keypair).wrap_err("signing cert")?;
    Ok((tonic::transport::Identity::from_pem(cert.pem(), keypair.serialize_pem()), cert))
}

View file

@ -0,0 +1,81 @@
use std::collections::HashMap;
use crate::values::{Type, Value};
use super::DResult;
/// A single Terraform data source served by a provider.
pub trait DataSource: Send + Sync {
    /// The full data source type name as used in configuration; the caller
    /// supplies the provider's name so implementations can prefix it.
    fn name(&self, provider_name: &str) -> String;
    /// The attribute schema advertised to Terraform for this data source.
    fn schema(&self) -> Schema;
    // todo: probably want some kind of Value+Schema thing like tfsdk? whatever.
    /// Reads the data source: takes the practitioner's config `Value` and
    /// returns the resulting state `Value`, or diagnostics on failure.
    fn read(&self, config: Value) -> DResult<Value>;
    /// Boxes `self` as a trait object; convenience for building the list a
    /// provider returns from `data_sources()`.
    fn erase(self) -> Box<dyn DataSource>
    where
        Self: Sized + 'static,
    {
        Box::new(self)
    }
}
/// Schema of a data source: a description plus its named attributes.
pub struct Schema {
    // Human-readable description (rendered as Markdown downstream).
    pub description: String,
    // Attribute name -> attribute definition.
    pub attributes: HashMap<String, Attribute>,
}
/// A single schema attribute. Only string and int64 attributes are supported
/// so far; every variant carries the same metadata trio.
pub enum Attribute {
    String {
        // Human-readable documentation for the attribute.
        description: String,
        // Required/optional/computed behavior (see `Mode`).
        mode: Mode,
        // If true, Terraform redacts the value in its output.
        sensitive: bool,
    },
    Int64 {
        description: String,
        mode: Mode,
        sensitive: bool,
    },
}
/// How an attribute's value may be supplied: by the practitioner, by the
/// provider, or both. Maps onto the `required`/`optional`/`computed` flags of
/// the plugin protocol.
#[derive(Clone, Copy)]
pub enum Mode {
    Required,
    Optional,
    OptionalComputed,
    Computed,
}
impl Mode {
    /// True when the practitioner must supply a value.
    pub fn required(&self) -> bool {
        match self {
            Self::Required => true,
            Self::Optional | Self::OptionalComputed | Self::Computed => false,
        }
    }
    /// True when the practitioner may supply a value.
    pub fn optional(&self) -> bool {
        match self {
            Self::Optional | Self::OptionalComputed => true,
            Self::Required | Self::Computed => false,
        }
    }
    /// True when the provider may fill the value in itself.
    pub fn computed(&self) -> bool {
        match self {
            Self::OptionalComputed | Self::Computed => true,
            Self::Required | Self::Optional => false,
        }
    }
}
impl Schema {
    /// Derives the value type (an object whose attributes mirror this
    /// schema's attributes) used to decode/encode DynamicValues for this
    /// data source.
    pub fn typ(&self) -> Type {
        let attrs = self
            .attributes
            .iter()
            .map(|(name, attr)| {
                // Map each framework attribute kind onto its value type.
                let attr_type = match attr {
                    Attribute::Int64 { .. } => Type::Number,
                    Attribute::String { .. } => Type::String,
                };
                (name.clone(), attr_type)
            })
            .collect();
        Type::Object {
            attrs,
            // No optional-attribute tracking yet: every attribute is part of
            // the object type unconditionally.
            optionals: vec![],
        }
    }
}

View file

@ -0,0 +1,27 @@
#![allow(dead_code)]
pub mod datasource;
pub mod provider;
pub mod value;
use self::datasource::DataSource;
/// A collection of user-facing error messages produced while handling a
/// Terraform request.
pub struct Diagnostics {
    // Each entry becomes one error diagnostic in the wire response.
    pub(crate) errors: Vec<String>,
}
/// Framework operations either succeed or report diagnostics.
pub type DResult<T> = Result<T, Diagnostics>;
impl Diagnostics {
    /// Creates a diagnostics set holding the single error message `msg`.
    pub fn error_string(msg: String) -> Self {
        Self { errors: vec![msg] }
    }
}
// `std::error::Error` already has `Debug` (and `Display`) as supertraits, so
// the previous extra `+ std::fmt::Debug` bound was redundant; dropping it
// accepts exactly the same set of types.
impl<E: std::error::Error> From<E> for Diagnostics {
    fn from(value: E) -> Self {
        // Debug formatting is kept deliberately so the error's structure
        // (including sources, where derived) stays visible in the message.
        Self::error_string(format!("{:?}", value))
    }
}

View file

@ -0,0 +1,6 @@
use super::DataSource;
/// A Terraform provider: names the provider and enumerates the data sources
/// it serves.
pub trait Provider: Send + Sync {
    /// The provider's name (used, among other things, to prefix data source
    /// names).
    fn name(&self) -> String;
    /// All data sources offered by this provider.
    fn data_sources(&self) -> Vec<Box<dyn DataSource>>;
}

View file

@ -0,0 +1 @@
// Placeholder for a typed string value wrapper; not referenced anywhere yet.
pub struct StringValue;

103
terustform/src/lib.rs Normal file
View file

@ -0,0 +1,103 @@
mod cert;
pub mod framework;
mod server;
pub mod values;
use std::{env, path::PathBuf};
use base64::Engine;
use eyre::{bail, Context, Result};
use framework::provider::Provider;
use tokio::net::UnixListener;
use tonic::transport::{Certificate, ServerTlsConfig};
use tracing::{info, Level};
pub async fn serve(provider: &dyn Provider) -> eyre::Result<()> {
tracing_subscriber::fmt()
.with_max_level(Level::DEBUG)
.with_writer(std::io::stderr)
.without_time()
.init();
let client_cert =
std::env::var("PLUGIN_CLIENT_CERT").wrap_err("PLUGIN_CLIENT_CERT not found")?;
let client_cert = Certificate::from_pem(client_cert);
let (server_identity, server_cert) =
cert::generate_cert().wrap_err("generating server certificate")?;
let (_tmpdir, socket) = match init_handshake(&server_cert).await {
Ok(addr) => addr,
Err(err) => {
println!("{:?}", err);
bail!("init error");
}
};
let tls = ServerTlsConfig::new()
.identity(server_identity)
.client_auth_optional(true) // ??? terraform doesn't send certs ???
.client_ca_root(client_cert);
info!("Listening on {}", socket.display());
let uds = UnixListener::bind(socket).wrap_err("failed to bind unix listener")?;
let uds_stream = tokio_stream::wrappers::UnixListenerStream::new(uds);
let shutdown = tokio_util::sync::CancellationToken::new();
let server = tonic::transport::Server::builder()
.tls_config(tls)
.wrap_err("invalid TLS config")?
.add_service(server::ProviderServer::new(
server::ProviderHandler::new(shutdown.clone(), provider),
))
.add_service(
server::GrpcControllerServer::new(
server::Controller {
shutdown: shutdown.clone(),
},
),
)
.serve_with_incoming(uds_stream);
tokio::select! {
_ = shutdown.cancelled() => {}
result = server => {
result.wrap_err("failed to start server")?;
}
}
Ok(())
}
// go-plugin handshake "magic cookie": the client sets this env var as a basic
// "you were launched by a plugin host" check. Currently unused here
// (underscore-prefixed); validating it at startup would match go-plugin's
// documented behavior.
const _MAGIC_COOKIE_KEY: &str = "TF_PLUGIN_MAGIC_COOKIE";
const _MAGIC_COOKIE_VALUE: &str =
    "d602bf8f470bc67ca7faa0386276bbdd4330efaf76d1a219cb4d6991ca9872b2";
/// Performs the go-plugin handshake: validates the env vars a go-plugin client
/// always sets, allocates a unix socket path in a fresh temp directory, and
/// prints the pipe-delimited handshake line to stdout.
///
/// Returns the temp dir (must be kept alive so the socket path stays valid)
/// together with the socket path to bind.
async fn init_handshake(server_cert: &rcgen::Certificate) -> Result<(tempfile::TempDir, PathBuf)> {
    // https://github.com/hashicorp/go-plugin/blob/8d2aaa458971cba97c3bfec1b0380322e024b514/docs/internals.md
    // The port range only matters for TCP transports; we listen on a unix
    // socket, so these are parsed purely as a sanity check that a go-plugin
    // client launched us.
    let _ = env::var("PLUGIN_MIN_PORT")
        .wrap_err("PLUGIN_MIN_PORT not found")?
        .parse::<u16>()
        .wrap_err("PLUGIN_MIN_PORT not an int")?;
    let _ = env::var("PLUGIN_MAX_PORT")
        .wrap_err("PLUGIN_MAX_PORT not found")?
        .parse::<u16>()
        .wrap_err("PLUGIN_MAX_PORT not an int")?;
    let tmpdir = tempfile::TempDir::new().wrap_err("failed to create temporary directory")?;
    let socket = tmpdir.path().join("plugin");
    // https://github.com/hashicorp/go-plugin/blob/8d2aaa458971cba97c3bfec1b0380322e024b514/server.go#L426
    const CORE_PROTOCOL_VERSION: u8 = 1;
    const PROTO_VERSION: u8 = 6;
    let listener_addr_network = "unix";
    let listener_addr = socket.display();
    let proto_type = "grpc";
    // The client reads the server's DER cert base64 (no padding) from the
    // handshake line to pin the TLS connection.
    let b64_cert = base64::prelude::BASE64_STANDARD_NO_PAD.encode(server_cert.der());
    // Handshake line format: CORE-VERSION|PROTO-VERSION|NETWORK|ADDR|PROTOCOL|CERT
    println!("{CORE_PROTOCOL_VERSION}|{PROTO_VERSION}|{listener_addr_network}|{listener_addr}|{proto_type}|{b64_cert}");
    Ok((tmpdir, socket))
}

View file

@ -0,0 +1,91 @@
use crate::{
framework::{
datasource::{self, Mode},
Diagnostics,
},
values::Type,
};
use super::grpc::tfplugin6;
impl datasource::Schema {
    /// Lowers this framework schema into the wire-level tfplugin6 schema.
    pub(crate) fn to_tfplugin(self) -> tfplugin6::Schema {
        tfplugin6::Schema {
            // NOTE(review): the outer schema version is 1 while the block
            // version is 0 — confirm which one Terraform consults for state
            // upgrades.
            version: 1,
            block: Some(tfplugin6::schema::Block {
                version: 0,
                attributes: self
                    .attributes
                    .into_iter()
                    .map(|(name, attr)| attr.to_tfplugin(name))
                    .collect(),
                // Nested blocks are not supported by the framework yet.
                block_types: vec![],
                description: self.description,
                description_kind: tfplugin6::StringKind::Markdown as _,
                deprecated: false,
            }),
        }
    }
}
impl datasource::Attribute {
pub(crate) fn to_tfplugin(self, name: String) -> tfplugin6::schema::Attribute {
let mut attr = tfplugin6::schema::Attribute {
name,
r#type: vec![],
nested_type: None,
description: "<placeholder, this is a bug in terustform>".to_owned(),
required: false,
optional: false,
computed: true,
sensitive: false,
description_kind: tfplugin6::StringKind::Markdown as _,
deprecated: false,
};
let set_modes = |attr: &mut tfplugin6::schema::Attribute, mode: Mode| {
attr.required = mode.required();
attr.optional = mode.optional();
attr.computed = mode.computed();
};
match self {
datasource::Attribute::String {
description,
mode,
sensitive,
} => {
attr.r#type = Type::String.to_json().into_bytes();
attr.description = description;
set_modes(&mut attr, mode);
attr.sensitive = sensitive;
}
datasource::Attribute::Int64 {
description,
mode,
sensitive,
} => {
attr.r#type = Type::Number.to_json().into_bytes();
attr.description = description;
set_modes(&mut attr, mode);
attr.sensitive = sensitive;
}
}
attr
}
}
impl Diagnostics {
    /// Converts the collected error messages into wire-level tfplugin6 error
    /// diagnostics (one per message, summary only).
    pub(crate) fn to_tfplugin_diags(self) -> Vec<tfplugin6::Diagnostic> {
        let mut diags = Vec::with_capacity(self.errors.len());
        for summary in self.errors {
            diags.push(tfplugin6::Diagnostic {
                severity: tfplugin6::diagnostic::Severity::Error as _,
                summary,
                detail: "".to_owned(),
                attribute: None,
            });
        }
        diags
    }
}

View file

@ -0,0 +1,322 @@
#![allow(unused_variables, unused_imports)]
// Generated gRPC bindings for the Terraform plugin protocol v6
// (proto/tfplugin6.6.proto), produced by tonic-build in build.rs.
pub mod tfplugin6 {
    tonic::include_proto!("tfplugin6");
}
// Generated bindings for go-plugin's controller service
// (proto/controller.proto), used for graceful shutdown.
pub mod plugin {
    tonic::include_proto!("plugin");
}
use std::{
collections::{BTreeMap, HashMap},
sync::Mutex,
vec,
};
use tfplugin6::provider_server::{Provider, ProviderServer};
use tokio_util::sync::CancellationToken;
use tonic::{transport::Server, Request, Response, Result, Status};
use tracing::info;
use crate::values::Type;
// A schema with no attributes; used for the provider (meta)schema since this
// provider currently accepts no configuration.
fn empty_schema() -> tfplugin6::Schema {
    tfplugin6::Schema {
        version: 1,
        block: Some(tfplugin6::schema::Block {
            version: 0,
            attributes: vec![],
            block_types: vec![],
            // NOTE(review): this description is practitioner-visible;
            // "hello world" looks like a leftover placeholder.
            description: "hello world".to_owned(),
            description_kind: 0,
            deprecated: false,
        }),
    }
}
#[tonic::async_trait]
impl Provider for super::ProviderHandler {
/// GetMetadata returns upfront information about server capabilities and
/// supported resource types without requiring the server to instantiate all
/// schema information, which may be memory intensive. This RPC is optional,
/// where clients may receive an unimplemented RPC error. Clients should
/// ignore the error and call the GetProviderSchema RPC as a fallback.
/// Returns data source, managed resource, and function metadata, such as names.
async fn get_metadata(
&self,
request: Request<tfplugin6::get_metadata::Request>,
) -> Result<Response<tfplugin6::get_metadata::Response>, Status> {
info!("get_metadata");
Err(Status::unimplemented(
"GetMetadata: Not implemeneted".to_owned(),
))
}
    /// GetSchema returns schema information for the provider, data resources,
    /// and managed resources.
    /// Returns provider schema, provider metaschema, all resource schemas and all data source schemas.
    async fn get_provider_schema(
        &self,
        request: Request<tfplugin6::get_provider_schema::Request>,
    ) -> Result<Response<tfplugin6::get_provider_schema::Response>, Status> {
        info!("Received get_provider_schema");
        // Collect schemas (and any diagnostics) from the wrapped provider.
        let schemas = self.get_schemas();
        let reply = tfplugin6::get_provider_schema::Response {
            // Provider takes no configuration yet, so provider and
            // provider_meta are empty schemas.
            provider: Some(empty_schema()),
            provider_meta: Some(empty_schema()),
            server_capabilities: Some(tfplugin6::ServerCapabilities {
                plan_destroy: true,
                get_provider_schema_optional: true,
                move_resource_state: false,
            }),
            data_source_schemas: schemas.data_sources,
            resource_schemas: schemas.resources,
            // Provider-contributed functions are not supported yet.
            functions: HashMap::default(),
            diagnostics: schemas.diagnostics,
        };
        Ok(Response::new(reply))
    }
/// Validates the practitioner supplied provider configuration by verifying types conform to the schema and supports value validation diagnostics.
async fn validate_provider_config(
&self,
request: Request<tfplugin6::validate_provider_config::Request>,
) -> Result<Response<tfplugin6::validate_provider_config::Response>, Status> {
tracing::info!("validate_provider_config");
let reply = tfplugin6::validate_provider_config::Response {
diagnostics: vec![],
};
Ok(Response::new(reply))
}
/// Validates the practitioner supplied resource configuration by verifying types conform to the schema and supports value validation diagnostics.
async fn validate_resource_config(
&self,
request: Request<tfplugin6::validate_resource_config::Request>,
) -> Result<Response<tfplugin6::validate_resource_config::Response>, Status> {
tracing::info!("validate_resource_config");
let reply = tfplugin6::validate_resource_config::Response {
diagnostics: vec![],
};
Ok(Response::new(reply))
}
/// Validates the practitioner supplied data source configuration by verifying types conform to the schema and supports value validation diagnostics.
async fn validate_data_resource_config(
&self,
request: Request<tfplugin6::validate_data_resource_config::Request>,
) -> Result<Response<tfplugin6::validate_data_resource_config::Response>, Status> {
tracing::info!("validate_data_resource_config");
let reply = tfplugin6::validate_data_resource_config::Response {
diagnostics: vec![],
};
Ok(Response::new(reply))
}
    /// Called when a resource has existing state. Primarily useful for when the schema version does not match the current version.
    /// The provider is expected to modify the state to upgrade it to the latest schema.
    async fn upgrade_resource_state(
        &self,
        request: Request<tfplugin6::upgrade_resource_state::Request>,
    ) -> Result<Response<tfplugin6::upgrade_resource_state::Response>, Status> {
        tracing::info!("upgrade_resource_state");
        // We don't do anything interesting, it's fine.
        // NOTE(review): upgraded_state is returned as None rather than echoing
        // the raw state back — confirm Terraform accepts an absent
        // upgraded_state when no migration is performed.
        let reply = tfplugin6::upgrade_resource_state::Response {
            upgraded_state: None,
            diagnostics: vec![],
        };
        Ok(Response::new(reply))
    }
/// ////// One-time initialization, called before other functions below
/// Passes the practitioner supplied provider configuration to the provider.
async fn configure_provider(
    &self,
    _request: Request<tfplugin6::configure_provider::Request>,
) -> Result<Response<tfplugin6::configure_provider::Response>, Status> {
    tracing::info!("configure_provider");
    // The supplied configuration is currently ignored; reply without diagnostics.
    Ok(Response::new(tfplugin6::configure_provider::Response {
        diagnostics: Vec::new(),
    }))
}
/// ////// Managed Resource Lifecycle
/// Called when refreshing a resource's state.
async fn read_resource(
    &self,
    request: Request<tfplugin6::read_resource::Request>,
) -> Result<Response<tfplugin6::read_resource::Response>, Status> {
    tracing::info!("read_resource");
    // Refresh performs no remote lookup: the previously stored state is
    // echoed back unchanged.
    let current_state = request.into_inner().current_state;
    Ok(Response::new(tfplugin6::read_resource::Response {
        new_state: current_state,
        private: Vec::new(),
        diagnostics: Vec::new(),
        deferred: None,
    }))
}
/// Calculates a plan for a resource. A proposed new state is generated, which the provider can modify.
async fn plan_resource_change(
    &self,
    request: Request<tfplugin6::plan_resource_change::Request>,
) -> Result<Response<tfplugin6::plan_resource_change::Response>, Status> {
    tracing::info!("plan_resource_change");
    // Accept Terraform's proposal verbatim: the proposed new state becomes
    // the planned state without modification.
    let proposed = request.into_inner().proposed_new_state;
    Ok(Response::new(tfplugin6::plan_resource_change::Response {
        planned_state: proposed,
        requires_replace: Vec::new(),
        planned_private: Vec::new(),
        diagnostics: Vec::new(),
        legacy_type_system: false,
        deferred: None,
    }))
}
/// Called when a practitioner has approved a planned change.
/// The provider is to apply the changes contained in the plan, and return a resulting state matching the given plan.
async fn apply_resource_change(
    &self,
    request: Request<tfplugin6::apply_resource_change::Request>,
) -> Result<Response<tfplugin6::apply_resource_change::Response>, Status> {
    tracing::info!("apply_resource_change");
    // No side effects are performed; the planned state is reported back as
    // the new state verbatim.
    let planned = request.into_inner().planned_state;
    Ok(Response::new(tfplugin6::apply_resource_change::Response {
        new_state: planned,
        private: Vec::new(),
        diagnostics: Vec::new(),
        legacy_type_system: false,
    }))
}
/// Called when importing a resource into state so that the resource becomes managed.
async fn import_resource_state(
    &self,
    _request: Request<tfplugin6::import_resource_state::Request>,
) -> Result<Response<tfplugin6::import_resource_state::Response>, Status> {
    // Not implemented yet; log loudly so the failure shows up in provider
    // logs before the panic.
    tracing::error!("import_resource_state");
    todo!("import_resource_state")
}
/// Moves a resource's state from one type to another. Not implemented yet.
async fn move_resource_state(
    &self,
    _request: Request<tfplugin6::move_resource_state::Request>,
) -> Result<Response<tfplugin6::move_resource_state::Response>, Status> {
    // Log loudly so the failure shows up in provider logs before the panic.
    tracing::error!("move_resource_state");
    todo!("move_resource_state")
}
/// Called when refreshing a data source's state.
async fn read_data_source(
&self,
request: Request<tfplugin6::read_data_source::Request>,
) -> Result<Response<tfplugin6::read_data_source::Response>, Status> {
tracing::info!("read_data_source");
let ds = self
.state
.as_ref()
.unwrap()
.data_sources
.get(&request.get_ref().type_name)
.unwrap();
let typ = ds.schema().typ();
let config = match &request.get_ref().config {
None => crate::values::Value::Null,
Some(v) => {
let value = crate::values::Value::msg_unpack(&v.msgpack, &typ);
match value {
Ok(value) => value,
Err(errs) => {
return Ok(Response::new(tfplugin6::read_data_source::Response {
deferred: None,
state: None,
diagnostics: errs.to_tfplugin_diags(),
}));
}
}
}
};
let state = ds.read(config);
let (state, diagnostics) = match state {
Ok(s) => (
Some(tfplugin6::DynamicValue {
msgpack: s.msg_pack(),
json: vec![],
}),
vec![],
),
Err(errs) => (None, errs.to_tfplugin_diags()),
};
let reply = tfplugin6::read_data_source::Response {
state,
deferred: None,
diagnostics,
};
Ok(Response::new(reply))
}
/// GetFunctions returns the definitions of all functions.
async fn get_functions(
    &self,
    _request: Request<tfplugin6::get_functions::Request>,
) -> Result<Response<tfplugin6::get_functions::Response>, Status> {
    // Provider-defined functions are not supported yet.
    tracing::error!("get_functions");
    todo!("get_functions")
}
/// ////// Provider-contributed Functions
async fn call_function(
    &self,
    _request: Request<tfplugin6::call_function::Request>,
) -> Result<Response<tfplugin6::call_function::Response>, Status> {
    // Provider-defined functions are not supported yet.
    tracing::error!("call_function");
    todo!("call_function")
}
/// ////// Graceful Shutdown
async fn stop_provider(
    &self,
    _request: Request<tfplugin6::stop_provider::Request>,
) -> Result<Response<tfplugin6::stop_provider::Response>, Status> {
    tracing::info!("stop_provider");
    // Cancels the shared token and parks this task forever; see `shutdown`.
    shutdown(&self.shutdown).await
}
}
/// gRPC handler for go-plugin's `GRPCController` service, through which the
/// client asks the plugin process to shut down.
pub struct Controller {
    /// Cancelled when a shutdown is requested via the `Shutdown` RPC.
    pub shutdown: CancellationToken,
}
/// Signals shutdown by cancelling `token`, then parks this task forever.
///
/// Whoever listens on the cancellation token is expected to tear the process
/// down; this future itself never completes (return type `!`).
async fn shutdown(token: &CancellationToken) -> ! {
    token.cancel();
    // `std::future::pending` never resolves — replaces the hand-rolled
    // `poll_fn(|_| Poll::Pending)` with the stdlib idiom.
    std::future::pending::<()>().await;
    unreachable!("we should have gone to sleep")
}
#[tonic::async_trait]
impl plugin::grpc_controller_server::GrpcController for Controller {
    /// Cancels the shared shutdown token and never returns; the surrounding
    /// runtime is expected to terminate the process.
    async fn shutdown(&self, _request: Request<plugin::Empty>) -> Result<Response<plugin::Empty>> {
        shutdown(&self.shutdown).await
    }
}

View file

@ -0,0 +1,88 @@
mod convert;
mod grpc;
use std::collections::HashMap;
use tokio_util::sync::CancellationToken;
use crate::framework::datasource::DataSource;
use crate::framework::provider::Provider;
pub use grpc::plugin::grpc_controller_server::GrpcControllerServer;
pub use grpc::tfplugin6::provider_server::ProviderServer;
pub use grpc::Controller;
use self::grpc::tfplugin6;
/// Implements the Terraform provider gRPC service on top of a user-supplied
/// `Provider` implementation.
pub struct ProviderHandler {
    /// Cancelled to shut the plugin process down.
    shutdown: CancellationToken,
    /// Delayed diagnostics reporting in `GetProviderSchema` for better UX:
    /// `Err` holds diagnostics collected during construction.
    state: Result<ProviderState, Vec<tfplugin6::Diagnostic>>,
}
/// The successfully-constructed provider state: all registered data sources,
/// keyed by their full name as produced by `DataSource::name`.
struct ProviderState {
    data_sources: HashMap<String, Box<dyn DataSource>>,
}
impl ProviderHandler {
    /// Creates a new `ProviderHandler`.
    ///
    /// This function is infallible, as it is not called during a time where
    /// reporting errors nicely is possible. If there's an error (e.g. two
    /// data sources registered under the same name), we just taint our
    /// internal state and report the diagnostics in `GetProviderSchema`.
    pub fn new(shutdown: CancellationToken, provider: &dyn Provider) -> Self {
        let name = provider.name();
        let mut data_sources = HashMap::new();
        let mut errors = vec![];
        for ds in provider.data_sources() {
            let ds_name = ds.name(&name);
            // `insert` returns the previous value when the key already existed.
            if data_sources.insert(ds_name.clone(), ds).is_some() {
                errors.push(tfplugin6::Diagnostic {
                    severity: tfplugin6::diagnostic::Severity::Error as _,
                    summary: format!("data source {ds_name} exists more than once"),
                    detail: "".to_owned(),
                    attribute: None,
                });
            }
        }
        // `is_empty` instead of `len() > 0` (idiomatic; clippy::len_zero).
        let state = if errors.is_empty() {
            Ok(ProviderState { data_sources })
        } else {
            Err(errors)
        };
        Self { shutdown, state }
    }

    /// Collects the schemas of everything this provider offers.
    ///
    /// If construction recorded errors, returns them as diagnostics together
    /// with empty schema maps.
    fn get_schemas(&self) -> Schemas {
        let state = match &self.state {
            Ok(state) => state,
            Err(errors) => {
                return Schemas {
                    resources: HashMap::new(),
                    data_sources: HashMap::new(),
                    diagnostics: errors.clone(),
                }
            }
        };
        let data_sources = state
            .data_sources
            .iter()
            .map(|(name, ds)| (name.to_owned(), ds.schema().to_tfplugin()))
            .collect::<HashMap<String, tfplugin6::Schema>>();
        Schemas {
            // No managed resources are supported yet.
            resources: HashMap::new(),
            data_sources,
            diagnostics: vec![],
        }
    }
}
/// Aggregated schema information produced by `get_schemas`, mirroring the
/// payload of the `GetProviderSchema` response.
struct Schemas {
    /// Managed resource schemas, keyed by type name (currently always empty).
    resources: HashMap<String, tfplugin6::Schema>,
    /// Data source schemas, keyed by type name.
    data_sources: HashMap<String, tfplugin6::Schema>,
    /// Errors collected during provider construction, if any.
    diagnostics: Vec<tfplugin6::Diagnostic>,
}

279
terustform/src/values.rs Normal file
View file

@ -0,0 +1,279 @@
// check terraform-plugin-go tfprotov6/internal/toproto for conversions
// tftypes for types and values
use std::{
collections::{BTreeMap, HashMap},
io::{self, Read},
};
use crate::framework::{DResult, Diagnostics};
/// A type constraint for Terraform values, mirroring `tftypes.Type` from
/// terraform-plugin-go.
#[derive(Debug)]
pub enum Type {
    /// A boolean.
    Bool,
    /// A number; stored as `f64` in `ValueKind::Number`.
    Number,
    /// A UTF-8 string.
    String,
    /// Any type, determined at runtime (decoding is not implemented yet).
    Dynamic,
    /// A list of elements of the same type.
    List {
        elem: Box<Type>,
    },
    /// A bunch of unordered string-value pair of the same type.
    Map {
        elem: Box<Type>,
    },
    /// A set of unique values of the same type.
    Set {
        elem: Box<Type>,
    },
    /// A bunch of unordered string-value pairs of different types.
    /// The attributes are statically known.
    Object {
        attrs: HashMap<String, Type>,
        /// The attributes in `attrs` that are optional.
        /// Always empty for now because of JSON reasons.
        optionals: Vec<String>,
    },
    /// An ordered list of values of different types.
    Tuple {
        elems: Vec<Type>,
    },
}
impl Type {
pub fn to_json(&self) -> String {
let value = self.to_json_inner();
serde_json::to_string(&value).unwrap()
}
pub fn to_json_inner(&self) -> serde_json::Value {
use serde_json::Value;
let compound =
|tag: &str, inner: Value| Value::Array(vec![Value::String(tag.to_owned()), inner]);
match self {
Self::Bool => Value::String("bool".to_owned()),
Self::String => Value::String("string".to_owned()),
Self::Number => Value::String("number".to_owned()),
Self::Dynamic => Value::String("dynamic".to_owned()),
Self::List { elem } => compound("list", elem.to_json_inner()),
Self::Map { elem } => compound("map", elem.to_json_inner()),
Self::Set { elem } => compound("set", elem.to_json_inner()),
Self::Object {
attrs,
optionals: _,
} => compound(
"object",
Value::Object(
attrs
.iter()
.map(|(k, v)| (k.clone(), v.to_json_inner()))
.collect(),
),
),
Self::Tuple { elems } => compound(
"tuple",
elems.iter().map(|elem| elem.to_json_inner()).collect(),
),
}
}
}
/// A Terraform value: either a known value, the "unknown" marker, or null.
#[derive(Debug)]
pub enum Value {
    /// A concrete, fully-known value.
    Known(ValueKind),
    /// Terraform's "unknown" marker (encoded as a MessagePack extension
    /// value; see `msg_pack_inner`).
    Unknown,
    /// Explicit null / absence of a value.
    Null,
}
/// The payload of a known [`Value`].
#[derive(Debug)]
pub enum ValueKind {
    String(String),
    /// All numbers are stored as `f64`; integers are detected at encode time.
    Number(f64),
    Bool(bool),
    // List, Set and Tuple all serialize as MessagePack arrays.
    List(Vec<Value>),
    Set(Vec<Value>),
    // Map and Object serialize as MessagePack maps with string keys;
    // `BTreeMap` keeps the key order deterministic.
    Map(BTreeMap<String, Value>),
    Tuple(Vec<Value>),
    Object(BTreeMap<String, Value>),
}
// MessagePack (de)serialization for `Value`, mirroring terraform-plugin-go's
// tftypes/value.go:MarshalMsgPack.
impl Value {
    /// Serializes this value to a MessagePack byte buffer.
    pub fn msg_pack(&self) -> Vec<u8> {
        let mut buf = Vec::new();
        self.msg_pack_inner(&mut buf)
            .expect("writing to Vec<u8> cannot fail");
        buf
    }
    /// Appends the MessagePack encoding of this value to `wr`.
    ///
    /// Only fails if the underlying writer fails, which cannot happen for
    /// `Vec<u8>` (see `msg_pack`).
    pub fn msg_pack_inner(&self, wr: &mut Vec<u8>) -> std::io::Result<()> {
        use rmp::encode as mp;
        let known = match self {
            Value::Unknown => {
                // fixext1 with type 0 and payload 0: the extension value
                // Terraform uses to mark an unknown value.
                wr.extend_from_slice(&[0xd4, 0, 0]);
                return Ok(());
            }
            Value::Null => {
                mp::write_nil(wr)?;
                return Ok(());
            }
            Value::Known(known) => known,
        };
        match known {
            ValueKind::String(s) => {
                mp::write_str(wr, s)?;
            }
            &ValueKind::Number(n) => {
                if n.is_infinite() {
                    if n.signum() == -1.0 {
                        mp::write_f64(wr, f64::NEG_INFINITY)?;
                    } else {
                        mp::write_f64(wr, f64::INFINITY)?;
                    }
                } else if (n as i64 as f64) == n {
                    // is int: the value round-trips losslessly through i64,
                    // so encode it as an integer
                    mp::write_i64(wr, n as i64)?;
                } else {
                    mp::write_f64(wr, n)?;
                }
                // Terraform handles bigfloats but we do emphatically not care
            }
            ValueKind::Bool(b) => {
                mp::write_bool(wr, *b)?;
            }
            // Lists, sets, and tuples all encode as MessagePack arrays.
            ValueKind::List(elems) | ValueKind::Set(elems) | ValueKind::Tuple(elems) => {
                mp::write_array_len(wr, elems.len().try_into().unwrap())?;
                for elem in elems {
                    elem.msg_pack_inner(wr)?;
                }
            }
            // Maps and objects both encode as MessagePack maps, string-keyed.
            ValueKind::Map(o) | ValueKind::Object(o) => {
                mp::write_map_len(wr, o.len().try_into().unwrap())?;
                for (key, val) in o {
                    mp::write_str(wr, key)?;
                    val.msg_pack_inner(wr)?;
                }
            }
        }
        Ok(())
    }
    /// Deserializes a value of type `typ` from MessagePack bytes.
    pub fn msg_unpack(data: &[u8], typ: &Type) -> DResult<Self> {
        tracing::debug!(?typ, ?data, "Unpacking message");
        let mut read = io::Cursor::new(data);
        Self::msg_unpack_inner(&mut read, typ)
    }
    /// Decodes a single value of type `typ` from the cursor, advancing it
    /// past the bytes consumed.
    fn msg_unpack_inner(rd: &mut io::Cursor<&[u8]>, typ: &Type) -> DResult<Self> {
        use rmp::decode as mp;
        // Any value may be nil, regardless of its declared type.
        if let Ok(()) = mp::read_nil(rd) {
            return Ok(Value::Null);
        }
        // NOTE(review): this assumes a failed `read_nil` consumed exactly one
        // byte; on empty input `position()` is 0 and the subtraction would
        // panic on underflow — confirm against rmp's behavior.
        rd.set_position(rd.position() - 1); // revert past the nil
        let read_string = |rd: &mut io::Cursor<&[u8]>| -> DResult<String> {
            // NOTE(review): the clamp silently truncates strings longer than
            // 1 MiB rather than reporting an error.
            let len = std::cmp::min(mp::read_str_len(rd)?, 1024 * 1024); // you're not gonna get more than a 1MB string...
            let mut buf = vec![0; len as usize];
            rd.read_exact(&mut buf)?;
            Ok(String::from_utf8(buf)?)
        };
        let value = match typ {
            Type::Bool => {
                let b = mp::read_bool(rd)?;
                ValueKind::Bool(b)
            }
            Type::Number => {
                // A number may arrive as int, f32, or f64; try each in turn,
                // rewinding the cursor between failed attempts.
                let prev = rd.position();
                if let Ok(int) = mp::read_int::<i64, _>(rd) {
                    ValueKind::Number(int as f64)
                } else {
                    rd.set_position(prev);
                    if let Ok(f32) = mp::read_f32(rd) {
                        ValueKind::Number(f32 as f64)
                    } else {
                        rd.set_position(prev);
                        let f64 = mp::read_f64(rd)?;
                        ValueKind::Number(f64)
                    }
                }
            }
            Type::String => ValueKind::String(read_string(rd)?),
            Type::Dynamic => todo!("dynamic"),
            Type::List { elem } => {
                let len = mp::read_array_len(rd)?;
                let elems = (0..len)
                    .map(|_| Value::msg_unpack_inner(rd, &elem))
                    .collect::<Result<Vec<_>, _>>()?;
                ValueKind::List(elems)
            }
            Type::Map { elem } => {
                let len = mp::read_map_len(rd)?;
                let elems = (0..len)
                    .map(|_| -> DResult<_> {
                        let key = read_string(rd)?;
                        let value = Value::msg_unpack_inner(rd, &elem)?;
                        Ok((key, value))
                    })
                    .collect::<DResult<BTreeMap<_, _>>>()?;
                ValueKind::Map(elems)
            }
            Type::Set { elem } => {
                let len = mp::read_array_len(rd)?;
                let elems = (0..len)
                    .map(|_| Value::msg_unpack_inner(rd, &elem))
                    .collect::<Result<Vec<_>, _>>()?;
                ValueKind::Set(elems)
            }
            Type::Object { attrs, optionals } => {
                // Optional attributes are not supported yet (see `Type::Object`).
                assert!(optionals.is_empty());
                let len = mp::read_map_len(rd)?;
                // Strict: the wire object must have exactly the schema's attrs.
                if attrs.len() != (len as usize) {
                    return Err(Diagnostics::error_string(format!(
                        "expected {} attrs, found {len} attrs in object",
                        attrs.len()
                    )));
                }
                let elems = (0..len)
                    .map(|_| -> DResult<_> {
                        let key = read_string(rd)?;
                        let typ = attrs.get(&key).ok_or_else(|| {
                            Diagnostics::error_string(format!("unexpected attribute: '{key}'"))
                        })?;
                        let value = Value::msg_unpack_inner(rd, &typ)?;
                        Ok((key, value))
                    })
                    .collect::<DResult<BTreeMap<_, _>>>()?;
                ValueKind::Object(elems)
            }
            Type::Tuple { elems } => {
                let len = mp::read_array_len(rd)?;
                // Tuples are fixed-arity: wire length must match the schema.
                if elems.len() != (len as usize) {
                    return Err(Diagnostics::error_string(format!(
                        "expected {} elems, found {len} elems in tuple",
                        elems.len()
                    )));
                }
                let elems = elems
                    .iter()
                    .map(|typ| Value::msg_unpack_inner(rd, &typ))
                    .collect::<Result<Vec<_>, _>>()?;
                ValueKind::Tuple(elems)
            }
        };
        Ok(Value::Known(value))
    }
}