New, updated opentelemetry tracing exporter.

Tom Alexander 2023-08-10 21:22:06 -04:00
parent 729be9302b
commit 290a700a22
Signed by: talexander
GPG Key ID: D3A179C9A53C0EDE
4 changed files with 80 additions and 28 deletions

Cargo.toml

@@ -22,12 +22,13 @@ path = "src/main.rs"
[dependencies]
nom = "7.1.1"
opentelemetry = { version = "0.20.0", optional = true, default-features = false, features = ["trace", "rt-tokio"] }
opentelemetry-otlp = { version = "0.13.0", optional = true }
opentelemetry-semantic-conventions = { version = "0.12.0", optional = true }
tokio = { version = "1.30.0", optional = true, default-features = false, features = ["rt", "rt-multi-thread"] }
tracing = { version = "0.1.37", optional = true }
# opentelemetry = "0.17.0"
# opentelemetry-jaeger = "0.16.0"
# tracing = "0.1.37"
# tracing-opentelemetry = "0.17.2"
# tracing-subscriber = {version="0.3.16", features=["env-filter"]}
tracing-opentelemetry = { version = "0.20.0", optional = true }
tracing-subscriber = { version = "0.3.17", optional = true, features = ["env-filter"] }
[build-dependencies]
walkdir = "2.3.3"
@@ -35,7 +36,7 @@ walkdir = "2.3.3"
[features]
default = ["compare", "tracing"]
compare = []
tracing = ["dep:tracing"]
tracing = ["dep:opentelemetry", "dep:opentelemetry-otlp", "dep:opentelemetry-semantic-conventions", "dep:tokio", "dep:tracing", "dep:tracing-opentelemetry", "dep:tracing-subscriber"]
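# The whole OpenTelemetry/tokio/tracing stack is optional and only pulled in through this feature;
# e.g. `cargo build --no-default-features --features compare` builds without it.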
[profile.release]
lto = true

Makefile

@@ -47,7 +47,10 @@ unittest:
.PHONY: jaeger
jaeger:
> docker run -d --rm --name organicdocker -p 6831:6831/udp -p 6832:6832/udp -p 16686:16686 -p 14268:14268 jaegertracing/all-in-one:latest
# 4317 is for OTLP gRPC, 4318 for OTLP HTTP. We currently use gRPC, but both ports are forwarded regardless.
#
# These flags didn't help even though they seemed like they would: --collector.otlp.grpc.max-message-size=10000000 --collector.queue-size=20000 --collector.num-workers=100
> docker run -d --rm --name organicdocker -p 4317:4317 -p 4318:4318 -p 16686:16686 -e COLLECTOR_OTLP_ENABLED=true jaegertracing/all-in-one:1.47 --collector.grpc-server.max-message-size=10000000
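# The Jaeger UI from this container is served on port 16686.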
.PHONY: jaegerweb
jaegerweb:

src/init_tracing.rs

@@ -1,34 +1,70 @@
// use tracing_subscriber::layer::SubscriberExt;
// use tracing_subscriber::util::SubscriberInitExt;
// use tracing_subscriber::EnvFilter;
#[cfg(feature = "tracing")]
use opentelemetry_otlp::WithExportConfig;
#[cfg(feature = "tracing")]
use tracing_subscriber::prelude::__tracing_subscriber_SubscriberExt;
#[cfg(feature = "tracing")]
use tracing_subscriber::util::SubscriberInitExt;
const SERVICE_NAME: &'static str = "organic";
// Despite the obvious verbosity it causes, everything relating to tracing in these functions is fully qualified. The tracing feature involves multiple libraries working together, so it is beneficial to see which library contributes which bits.
#[cfg(feature = "tracing")]
pub fn init_telemetry() -> Result<(), Box<dyn std::error::Error>> {
    // let env_filter = EnvFilter::try_from_default_env().unwrap_or(EnvFilter::new("warn"));
    // By default this will hit http://localhost:4317 with a gRPC payload.
    // TODO: I think the endpoint can be controlled by the OTEL_EXPORTER_OTLP_TRACES_ENDPOINT env variable instead of being hard-coded in this code base (a sketch follows this file). Regardless, I am the only developer right now, so I am not too concerned.
    let exporter = opentelemetry_otlp::new_exporter()
        .tonic()
        .with_endpoint("http://localhost:4317/v1/traces");
    // let stdout = tracing_subscriber::fmt::Layer::new()
    //     .pretty()
    //     .with_file(true)
    //     .with_line_number(true)
    //     .with_thread_ids(false)
    //     .with_target(false);
    let tracer = opentelemetry_otlp::new_pipeline()
        .tracing()
        .with_exporter(exporter)
        .with_trace_config(opentelemetry::sdk::trace::config().with_resource(
            opentelemetry::sdk::Resource::new(vec![opentelemetry::KeyValue::new(
                opentelemetry_semantic_conventions::resource::SERVICE_NAME,
                SERVICE_NAME.to_string(),
            )]),
        ))
        // If I do install_batch then 1K+ spans will get orphaned off into their own trace and I get the error message "OpenTelemetry trace error occurred. cannot send message to batch processor as the channel is closed"
        //
        // If I do install_simple then it only creates 1 trace (which is good!) but my console gets spammed with this concerning log message that makes me think it might be dropping the extra spans on the floor: "OpenTelemetry trace error occurred. Exporter otlp encountered the following error(s): the grpc server returns error (Unknown error): , detailed error message: Service was not ready: transport error"
        //
        // I suspect it is related to this bug: https://github.com/open-telemetry/opentelemetry-rust/issues/888
        //
        // .install_simple()
        .install_batch(opentelemetry::runtime::Tokio)
        .expect("Error: Failed to initialize the tracer.");
    // opentelemetry::global::set_text_map_propagator(opentelemetry_jaeger::Propagator::new());
    // let tracer = opentelemetry_jaeger::new_pipeline()
    //     .with_service_name("organic")
    //     .install_simple()?;
    let subscriber = tracing_subscriber::Registry::default();
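    // EnvFilter::try_from_default_env() reads the RUST_LOG environment variable; fall back to WARN when it is unset or unparsable.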
    let level_filter_layer = tracing_subscriber::EnvFilter::try_from_default_env()
        .unwrap_or(tracing_subscriber::EnvFilter::new("WARN"));
    let tracing_layer = tracing_opentelemetry::layer().with_tracer(tracer);
    // let opentelemetry = tracing_opentelemetry::layer().with_tracer(tracer);
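    // TraceContextPropagator propagates span context using the W3C Trace Context (traceparent/tracestate) headers.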
    opentelemetry::global::set_text_map_propagator(
        opentelemetry::sdk::propagation::TraceContextPropagator::new(),
    );
    // tracing_subscriber::registry()
    //     // .with(env_filter)
    //     .with(opentelemetry)
    //     // .with(stdout)
    //     .try_init()?;
    subscriber
        .with(level_filter_layer)
        .with(tracing_layer)
        .try_init()?;
    Ok(())
}
#[cfg(feature = "tracing")]
pub fn shutdown_telemetry() -> Result<(), Box<dyn std::error::Error>> {
    opentelemetry::global::shutdown_tracer_provider();
    Ok(())
}
#[cfg(not(feature = "tracing"))]
pub fn init_telemetry() -> Result<(), Box<dyn std::error::Error>> {
    Ok(())
}
#[cfg(not(feature = "tracing"))]
pub fn shutdown_telemetry() -> Result<(), Box<dyn std::error::Error>> {
    // opentelemetry::global::shutdown_tracer_provider();
    Ok(())
}
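The TODO in init_telemetry mentions OTEL_EXPORTER_OTLP_TRACES_ENDPOINT. A minimal sketch of what honoring that variable could look like, reusing the same opentelemetry-otlp tonic builder as the code above (hypothetical, not part of this commit):

// Hypothetical: prefer OTEL_EXPORTER_OTLP_TRACES_ENDPOINT when it is set,
// otherwise keep the hard-coded local collector endpoint used above.
let endpoint = std::env::var("OTEL_EXPORTER_OTLP_TRACES_ENDPOINT")
    .unwrap_or_else(|_| String::from("http://localhost:4317/v1/traces"));
let exporter = opentelemetry_otlp::new_exporter()
    .tonic()
    .with_endpoint(endpoint);

The opentelemetry-otlp exporter may also honor the standard OTLP environment variables on its own, so this explicit override could be redundant; it is shown only to make the fallback behavior obvious.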

src/main.rs

@@ -14,7 +14,19 @@ use crate::init_tracing::init_telemetry;
use crate::init_tracing::shutdown_telemetry;
mod init_tracing;
#[cfg(not(feature = "tracing"))]
fn main() -> Result<(), Box<dyn std::error::Error>> {
    main_body()
}
#[cfg(feature = "tracing")]
fn main() -> Result<(), Box<dyn std::error::Error>> {
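    // A hand-built tokio runtime is needed here because install_batch(opentelemetry::runtime::Tokio) in init_telemetry requires an active tokio runtime for its batch span processor.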
    let rt = tokio::runtime::Runtime::new()?;
    let result = rt.block_on(async { main_body() });
    result
}
fn main_body() -> Result<(), Box<dyn std::error::Error>> {
    init_telemetry()?;
    run_compare(
        std::env::args()