from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor
from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
from dotenv import load_dotenv
import os
from openai import OpenAI
# Load environment variables (e.g. TFY_API_KEY) from a local .env file.
load_dotenv()
# Install a global tracer provider so spans created through the `trace`
# API flow through the span processor registered below.
provider = TracerProvider()
trace.set_tracer_provider(provider)
# NOTE(review): returns None when the variable is unset, which makes the
# Authorization header below read "Bearer None" — consider failing fast.
TFY_API_KEY = os.environ.get("TFY_API_KEY")
# OTLP exporter over HTTP. The endpoint and project FQN values are
# template placeholders — replace them with your backend's real values.
otlp_exporter = OTLPSpanExporter(
    endpoint="{enter_your_api_endpoint}/v1/traces",
    headers={
        "Authorization": f"Bearer {TFY_API_KEY}",
        "TFY-Tracing-Project": "<enter_your_tracing_project_fqn>",
    }
)
# BatchSpanProcessor buffers spans and exports them asynchronously in the
# background (recommended for production over SimpleSpanProcessor).
span_processor = BatchSpanProcessor(otlp_exporter)
provider.add_span_processor(span_processor)
# Module-level tracer used by generate_ai_response below.
tracer = trace.get_tracer(__name__)
# OpenAI client — presumably picks up OPENAI_API_KEY from the
# environment loaded above; confirm against your deployment.
client = OpenAI()
def generate_ai_response(input_text):
    """Send *input_text* to the gpt-4 chat endpoint and return the reply text.

    The request is wrapped in an "OpenAI-Trace" span that records the
    prompt, model parameters, and the completion. If the API call fails,
    the exception is recorded on the span and the span status is set to
    ERROR before the exception propagates to the caller.

    Args:
        input_text: The user prompt to send to the model.

    Returns:
        The assistant message content from the first choice (may be None
        if the API returns an empty message).

    Raises:
        Exception: whatever the underlying OpenAI client raises; it is
            re-raised unchanged after being recorded on the span.
    """
    # Single source of truth for the request parameters, so the span
    # attributes and the API call cannot drift apart.
    model_name = "gpt-4"
    temperature = 0.7
    with tracer.start_as_current_span("OpenAI-Trace") as span:
        # Record request-side attributes before the call so they are
        # captured even when the API call fails.
        span.set_attribute("input.value", input_text)
        span.set_attribute("model.name", model_name)
        span.set_attribute("temperature", temperature)
        span.set_attribute("gen_ai.prompt.0.role", "user")
        span.set_attribute("gen_ai.prompt.0.content", input_text)
        try:
            response = client.chat.completions.create(
                messages=[{"role": "user", "content": input_text}],
                model=model_name,
                temperature=temperature,
            )
        except Exception as exc:
            # Surface the failure on the trace, then let callers handle it.
            span.record_exception(exc)
            span.set_status(trace.Status(trace.StatusCode.ERROR, str(exc)))
            raise
        # Record the completion on the span before returning it.
        output_content = response.choices[0].message.content
        span.set_attribute("output.value", output_content)
        span.set_attribute("gen_ai.completion.0.role", "assistant")
        span.set_attribute("gen_ai.completion.0.content", output_content)
        return output_content
# Demo entry point: trace one chat completion and show the model's answer.
if __name__ == "__main__":
    prompt = "Explain the concept of AI in 50 words"
    print(generate_ai_response(prompt))