<?xml version="1.0" encoding="UTF-8"?>
  <rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom">
    <channel>
      <title>OpenLIT | OpenTelemetry-native GenAI and LLM Application Observability</title>
      <link>https://openlit.io/blogs</link>
      <description>OpenLIT is an open-source LLM observability platform built on OpenTelemetry. Monitor token usage, cost, and latency. Self-host for free under Apache 2.0.</description>
      <language>en-us</language>
      <managingEditor>contact@openlit.io (OpenLIT)</managingEditor>
      <webMaster>contact@openlit.io (OpenLIT)</webMaster>
      <lastBuildDate>Sat, 28 Mar 2026 00:00:00 GMT</lastBuildDate>
      <atom:link href="https://openlit.io/feed.xml" rel="self" type="application/rss+xml"/>
      
      <item>
        <guid>https://openlit.io/blogs/how-to-detect-hallucinations-in-your-rag-pipeline</guid>
        <title>How to Detect Hallucinations in Your RAG Pipeline (with Code Examples)</title>
        <link>https://openlit.io/blogs/how-to-detect-hallucinations-in-your-rag-pipeline</link>
        <description>Catch LLM hallucinations programmatically using OpenLIT&#39;s evaluation SDK. Includes Python code examples for hallucination, toxicity, and bias detection with OpenTelemetry export.</description>
        <pubDate>Sat, 28 Mar 2026 00:00:00 GMT</pubDate>
        <author>contact@openlit.io (OpenLIT)</author>
        <category>openlit</category><category>rag</category><category>hallucination</category><category>evaluation</category><category>llm</category><category>python</category>
      </item>

      <item>
        <guid>https://openlit.io/blogs/openlit-fleet-hub-at-scale</guid>
        <title>Fleet Hub Playbook for Multi-Region AI Observability</title>
        <link>https://openlit.io/blogs/openlit-fleet-hub-at-scale</link>
        <description>Coordinate fleets of OpenTelemetry collectors for GenAI workloads with OpenLIT Fleet Hub and the OpAMP protocol.</description>
        <pubDate>Fri, 07 Nov 2025 00:00:00 GMT</pubDate>
        <author>contact@openlit.io (OpenLIT)</author>
        <category>openlit</category><category>opentelemetry</category><category>llm</category><category>production</category><category>fleet-hub</category>
      </item>

      <item>
        <guid>https://openlit.io/blogs/openlit-openwebui</guid>
        <title>Monitoring LLM Usage in OpenWebUI with OpenLIT</title>
        <link>https://openlit.io/blogs/openlit-openwebui</link>
        <description>A step-by-step guide to integrating OpenLIT with OpenWebUI using pipelines. Covers installation, configuration, and practical use cases for LLM monitoring and observability, including token tracking, cost analysis, and GPU metrics.</description>
        <pubDate>Fri, 28 Feb 2025 00:00:00 GMT</pubDate>
        <author>contact@openlit.io (OpenLIT)</author>
        <category>openlit</category><category>open-webui</category><category>llm</category><category>observability</category><category>monitoring</category><category>opentelemetry</category>
      </item>

      <item>
        <guid>https://openlit.io/blogs/protect-prompt-injection</guid>
        <title>How to Protect Your OpenAI/LLM Apps from Prompt Injection Attacks</title>
        <link>https://openlit.io/blogs/protect-prompt-injection</link>
        <description>Learn how to safeguard your OpenAI and LLM apps from prompt injection attacks using UUIDs, input validation, and monitoring strategies with OpenLIT.</description>
        <pubDate>Wed, 23 Oct 2024 00:00:00 GMT</pubDate>
        <author>contact@openlit.io (OpenLIT)</author>
        <category>openlit</category><category>langchain</category><category>observability</category><category>prompt-injection</category><category>openai</category>
      </item>

      <item>
        <guid>https://openlit.io/blogs/observability-with-openlit</guid>
        <title>Unlocking Seamless GenAI &amp; LLM Observability with OpenLIT</title>
        <link>https://openlit.io/blogs/observability-with-openlit</link>
        <description>OpenLIT offers seamless, OpenTelemetry-native observability for GenAI and LLMs, simplifying performance and cost tracking.</description>
        <pubDate>Thu, 15 Aug 2024 00:00:00 GMT</pubDate>
        <author>contact@openlit.io (OpenLIT)</author>
        <category>openlit</category><category>llm</category><category>genai</category><category>observability</category>
      </item>

      <item>
        <guid>https://openlit.io/blogs/pipeline-for-llm-apps</guid>
        <title>Designing an Observability Pipeline for LLM Applications</title>
        <link>https://openlit.io/blogs/pipeline-for-llm-apps</link>
        <description>Observability for LLM applications encompasses performance tracking, user insights, and GPU monitoring. Learn how to design and implement a complete observability pipeline for your LLM applications.</description>
        <pubDate>Thu, 15 Aug 2024 00:00:00 GMT</pubDate>
        <author>contact@openlit.io (OpenLIT)</author>
        <category>openlit</category><category>llm</category><category>observability</category>
      </item>

    </channel>
  </rss>
