Retrieval-Augmented Generation
Advanced RAG implementation from the Scrimba course.
Complete pipeline — query → embedding → vector search → context-aware AI response — backed by a movie/podcast database.
/**
 * Retrieval-Augmented Generation pipeline: embeds the user query, retrieves
 * the most similar documents from the vector store, and asks the chat model
 * to answer using only that retrieved context.
 *
 * @property openAI client used for both embeddings and chat completions
 * @property supabase vector store queried via similarity search
 */
class RAGSystem(private val openAI: OpenAI, private val supabase: Supabase) {

    /**
     * Answers [userQuery] grounded in retrieved documents.
     *
     * @param userQuery natural-language question; must not be blank
     * @param threshold minimum similarity score for a document to be retrieved
     *   (default 0.78, the original hard-coded value)
     * @param matchCount maximum number of documents to retrieve (default 3)
     * @return the model's answer plus the source label of each document used
     * @throws IllegalArgumentException if [userQuery] is blank
     * @throws IllegalStateException if the embedding or chat API returns an
     *   empty result set
     */
    suspend fun query(
        userQuery: String,
        threshold: Double = 0.78,
        matchCount: Int = 3,
    ): RAGResponse {
        require(userQuery.isNotBlank()) { "userQuery must not be blank" }

        // Embed the query for similarity search. firstOrNull + checkNotNull
        // replaces first(), which threw a bare NoSuchElementException when the
        // API returned no embedding data.
        val queryEmbedding = checkNotNull(
            openAI.createEmbedding(userQuery).data.firstOrNull()
        ) { "embedding API returned no data for query" }.embedding

        // Retrieve documents above the similarity threshold.
        val relevantDocs = supabase.matchDocuments(
            embedding = queryEmbedding,
            threshold = threshold,
            count = matchCount,
        )

        // Build a labelled context block. `as? String ?: "unknown"` replaces
        // the original unchecked `as String` cast, which crashed whenever a
        // document had a missing or non-string "source" metadata entry.
        // NOTE: when relevantDocs is empty the context is empty and the model
        // is told to use an empty context — same behavior as before.
        val context = relevantDocs.joinToString("\n\n") { doc ->
            "[${doc.metadata["source"] as? String ?: "unknown"}]: ${doc.content}"
        }

        // Ask the chat model, constrained to the retrieved context.
        val response = openAI.chatCompletion(
            listOf(
                Message("system", "Use only this context: $context"),
                Message("user", userQuery),
            )
        )
        val answer = checkNotNull(response.choices.firstOrNull()) {
            "chat API returned no choices"
        }.message.content

        return RAGResponse(
            answer = answer,
            sources = relevantDocs.map { it.metadata["source"] as? String ?: "unknown" },
        )
    }
}