Below are ready-to-paste Google Apps Script examples you can add to your blog post, covering the two most useful embedding workflows:
- A) Generate an embedding for text
- B) Store embeddings in a Sheet
- C) Semantic search (find closest chunks) using cosine similarity
- D) Mini RAG flow: retrieve top matches + ask Gemini with context
Replace `YOUR_API_KEY_HERE` with your key.
These examples use the Generative Language API style endpoint.
A) Minimal: Create an embedding for a text string
/**
 * Create an embedding for a string using Gemini embeddings.
 *
 * @param {string} text - The text to embed.
 * @returns {number[]} The embedding vector.
 * @throws {Error} If the HTTP request fails or the response has no embedding.
 */
function geminiEmbedText_(text) {
  const API_KEY = 'YOUR_API_KEY_HERE';
  // Model name can vary depending on your setup.
  // Common examples: "text-embedding-004" (Google), or a Gemini embedding model name.
  const MODEL = 'text-embedding-004';
  const url = `https://generativelanguage.googleapis.com/v1beta/models/${MODEL}:embedContent?key=${encodeURIComponent(API_KEY)}`;
  const payload = {
    content: {
      parts: [{ text }]
    }
  };
  const res = UrlFetchApp.fetch(url, {
    method: 'post',
    contentType: 'application/json',
    payload: JSON.stringify(payload),
    muteHttpExceptions: true
  });
  // muteHttpExceptions turns 4xx/5xx into normal responses, so check the
  // status explicitly instead of relying on the JSON shape alone.
  const code = res.getResponseCode();
  if (code !== 200) {
    throw new Error('Embedding request failed (HTTP ' + code + '): ' + res.getContentText());
  }
  const json = JSON.parse(res.getContentText());
  if (!json.embedding || !json.embedding.values) {
    throw new Error('Embedding failed: ' + res.getContentText());
  }
  return json.embedding.values; // number[]
}
/** Quick manual check: embed a sample question and log the vector shape. */
function testEmbedding_() {
  const vector = geminiEmbedText_('How do I reset my password?');
  Logger.log(`Vector length: ${vector.length}`);
  Logger.log(vector.slice(0, 8)); // peek at the leading values
}
B) Store document chunks + embeddings in Google Sheets
This creates a simple “vector table” in Sheets with three columns:
- A: `chunkId`
- B: `text`
- C: `embeddingJSON`
/**
 * Create (or reset) the 'Embeddings' sheet used to store chunks + vectors.
 * Writes a frozen header row: chunkId | text | embeddingJSON.
 */
function initEmbeddingSheet_() {
  const SHEET_NAME = 'Embeddings';
  const spreadsheet = SpreadsheetApp.getActive();
  let sheet = spreadsheet.getSheetByName(SHEET_NAME);
  if (!sheet) {
    sheet = spreadsheet.insertSheet(SHEET_NAME);
  }
  sheet.clear();
  const header = ['chunkId', 'text', 'embeddingJSON'];
  sheet.getRange(1, 1, 1, header.length).setValues([header]);
  sheet.setFrozenRows(1);
}
/**
 * Add chunks (text rows) and embed them.
 *
 * Appends one row per chunk to the 'Embeddings' sheet:
 * [chunkId, original text, JSON-serialized embedding vector].
 *
 * @param {string[]} chunks - The text chunks to embed and store.
 * @throws {Error} If the Embeddings sheet has not been initialized.
 */
function addChunksAndEmbed_(chunks) {
  // Guard: getRange(..., 0, 3) throws on an empty batch, so bail out early.
  if (!chunks || chunks.length === 0) return;
  const ss = SpreadsheetApp.getActive();
  const sh = ss.getSheetByName('Embeddings');
  if (!sh) throw new Error('Run initEmbeddingSheet_() first.');
  const startRow = sh.getLastRow() + 1;
  const rows = chunks.map((t, i) => {
    // chunk ids continue from the existing row count (header is row 1).
    const chunkId = 'chunk_' + (startRow - 1 + i);
    const emb = geminiEmbedText_(t);
    return [chunkId, t, JSON.stringify(emb)];
  });
  sh.getRange(startRow, 1, rows.length, 3).setValues(rows);
}
/** Seed the Embeddings sheet with a few sample support-article chunks. */
function demoSeedEmbeddings_() {
  const sampleChunks = [
    'To reset your password, go to Settings > Security > Reset Password.',
    'If you cannot log in, use the account recovery page to verify your email.',
    'Two-factor authentication adds an extra layer of security to your account.',
    'Billing issues can be resolved by updating your payment method in Billing settings.'
  ];
  initEmbeddingSheet_();
  addChunksAndEmbed_(sampleChunks);
}
C) Semantic search in Sheets (cosine similarity)
This embeds a user query, compares it to stored vectors, and returns the top matches.
/**
 * Cosine similarity between two equal-length numeric vectors.
 * Returns 0 when either vector has zero magnitude (avoids divide-by-zero).
 *
 * @param {number[]} a
 * @param {number[]} b
 * @returns {number} Similarity in [-1, 1].
 * @throws {Error} If the vectors differ in length.
 */
function cosineSimilarity_(a, b) {
  if (a.length !== b.length) throw new Error('Vector length mismatch.');
  let dot = 0;
  let sumSqA = 0;
  let sumSqB = 0;
  a.forEach((ai, idx) => {
    const bi = b[idx];
    dot += ai * bi;
    sumSqA += ai * ai;
    sumSqB += bi * bi;
  });
  if (sumSqA === 0 || sumSqB === 0) return 0;
  return dot / (Math.sqrt(sumSqA) * Math.sqrt(sumSqB));
}
/**
 * Returns top N matches from the Embeddings sheet for a given query.
 *
 * Embeds the query, scores every stored row by cosine similarity, and
 * returns the best matches sorted highest-score first.
 *
 * @param {string} query - Natural-language search text.
 * @param {number} [topN=3] - How many matches to return.
 * @returns {{chunkId: string, text: string, score: number}[]}
 * @throws {Error} If the Embeddings sheet is missing.
 */
function semanticSearch_(query, topN) {
  // ?? (not ||) so an explicit topN of 0 is honored instead of becoming 3.
  topN = topN ?? 3;
  const qVec = geminiEmbedText_(query);
  const ss = SpreadsheetApp.getActive();
  const sh = ss.getSheetByName('Embeddings');
  if (!sh) throw new Error('Missing Embeddings sheet.');
  const values = sh.getDataRange().getValues();
  // Skip header row
  const rows = values.slice(1);
  const scored = rows.map(r => {
    const [chunkId, text, embeddingJson] = r;
    const vec = JSON.parse(embeddingJson || '[]');
    // Rows with no stored vector sink to the bottom with score -1.
    const score = vec.length ? cosineSimilarity_(qVec, vec) : -1;
    return { chunkId, text, score };
  });
  scored.sort((x, y) => y.score - x.score);
  return scored.slice(0, topN);
}
/** Manual check: run a sample query against the stored embeddings. */
function testSemanticSearch_() {
  const query = 'I forgot my password and can’t sign in';
  const topMatches = semanticSearch_(query, 3);
  Logger.log(JSON.stringify(topMatches, null, 2));
}
D) Mini-RAG: Retrieve top chunks, then ask Gemini with context
This is the “AI tutor” pattern:
- Embed query
- Retrieve closest chunks
- Send those chunks as context to Gemini via `generateContent`
/**
 * Call Gemini generateContent with a single user prompt and return the text reply.
 *
 * @param {string} prompt - Full prompt text (may embed retrieved context).
 * @returns {string} Concatenated text of the first candidate's parts.
 * @throws {Error} If the HTTP request fails or the response contains no text.
 */
function geminiGenerate_(prompt) {
  const API_KEY = 'YOUR_API_KEY_HERE';
  const MODEL = 'gemini-2.0-flash'; // adjust as needed
  const url = `https://generativelanguage.googleapis.com/v1beta/models/${MODEL}:generateContent?key=${encodeURIComponent(API_KEY)}`;
  const payload = {
    contents: [{ role: 'user', parts: [{ text: prompt }] }]
  };
  const res = UrlFetchApp.fetch(url, {
    method: 'post',
    contentType: 'application/json',
    payload: JSON.stringify(payload),
    muteHttpExceptions: true
  });
  // muteHttpExceptions turns 4xx/5xx into normal responses; surface them explicitly.
  const code = res.getResponseCode();
  if (code !== 200) {
    throw new Error('Generation failed (HTTP ' + code + '): ' + res.getContentText());
  }
  const json = JSON.parse(res.getContentText());
  const text =
    json?.candidates?.[0]?.content?.parts?.map(p => p.text).join('') || '';
  if (!text) throw new Error('Generation failed: ' + res.getContentText());
  return text;
}
/**
 * Mini-RAG: retrieve the top-3 matching chunks for a question, then ask
 * Gemini to answer using only those chunks as sources.
 *
 * @param {string} question - The user's question.
 * @returns {string} Gemini's answer text.
 */
function answerWithRAG_(question) {
  const topMatches = semanticSearch_(question, 3);
  const sourceBlocks = topMatches.map(
    (match, idx) => `Source ${idx + 1} (score ${match.score.toFixed(3)}):\n${match.text}`
  );
  const context = sourceBlocks.join('\n\n');
  // Assemble the grounded prompt line-by-line (equivalent to the trimmed template).
  const promptLines = [
    'You are a helpful support assistant. Use ONLY the provided sources.',
    "If the answer isn't in the sources, say you don't have enough information.",
    'SOURCES:',
    context,
    'QUESTION:',
    question,
    'ANSWER (concise, step-by-step if needed):'
  ];
  return geminiGenerate_(promptLines.join('\n'));
}
/** End-to-end demo: answer a sample question via the RAG flow and log it. */
function demoRAG_() {
  const answer = answerWithRAG_('How can I reset my password?');
  Logger.log(answer);
}