<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="UTF-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  <title>Prompt Engineering - Cheat Sheet</title>
  <link rel="stylesheet" href="../css/style.css">
</head>
<body>
<nav>
  <div class="nav-inner">
    <a href="/" class="nav-brand">AI Cheat Sheet</a>
    <div class="nav-links">
      <a href="/pages/terminology.html">Terminology</a>
      <a href="/pages/techniques.html">Techniques</a>
      <a href="/pages/use-cases.html">Use Cases</a>
      <a href="/pages/model-types.html">Model Types</a>
      <a href="/pages/prompts.html" class="active">Prompt Guide</a>
      <a href="/pages/math.html">Math &amp; Concepts</a>
      <a href="/pages/chat.html">Chat</a>
    </div>
    <button type="button" class="dark-toggle" id="darkToggle" aria-label="Toggle dark mode">🌙</button>
  </div>
</nav>
<script>
|
|
(function(){
|
|
var btn = document.getElementById('darkToggle');
|
|
var saved = localStorage.getItem('theme');
|
|
if(saved === 'dark' || (!saved && window.matchMedia('(prefers-color-scheme: dark)').matches)){
|
|
document.documentElement.setAttribute('data-theme','dark');
|
|
btn.textContent = '☀️';
|
|
}
|
|
btn.addEventListener('click', function(){
|
|
var isDark = document.documentElement.getAttribute('data-theme') === 'dark';
|
|
if(isDark){
|
|
document.documentElement.removeAttribute('data-theme');
|
|
btn.textContent = '🌙';
|
|
localStorage.setItem('theme','light');
|
|
} else {
|
|
document.documentElement.setAttribute('data-theme','dark');
|
|
btn.textContent = '☀️';
|
|
localStorage.setItem('theme','dark');
|
|
}
|
|
});
|
|
})();
|
|
</script>
|
|
|
|
<div class="hero">
  <h1>Prompt Engineering Guide</h1>
  <p>Techniques for getting the best results from language models.</p>
</div>
<div class="container">

  <h2 class="section-title">Prompt Patterns</h2>

  <div class="prompt-block">
    <span class="label">Zero-Shot</span>
    <h3>Just ask — no examples needed</h3>
    <p>The simplest approach: give the model a task directly. Works surprisingly well with capable models.</p>
    <div class="example"><strong>Prompt:</strong> "Translate the following English text to French: 'Hello, how are you?'"</div>
  </div>

  <div class="prompt-block">
    <span class="label">Few-Shot</span>
    <h3>Show examples to guide behavior</h3>
    <p>Include a few input-output examples in the prompt to teach the model the desired format or style.</p>
    <div class="example"><strong>Prompt:</strong><br>
      "Classify the sentiment:<br>
      'I love this!' → Positive<br>
      'This is terrible.' → Negative<br>
      'It's okay, I guess.' → ?"</div>
  </div>

  <div class="prompt-block">
    <span class="label">Chain-of-Thought</span>
    <h3>Think step by step</h3>
    <p>Asking the model to reason through a problem before answering improves accuracy on complex tasks.</p>
    <div class="example"><strong>Prompt:</strong> "A store has 50 apples. They sell 12 in the morning and receive 30 more. How many do they have?"</div>
    <div class="example"><strong>Without CoT:</strong> "80"<br>
      <strong>With CoT:</strong> "50 - 12 = 38. 38 + 30 = 68. Answer: 68"</div>
  </div>

  <div class="prompt-block">
    <span class="label">Role Prompting</span>
    <h3>Assign a persona</h3>
    <p>Telling the model to act as an expert in a domain primes it to use relevant knowledge and tone.</p>
    <div class="example"><strong>Prompt:</strong> "You are a senior Python developer. Review this code for best practices and security issues."</div>
  </div>

  <div class="prompt-block">
    <span class="label">Structured Output</span>
    <h3>Force a specific format</h3>
    <p>Specify the exact output format (JSON, CSV, markdown table) for programmatic use.</p>
    <div class="example"><strong>Prompt:</strong> "Extract all product names and prices from this text. Return as a JSON array with keys 'name' and 'price'."</div>
  </div>

  <div class="prompt-block">
    <span class="label">Self-Consistency</span>
    <h3>Ask multiple times, pick the best</h3>
    <p>Generate several answers and take the most common or highest-quality one. Improves reliability on reasoning tasks.</p>
  </div>

  <div class="prompt-block">
    <span class="label">ReAct (Reason + Act)</span>
    <h3>Think, act, observe, repeat</h3>
    <p>Alternate between reasoning about a problem and taking actions (searching, calculating) to gather information.</p>
    <div class="example"><strong>Prompt:</strong> "Thought: I need to find the population of Tokyo. Action: search('Tokyo population 2024')<br>Observation: Tokyo has 37 million people.<br>Thought: Now I can answer the question."</div>
  </div>
<h2 class="section-title">Prompt Tips</h2>
|
|
<div class="def-card">
|
|
<span class="category">Best Practice</span>
|
|
<h3>Be specific and detailed</h3>
|
|
<p>Vague prompts get vague answers. Specify format, length, tone, audience, and constraints.</p>
|
|
<div class="example">❌ "Write about AI."<br>
|
|
✅ "Write a 200-word blog post about AI in healthcare for a general audience. Use a friendly tone and include one real-world example."</div>
|
|
</div>
|
|
<div class="def-card">
|
|
<span class="category">Best Practice</span>
|
|
<h3>Use delimiters for clarity</h3>
|
|
<p>Separate instructions from data using quotes, XML tags, or dashes to help the model distinguish them.</p>
|
|
<div class="example"><strong>Prompt:</strong> "Summarize the text in <instructions> tags: <br><data>{paste article here}</data>"</div>
|
|
</div>
|
|
<div class="def-card">
|
|
<span class="category">Best Practice</span>
|
|
<h3>Provide context</h3>
|
|
<p>The more background you give, the better the model can tailor its response. Include relevant details, constraints, and goals.</p>
|
|
</div>
|
|
<div class="def-card">
|
|
<span class="category">Best Practice</span>
|
|
<h3>Iterate and refine</h3>
|
|
<p>First prompts are rarely perfect. Try variations, add examples, adjust constraints, and combine techniques.</p>
|
|
</div>
|
|
<div class="def-card">
|
|
<span class="category">Anti-Pattern</span>
|
|
<h3>Avoid ambiguous instructions</h3>
|
|
<p>"Make it better" or "fix this" without specifics leads to unpredictable results. State exactly what you want changed.</p>
|
|
</div>
|
|
<div class="def-card">
|
|
<span class="category">Anti-Pattern</span>
|
|
<h3>Don't overload the context window</h3>
|
|
<p>Pasting entire books or massive documents wastes tokens and can cause the model to miss key information. Summarize or use RAG.</p>
|
|
</div>
|
|
|
|
<h2 class="section-title">Template Examples</h2>
|
|
<div class="prompt-block">
|
|
<span class="label">Analysis Template</span>
|
|
<h3>Structured analysis prompt</h3>
|
|
<div class="example"><strong>Prompt:</strong>
|
|
"Analyze the following text and provide:
|
|
1. Key topics (bullet list)
|
|
2. Overall sentiment (positive/negative/neutral) with reasoning
|
|
3. Three most important quotes
|
|
4. A one-sentence summary
|
|
Text: {text}"</div>
|
|
</div>
|
|
<div class="prompt-block">
|
|
<span class="label">Coding Template</span>
|
|
<h3>Code generation with constraints</h3>
|
|
<div class="example"><strong>Prompt:</strong>
|
|
"Write a {language} function that {task}.
|
|
Constraints:
|
|
- Handle edge cases
|
|
- Include type hints
|
|
- Add docstring
|
|
- Keep it under {N} lines
|
|
- No external dependencies"</div>
|
|
</div>
|
|
<div class="prompt-block">
|
|
<span class="label">Critique Template</span>
|
|
<h3>Self-reflection prompt</h3>
|
|
<div class="example"><strong>Prompt:</strong>
|
|
"Here is a draft response. Critique it for:
|
|
- Accuracy
|
|
- Clarity
|
|
- Completeness
|
|
- Tone
|
|
Then rewrite it incorporating your feedback."</div>
|
|
</div>
|
|
|
|
<h2 class="section-title">Playground</h2>
|
|
<div class="def-card">
|
|
<span class="category">Interactive</span>
|
|
<h3>🧪 Test Your Prompts Live</h3>
|
|
<p>Try any prompt technique with your configured model. Paste a prompt template, fill in the variables, and see the result.</p>
|
|
<div class="llm-mini-chat visible" id="prompt-playground">
|
|
<div class="llm-mini-chat-header"><h4>🧪 Prompt Playground</h4><button class="llm-close-btn" onclick="this.closest(\'.llm-mini-chat\').classList.remove(\'visible\')">✕</button></div>
|
|
<div style="margin-bottom: 0.8rem;">
|
|
<label style="font-size: 0.8rem; font-weight: 700; color: var(--pink-600); text-transform: uppercase; letter-spacing: 0.5px; display: block; margin-bottom: 0.3rem;">Choose a technique</label>
|
|
<select id="prompt-technique" style="width: 100%; padding: 0.5rem 0.8rem; border: 2px solid var(--pink-200); border-radius: 10px; font-size: 0.88rem; color: var(--pink-900); background: var(--pink-50);">
|
|
<option value="zero-shot">Zero-Shot</option>
|
|
<option value="few-shot">Few-Shot</option>
|
|
<option value="chain-of-thought">Chain-of-Thought</option>
|
|
<option value="role">Role Prompting</option>
|
|
<option value="structured">Structured Output</option>
|
|
</select>
|
|
</div>
|
|
<div style="margin-bottom: 0.8rem;">
|
|
<label style="font-size: 0.8rem; font-weight: 700; color: var(--pink-600); text-transform: uppercase; letter-spacing: 0.5px; display: block; margin-bottom: 0.3rem;">Your prompt</label>
|
|
<textarea class="llm-mini-chat-input" id="prompt-input" rows="4" placeholder="Enter your prompt here..."></textarea>
|
|
</div>
|
|
<button class="llm-mini-chat-send" onclick="testPrompt()" style="width: 100%; padding: 0.7rem;">Send to LLM</button>
|
|
<div class="llm-mini-chat-output" id="prompt-output" style="margin-top: 0.8rem;"></div>
|
|
</div>
|
|
</div>
|
|
|
|
</div>

<footer>AI Cheat Sheet — A learning reference for artificial intelligence</footer>

<script src="../lib/modal.js"></script>
<script src="../lib/llm.js"></script>
<script>
|
|
(function(){
|
|
var techniquePrompts = {
|
|
'zero-shot': 'Perform the task I describe. Do not add examples. Just answer directly.',
|
|
'few-shot': 'Classify the sentiment of the following text. Here are examples:\n\n"I love this!" → Positive\n"This is terrible." → Negative\n"It\'s okay, I guess." → ?\n\n{text}\n→',
|
|
'chain-of-thought': 'Solve this step by step. Think through each step carefully before giving your final answer.\n\n{task}',
|
|
'role': 'You are an expert in {domain}. {task}',
|
|
'structured': 'Analyze the following text and return results as JSON with these keys: summary, sentiment, key_topics, action_items.\n\n{text}'
|
|
};
|
|
|
|
function applyTechnique() {
|
|
var technique = document.getElementById('prompt-technique').value;
|
|
var basePrompt = techniquePrompts[technique];
|
|
var userPrompt = document.getElementById('prompt-input').value.trim();
|
|
|
|
if (!userPrompt) return basePrompt;
|
|
|
|
return basePrompt.replace('{text}', userPrompt).replace('{task}', userPrompt).replace('{domain}', 'general');
|
|
}
|
|
|
|
function testPrompt() {
|
|
var userPrompt = document.getElementById('prompt-input').value.trim();
|
|
var technique = document.getElementById('prompt-technique').value;
|
|
|
|
if (!userPrompt) {
|
|
LLMModal.open('🧪 Prompt Playground');
|
|
LLMModal.error('Please enter a prompt to test.');
|
|
return;
|
|
}
|
|
|
|
var techniqueNames = {
|
|
'zero-shot': 'Zero-Shot',
|
|
'few-shot': 'Few-Shot',
|
|
'chain-of-thought': 'Chain-of-Thought',
|
|
'role': 'Role Prompting',
|
|
'structured': 'Structured Output'
|
|
};
|
|
|
|
LLMModal.open('🧪 Testing: ' + techniqueNames[technique]);
|
|
var finalPrompt = applyTechnique();
|
|
var systemPrompt = 'You are a helpful AI assistant. Respond to the prompt below using the specified technique.';
|
|
|
|
var messages = [
|
|
{ role: 'system', content: systemPrompt },
|
|
{ role: 'user', content: finalPrompt }
|
|
];
|
|
|
|
var fullText = '';
|
|
LLM.callAPI(
|
|
messages,
|
|
function(chunk) {
|
|
fullText += chunk;
|
|
LLMModal.update(fullText);
|
|
},
|
|
function() {},
|
|
function(err) {
|
|
LLMModal.error(err);
|
|
}
|
|
);
|
|
}
|
|
|
|
window.testPrompt = testPrompt;
|
|
})();
|
|
</script>
|
|
|
|
</body>
</html>