LLM integration with all the tabs
This commit is contained in:
@@ -174,9 +174,102 @@
|
||||
Then rewrite it incorporating your feedback."</div>
|
||||
</div>
|
||||
|
||||
<h2 class="section-title">Playground</h2>
|
||||
<div class="def-card">
|
||||
<span class="category">Interactive</span>
|
||||
<h3>🧪 Test Your Prompts Live</h3>
|
||||
<p>Try any prompt technique with your configured model. Paste a prompt template, fill in the variables, and see the result.</p>
|
||||
<div class="llm-mini-chat visible" id="prompt-playground">
|
||||
<div class="llm-mini-chat-header"><h4>🧪 Prompt Playground</h4><button class="llm-close-btn" onclick="this.closest('.llm-mini-chat').classList.remove('visible')">✕</button></div>
|
||||
<div style="margin-bottom: 0.8rem;">
|
||||
<label style="font-size: 0.8rem; font-weight: 700; color: var(--pink-600); text-transform: uppercase; letter-spacing: 0.5px; display: block; margin-bottom: 0.3rem;">Choose a technique</label>
|
||||
<select id="prompt-technique" style="width: 100%; padding: 0.5rem 0.8rem; border: 2px solid var(--pink-200); border-radius: 10px; font-size: 0.88rem; color: var(--pink-900); background: var(--pink-50);">
|
||||
<option value="zero-shot">Zero-Shot</option>
|
||||
<option value="few-shot">Few-Shot</option>
|
||||
<option value="chain-of-thought">Chain-of-Thought</option>
|
||||
<option value="role">Role Prompting</option>
|
||||
<option value="structured">Structured Output</option>
|
||||
</select>
|
||||
</div>
|
||||
<div style="margin-bottom: 0.8rem;">
|
||||
<label style="font-size: 0.8rem; font-weight: 700; color: var(--pink-600); text-transform: uppercase; letter-spacing: 0.5px; display: block; margin-bottom: 0.3rem;">Your prompt</label>
|
||||
<textarea class="llm-mini-chat-input" id="prompt-input" rows="4" placeholder="Enter your prompt here..."></textarea>
|
||||
</div>
|
||||
<button class="llm-mini-chat-send" onclick="testPrompt()" style="width: 100%; padding: 0.7rem;">Send to LLM</button>
|
||||
<div class="llm-mini-chat-output" id="prompt-output" style="margin-top: 0.8rem;"></div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
</div>
|
||||
|
||||
<footer>AI Cheat Sheet — A learning reference for artificial intelligence</footer>
|
||||
|
||||
<script src="../lib/modal.js"></script>
|
||||
<script src="../lib/llm.js"></script>
|
||||
<script>
|
||||
(function(){
  // Prompt templates for each technique in the <select>. Placeholders
  // ({text}, {task}, {domain}) are substituted by applyTechnique().
  var techniquePrompts = {
    'zero-shot': 'Perform the task I describe. Do not add examples. Just answer directly.',
    'few-shot': 'Classify the sentiment of the following text. Here are examples:\n\n"I love this!" → Positive\n"This is terrible." → Negative\n"It\'s okay, I guess." → ?\n\n{text}\n→',
    'chain-of-thought': 'Solve this step by step. Think through each step carefully before giving your final answer.\n\n{task}',
    'role': 'You are an expert in {domain}. {task}',
    'structured': 'Analyze the following text and return results as JSON with these keys: summary, sentiment, key_topics, action_items.\n\n{text}'
  };

  // Compose the final prompt from the selected technique template and the
  // user's input. Returns the bare template when the input box is empty.
  function applyTechnique() {
    var technique = document.getElementById('prompt-technique').value;
    var basePrompt = techniquePrompts[technique];
    var userPrompt = document.getElementById('prompt-input').value.trim();

    if (!userPrompt) return basePrompt;

    // Bug fix: templates with no placeholder (e.g. 'zero-shot') previously
    // dropped the user's prompt entirely — replace() was a no-op and only
    // the instruction text was sent. If nothing can be substituted, append
    // the user's prompt after the template instead.
    var hasPlaceholder = /\{(?:text|task|domain)\}/.test(basePrompt);
    if (!hasPlaceholder) {
      return basePrompt + '\n\n' + userPrompt;
    }
    return basePrompt
      .replace('{text}', userPrompt)
      .replace('{task}', userPrompt)
      .replace('{domain}', 'general');
  }

  // Send the composed prompt to the configured model and stream the reply
  // into the shared modal. LLMModal and LLM are provided by ../lib/modal.js
  // and ../lib/llm.js loaded above.
  function testPrompt() {
    var userPrompt = document.getElementById('prompt-input').value.trim();
    var technique = document.getElementById('prompt-technique').value;

    if (!userPrompt) {
      LLMModal.open('🧪 Prompt Playground');
      LLMModal.error('Please enter a prompt to test.');
      return;
    }

    // Display names for the modal title, keyed by <option> value.
    var techniqueNames = {
      'zero-shot': 'Zero-Shot',
      'few-shot': 'Few-Shot',
      'chain-of-thought': 'Chain-of-Thought',
      'role': 'Role Prompting',
      'structured': 'Structured Output'
    };

    LLMModal.open('🧪 Testing: ' + techniqueNames[technique]);
    var finalPrompt = applyTechnique();
    var systemPrompt = 'You are a helpful AI assistant. Respond to the prompt below using the specified technique.';

    var messages = [
      { role: 'system', content: systemPrompt },
      { role: 'user', content: finalPrompt }
    ];

    var fullText = '';
    LLM.callAPI(
      messages,
      function(chunk) {
        // Streaming callback: accumulate and re-render the whole reply.
        fullText += chunk;
        LLMModal.update(fullText);
      },
      function() {}, // on-complete: nothing extra to do
      function(err) {
        LLMModal.error(err);
      }
    );
  }

  // Expose for the inline onclick handler on the "Send to LLM" button.
  window.testPrompt = testPrompt;
})();
|
||||
</script>
|
||||
|
||||
</body>
|
||||
</html>
|
||||
|
||||
Reference in New Issue
Block a user