Compare commits

..

3 Commits

Author SHA1 Message Date
6359aefd18 add image generation tab and fix mobile menu
All checks were successful
Build and Push Container / build-and-push (push) Successful in 14s
2026-05-05 09:13:17 -04:00
695a3a510e LLM integration with all the tabs 2026-05-05 08:51:34 -04:00
de34ee9067 add chat link to index.html 2026-05-05 07:14:50 -04:00
13 changed files with 2307 additions and 73 deletions

8
compose.yaml Normal file
View File

@@ -0,0 +1,8 @@
services:
  web:
    # Static site served by nginx; image built from the Dockerfile in the repo root.
    build: .
    ports:
      - "8080:80"   # host 8080 -> nginx port 80 in the container
    volumes:
      # Mount the repo as the nginx web root so edits show up without a rebuild.
      # ":delegated" relaxes host/container sync (macOS perf hint, ignored elsewhere);
      # ":Z" relabels the content for SELinux hosts (Fedora/RHEL).
      - .:/usr/share/nginx/html:delegated,Z
    restart: unless-stopped

View File

@@ -23,46 +23,70 @@ body {
color: var(--pink-900); color: var(--pink-900);
line-height: 1.6; line-height: 1.6;
min-height: 100vh; min-height: 100vh;
padding-left: 240px;
} }
a { color: var(--pink-500); text-decoration: none; } a { color: var(--pink-500); text-decoration: none; }
a:hover { color: var(--pink-700); text-decoration: underline; } a:hover { color: var(--pink-700); text-decoration: underline; }
/* Navigation */ /* Navigation - Sidebar */
nav { nav {
background: linear-gradient(90deg, var(--pink-600), var(--pink-500), var(--pink-600)); background: linear-gradient(180deg, var(--pink-600), var(--pink-500), var(--pink-600));
padding: 0 2rem; position: fixed;
position: sticky;
top: 0; top: 0;
left: 0;
width: 240px;
height: 100vh;
z-index: 100; z-index: 100;
box-shadow: 0 4px 20px rgba(255,20,147,0.3); box-shadow: 4px 0 20px rgba(255,20,147,0.2);
overflow-y: auto;
display: flex;
flex-direction: column;
} }
.nav-inner { .nav-inner {
max-width: 1100px;
margin: 0 auto;
display: flex; display: flex;
align-items: center; flex-direction: column;
gap: 1rem; align-items: stretch;
padding: 1.5rem 1rem;
gap: 0.5rem;
flex: 1;
} }
.nav-brand { .nav-brand {
color: var(--white); color: var(--white);
font-weight: 800; font-weight: 800;
font-size: 1.4rem; font-size: 1.2rem;
letter-spacing: -0.5px; letter-spacing: -0.5px;
padding: 1rem 0; padding: 0.5rem 0.8rem;
text-decoration: none;
white-space: nowrap;
border-radius: 8px;
} }
.nav-links { display: flex; gap: 0.25rem; flex-wrap: wrap; } .nav-brand:hover {
background: rgba(255,255,255,0.1);
text-decoration: none;
}
.nav-links {
display: flex;
flex-direction: column;
gap: 0.15rem;
flex: 1;
}
.nav-links a { .nav-links a {
color: var(--pink-100); color: var(--pink-100);
padding: 0.6rem 1rem; padding: 0.6rem 0.8rem;
border-radius: 8px; border-radius: 8px;
font-size: 0.9rem; font-size: 0.88rem;
font-weight: 500; font-weight: 500;
transition: background 0.2s; transition: background 0.2s;
text-decoration: none;
white-space: nowrap;
overflow: hidden;
text-overflow: ellipsis;
} }
.nav-links a:hover, .nav-links a:hover,
@@ -73,12 +97,97 @@ nav {
box-shadow: 0 0 10px rgba(255,62,196,0.3); box-shadow: 0 0 10px rgba(255,62,196,0.3);
} }
/* Secondary controls pinned below the nav links (theme toggle etc.). */
.nav-actions {
display: flex;
flex-direction: column;
gap: 0.5rem;
padding-top: 0.5rem;
border-top: 1px solid rgba(255,255,255,0.15);
}
/* Menu Toggle (Hamburger) - mobile only */
.menu-toggle {
background: var(--pink-800);
border: 2px solid var(--pink-400);
color: var(--white);
padding: 0.4rem 0.7rem;
border-radius: 8px;
cursor: pointer;
font-size: 1.1rem;
transition: background 0.2s, border-color 0.2s;
line-height: 1;
display: none; /* hidden on desktop; the 900px media query below shows it */
position: fixed;
top: 0.8rem;
left: 0.8rem;
z-index: 10001; /* above the sidebar (9999 on mobile) and backdrop (9998) */
}
.menu-toggle:hover {
background: var(--pink-900);
border-color: var(--pink-300);
}
/* Dark mode toggle in sidebar */
.dark-toggle {
background: rgba(0,0,0,0.2);
border: 2px solid rgba(255,255,255,0.2);
color: var(--white);
padding: 0.5rem 0.7rem;
border-radius: 8px;
cursor: pointer;
font-size: 1rem;
transition: background 0.2s, border-color 0.2s;
line-height: 1;
align-self: stretch; /* fill the sidebar's full width */
}
.dark-toggle:hover {
background: rgba(0,0,0,0.3);
border-color: rgba(255,255,255,0.3);
}
/* Mobile responsive - sidebar becomes overlay */
@media (max-width: 900px) {
/* Slide the fixed sidebar off-canvas; .sidebar-open (toggled by the
   hamburger script) slides it back in. */
nav {
width: 260px;
transform: translateX(-100%);
transition: transform 0.3s ease;
z-index: 9999;
}
nav.sidebar-open {
transform: translateX(0);
}
.menu-toggle {
display: block;
}
.nav-links a {
padding: 0.7rem 0.8rem;
font-size: 0.95rem;
}
/* Reclaim the 240px desktop sidebar gutter on small screens. */
body {
padding-left: 0;
}
/* Keep hero text clear of the fixed hamburger button. */
.hero {
padding-left: 3.5rem;
}
.container {
padding-left: 1rem;
}
}
/* Hero */ /* Hero */
.hero { .hero {
background: linear-gradient(135deg, var(--pink-500), var(--pink-600), var(--pink-700)); background: linear-gradient(135deg, var(--pink-500), var(--pink-600), var(--pink-700));
color: var(--white); color: var(--white);
text-align: center; text-align: center;
padding: 5rem 2rem; padding: 4rem 2rem;
position: relative; position: relative;
overflow: hidden; overflow: hidden;
} }
@@ -132,6 +241,18 @@ nav {
border-color: var(--pink-400); border-color: var(--pink-400);
} }
/* Wrapper link that makes an entire topic card clickable without
   altering the card's own text styling. */
.card-link {
text-decoration: none;
color: inherit;
display: block;
}
/* Mirror the card's hover lift/glow when the wrapping link is hovered. */
.card-link:hover .card {
transform: translateY(-4px);
box-shadow: 0 15px 35px rgba(255,20,147,0.25);
border-color: var(--pink-400);
}
.card h3 { .card h3 {
color: var(--pink-600); color: var(--pink-600);
font-size: 1.2rem; font-size: 1.2rem;
@@ -292,25 +413,13 @@ footer {
padding: 1.8rem; padding: 1.8rem;
font-size: 0.9rem; font-size: 0.9rem;
box-shadow: 0 -4px 20px rgba(255,20,147,0.3); box-shadow: 0 -4px 20px rgba(255,20,147,0.3);
margin-left: 240px;
} }
/* Dark Mode Toggle */ @media (max-width: 900px) {
.dark-toggle { footer {
background: var(--pink-800); margin-left: 0;
border: 2px solid var(--pink-400); }
color: var(--white);
padding: 0.4rem 0.7rem;
border-radius: 8px;
cursor: pointer;
font-size: 1.1rem;
margin-left: auto;
transition: background 0.2s, border-color 0.2s;
line-height: 1;
}
.dark-toggle:hover {
background: var(--pink-900);
border-color: var(--pink-300);
} }
/* Dark Mode */ /* Dark Mode */
@@ -354,8 +463,8 @@ footer {
} }
[data-theme="dark"] nav { [data-theme="dark"] nav {
background: linear-gradient(90deg, #0f3460, #1a1a3e, #0f3460); background: linear-gradient(180deg, #0f3460, #1a1a3e, #0f3460);
box-shadow: 0 4px 20px rgba(255,20,147,0.15); box-shadow: 4px 0 20px rgba(255,20,147,0.1);
} }
[data-theme="dark"] .nav-links a { [data-theme="dark"] .nav-links a {
@@ -366,7 +475,46 @@ footer {
[data-theme="dark"] .nav-links a.active { [data-theme="dark"] .nav-links a.active {
background: #0f3460; background: #0f3460;
color: var(--text-nav-hover); color: var(--text-nav-hover);
box-shadow: 0 0 10px rgba(255,62,196,0.2); box-shadow: 0 0 10px rgba(255,62,196,0.15);
}
/* Dark mode sidebar toggle buttons */
[data-theme="dark"] .menu-toggle {
background: #0f3460;
border-color: var(--pink-500);
}
[data-theme="dark"] .menu-toggle:hover {
background: #1a1a3e;
border-color: var(--pink-400);
}
[data-theme="dark"] .dark-toggle {
background: rgba(255,255,255,0.05);
border-color: rgba(255,255,255,0.15);
}
[data-theme="dark"] .dark-toggle:hover {
background: rgba(255,255,255,0.1);
border-color: rgba(255,255,255,0.25);
}
/* Mobile sidebar backdrop */
@media (max-width: 900px) {
/* Dimming layer shown behind the open sidebar; the hamburger script
   toggles .visible in sync with nav.sidebar-open. Stacks below the
   sidebar (9999) and the fixed toggle button (10001). */
.sidebar-backdrop {
position: fixed;
top: 0;
left: 0;
width: 100%;
height: 100%;
background: rgba(0,0,0,0.5);
z-index: 9998;
display: none;
}
.sidebar-backdrop.visible {
display: block;
}
} }
[data-theme="dark"] .hero { [data-theme="dark"] .hero {
@@ -383,7 +531,8 @@ footer {
box-shadow: 0 10px 25px rgba(0,0,0,0.3), 0 4px 10px rgba(255,20,147,0.08); box-shadow: 0 10px 25px rgba(0,0,0,0.3), 0 4px 10px rgba(255,20,147,0.08);
} }
[data-theme="dark"] .card:hover { [data-theme="dark"] .card:hover,
[data-theme="dark"] .card-link:hover .card {
box-shadow: 0 15px 35px rgba(0,0,0,0.4), 0 4px 10px rgba(255,20,147,0.15); box-shadow: 0 15px 35px rgba(0,0,0,0.4), 0 4px 10px rgba(255,20,147,0.15);
border-color: var(--border-accent); border-color: var(--border-accent);
} }
@@ -782,3 +931,687 @@ footer {
[data-theme="dark"] .send-btn:hover:not(:disabled) { [data-theme="dark"] .send-btn:hover:not(:disabled) {
background: linear-gradient(135deg, var(--pink-500), #1a1a3e); background: linear-gradient(135deg, var(--pink-500), #1a1a3e);
} }
/* LLM interactive elements */
.llm-btn {
background: linear-gradient(135deg, var(--pink-400), var(--pink-500));
color: var(--white);
border: none;
padding: 0.5rem 1rem;
border-radius: 10px;
cursor: pointer;
font-size: 0.82rem;
font-weight: 700;
text-transform: uppercase;
letter-spacing: 0.5px;
transition: background 0.2s, transform 0.1s, opacity 0.2s;
display: inline-flex;
align-items: center;
gap: 0.4rem;
}
.llm-btn:hover:not(:disabled) {
background: linear-gradient(135deg, var(--pink-500), var(--pink-600));
transform: translateY(-1px);
}
.llm-btn:disabled {
opacity: 0.5;
cursor: not-allowed;
}
.llm-btn .icon {
font-size: 1rem;
}
.llm-response-area {
background: var(--pink-100);
border: 2px solid var(--pink-300);
border-radius: 12px;
padding: 1rem 1.2rem;
margin-top: 0.8rem;
font-size: 0.9rem;
line-height: 1.6;
max-height: 400px;
overflow-y: auto;
display: none;
}
.llm-response-area.visible {
display: block;
animation: llmFadeIn 0.3s ease-out;
}
@keyframes llmFadeIn {
from { opacity: 0; transform: translateY(8px); }
to { opacity: 1; transform: translateY(0); }
}
.llm-loading {
color: var(--pink-500);
font-style: italic;
}
.llm-error {
color: var(--pink-700);
font-weight: 600;
}
.llm-inline-code {
background: var(--white);
padding: 0.15rem 0.4rem;
border-radius: 4px;
font-family: 'Courier New', monospace;
font-size: 0.85em;
border: 1px solid var(--pink-300);
}
.llm-code-block {
background: var(--pink-50);
padding: 0.8rem 1rem;
border-radius: 8px;
overflow-x: auto;
font-size: 0.85rem;
border: 1px solid var(--pink-200);
margin: 0.5rem 0;
}
.llm-code-block code {
font-family: 'Courier New', monospace;
white-space: pre-wrap;
}
.llm-mini-chat {
display: none;
background: var(--white);
border: 2px solid var(--pink-300);
border-radius: 16px;
padding: 1.2rem;
margin-top: 1rem;
box-shadow: var(--shadow-lg);
}
.llm-mini-chat.visible {
display: block;
animation: llmFadeIn 0.3s ease-out;
}
.llm-mini-chat-header {
display: flex;
justify-content: space-between;
align-items: center;
margin-bottom: 0.8rem;
padding-bottom: 0.5rem;
border-bottom: 2px solid var(--pink-200);
}
.llm-mini-chat-header h4 {
color: var(--pink-600);
font-size: 0.95rem;
margin: 0;
}
.llm-close-btn {
background: none;
border: none;
color: var(--pink-500);
cursor: pointer;
font-size: 1.2rem;
padding: 0.2rem;
line-height: 1;
transition: color 0.2s;
}
.llm-close-btn:hover {
color: var(--pink-700);
}
.llm-mini-chat-output {
background: var(--pink-50);
border: 1px solid var(--pink-200);
border-radius: 10px;
padding: 0.8rem 1rem;
margin-bottom: 0.8rem;
min-height: 40px;
font-size: 0.9rem;
line-height: 1.6;
max-height: 300px;
overflow-y: auto;
white-space: pre-wrap;
}
.llm-mini-chat-input-row {
display: flex;
gap: 0.5rem;
}
.llm-mini-chat-input {
flex: 1;
padding: 0.5rem 0.8rem;
border: 2px solid var(--pink-200);
border-radius: 10px;
font-size: 0.88rem;
color: var(--pink-900);
background: var(--pink-50);
font-family: inherit;
resize: none;
outline: none;
}
.llm-mini-chat-input:focus {
border-color: var(--pink-400);
background: var(--white);
}
.llm-mini-chat-send {
background: linear-gradient(135deg, var(--pink-400), var(--pink-500));
color: var(--white);
border: none;
padding: 0.5rem 1rem;
border-radius: 10px;
cursor: pointer;
font-size: 0.85rem;
font-weight: 700;
white-space: nowrap;
transition: background 0.2s;
}
.llm-mini-chat-send:hover:not(:disabled) {
background: linear-gradient(135deg, var(--pink-500), var(--pink-600));
}
.llm-mini-chat-send:disabled {
opacity: 0.5;
cursor: not-allowed;
}
/* Dark mode LLM styles */
[data-theme="dark"] .llm-btn {
background: linear-gradient(135deg, var(--pink-600), #0f3460);
}
[data-theme="dark"] .llm-btn:hover:not(:disabled) {
background: linear-gradient(135deg, var(--pink-500), #1a1a3e);
}
[data-theme="dark"] .llm-response-area {
background: var(--bg-primary);
border-color: var(--border-secondary);
}
[data-theme="dark"] .llm-loading {
color: var(--pink-400);
}
[data-theme="dark"] .llm-error {
color: var(--pink-300);
}
[data-theme="dark"] .llm-inline-code {
background: var(--bg-card);
border-color: var(--border-primary);
color: var(--text-primary);
}
[data-theme="dark"] .llm-code-block {
background: var(--bg-table);
border-color: var(--border-primary);
color: var(--text-primary);
}
[data-theme="dark"] .llm-mini-chat {
background: var(--bg-card);
border-color: var(--border-primary);
}
[data-theme="dark"] .llm-mini-chat-header {
border-bottom-color: var(--border-primary);
}
[data-theme="dark"] .llm-mini-chat-header h4 {
color: var(--text-heading);
}
[data-theme="dark"] .llm-close-btn {
color: var(--text-secondary);
}
[data-theme="dark"] .llm-close-btn:hover {
color: var(--text-primary);
}
[data-theme="dark"] .llm-mini-chat-output {
background: var(--bg-primary);
border-color: var(--border-primary);
color: var(--text-primary);
}
[data-theme="dark"] .llm-mini-chat-input {
background: var(--bg-primary);
border-color: var(--border-primary);
color: var(--text-primary);
}
[data-theme="dark"] .llm-mini-chat-input:focus {
border-color: var(--border-accent);
background: var(--bg-secondary);
}
[data-theme="dark"] .llm-mini-chat-send {
background: linear-gradient(135deg, var(--pink-600), #0f3460);
}
[data-theme="dark"] .llm-mini-chat-send:hover:not(:disabled) {
background: linear-gradient(135deg, var(--pink-500), #1a1a3e);
}
/* Ask a question section on home page */
.ask-question-section {
margin-top: 2.5rem;
}
.ask-question-box {
background: var(--white);
border-radius: 16px;
padding: 2rem;
box-shadow: var(--shadow-lg);
border: 2px solid var(--pink-200);
}
.ask-question-box h3 {
color: var(--pink-700);
margin-bottom: 1rem;
font-size: 1.2rem;
}
.ask-question-input-row {
display: flex;
gap: 0.8rem;
margin-bottom: 1rem;
}
.ask-question-input {
flex: 1;
padding: 0.8rem 1rem;
border: 2px solid var(--pink-200);
border-radius: 12px;
font-size: 0.95rem;
color: var(--pink-900);
background: var(--pink-50);
font-family: inherit;
resize: none;
outline: none;
}
.ask-question-input:focus {
border-color: var(--pink-400);
background: var(--white);
}
.ask-question-answer {
background: var(--pink-50);
border: 1px solid var(--pink-200);
border-radius: 10px;
padding: 1rem 1.2rem;
font-size: 0.95rem;
line-height: 1.6;
min-height: 40px;
white-space: pre-wrap;
display: none;
}
.ask-question-answer.visible {
display: block;
animation: llmFadeIn 0.3s ease-out;
}
[data-theme="dark"] .ask-question-box {
background: var(--bg-card);
border-color: var(--border-primary);
}
[data-theme="dark"] .ask-question-box h3 {
color: var(--text-heading);
}
[data-theme="dark"] .ask-question-input {
background: var(--bg-primary);
border-color: var(--border-primary);
color: var(--text-primary);
}
[data-theme="dark"] .ask-question-input:focus {
border-color: var(--border-accent);
background: var(--bg-secondary);
}
[data-theme="dark"] .ask-question-answer {
background: var(--bg-primary);
border-color: var(--border-primary);
color: var(--text-primary);
}
/* Modal dialog */
.llm-modal-backdrop {
position: fixed;
top: 0;
left: 0;
width: 100%;
height: 100%;
z-index: 9999;
background: rgba(0, 0, 0, 0.5);
/* NOTE(review): no default `display` is declared here, so the backdrop
   renders full-screen whenever the element exists. The centering props
   below assume a flex formatting context — presumably script code sets
   display:flex/none when opening/closing the modal; confirm in the JS. */
justify-content: center;
align-items: center;
padding: 2rem;
}
.llm-modal-container {
background: var(--white);
border-radius: 20px;
max-width: 700px;
width: 100%;
max-height: 80vh;
display: flex;
flex-direction: column;
box-shadow: 0 20px 60px rgba(0, 0, 0, 0.3);
overflow: hidden;
}
.llm-modal-header {
display: flex;
justify-content: space-between;
align-items: center;
padding: 1.2rem 1.5rem;
border-bottom: 2px solid var(--pink-200);
}
.llm-modal-title {
color: var(--pink-600);
font-size: 1.1rem;
margin: 0;
}
.llm-modal-close {
background: none;
border: none;
color: var(--pink-500);
font-size: 1.5rem;
cursor: pointer;
padding: 0.2rem 0.5rem;
line-height: 1;
transition: color 0.2s;
}
.llm-modal-close:hover {
color: var(--pink-700);
}
.llm-modal-body {
padding: 1.5rem;
overflow-y: auto;
flex: 1;
font-size: 0.95rem;
line-height: 1.7;
color: var(--pink-900);
white-space: pre-wrap;
}
[data-theme="dark"] .llm-modal-container {
background: var(--bg-card);
}
[data-theme="dark"] .llm-modal-header {
border-bottom-color: var(--border-primary);
}
[data-theme="dark"] .llm-modal-title {
color: var(--text-heading);
}
[data-theme="dark"] .llm-modal-close {
color: var(--text-secondary);
}
[data-theme="dark"] .llm-modal-close:hover {
color: var(--text-primary);
}
[data-theme="dark"] .llm-modal-body {
color: var(--text-primary);
}
/* Image generation styles */
.image-gen-prompt-area {
background: var(--white);
border-radius: 16px;
padding: 1.5rem;
margin-bottom: 1.5rem;
box-shadow: var(--shadow-lg);
border: 2px solid var(--pink-200);
display: flex;
flex-direction: column;
gap: 0.8rem;
}
.image-gen-prompt-area label {
font-size: 0.8rem;
font-weight: 700;
color: var(--pink-600);
text-transform: uppercase;
letter-spacing: 0.5px;
}
.image-gen-prompt-area textarea {
padding: 0.8rem 1rem;
border: 2px solid var(--pink-200);
border-radius: 12px;
font-size: 0.95rem;
color: var(--pink-900);
background: var(--pink-50);
font-family: inherit;
resize: none;
outline: none;
transition: border-color 0.2s, background 0.2s;
}
.image-gen-prompt-area textarea:focus {
border-color: var(--pink-400);
background: var(--white);
}
.image-gen-prompt-area textarea::placeholder {
color: var(--pink-300);
}
.generate-btn {
background: linear-gradient(135deg, var(--pink-500), var(--pink-600));
color: var(--white);
border: none;
padding: 0.7rem 1.5rem;
border-radius: 12px;
cursor: pointer;
font-size: 0.95rem;
font-weight: 700;
transition: background 0.2s, transform 0.1s, opacity 0.2s;
align-self: flex-start;
}
.generate-btn:hover:not(:disabled) {
background: linear-gradient(135deg, var(--pink-600), var(--pink-700));
transform: translateY(-1px);
}
.generate-btn:disabled {
opacity: 0.5;
cursor: not-allowed;
}
.image-gen-results {
background: var(--white);
border-radius: 16px;
border: 2px solid var(--pink-200);
box-shadow: var(--shadow-lg);
min-height: 200px;
max-height: 70vh;
overflow-y: auto;
padding: 1.5rem;
scroll-behavior: smooth;
}
.image-gen-placeholder {
display: flex;
align-items: center;
justify-content: center;
min-height: 200px;
}
.image-gen-placeholder p {
color: var(--pink-400);
font-style: italic;
font-size: 0.95rem;
}
.image-gen-loading {
color: var(--pink-500);
font-style: italic;
}
.image-gen-error {
color: var(--pink-700);
font-weight: 600;
text-align: center;
}
.generated-image-card {
background: var(--pink-50);
border: 2px solid var(--pink-200);
border-radius: 16px;
padding: 1.2rem;
margin-bottom: 1rem;
animation: llmFadeIn 0.3s ease-out;
}
.generated-image-prompt {
font-size: 0.9rem;
color: var(--pink-700);
margin-bottom: 0.8rem;
padding-bottom: 0.5rem;
border-bottom: 1px solid var(--pink-200);
font-weight: 600;
}
.generated-image-container {
display: flex;
justify-content: center;
align-items: center;
margin-bottom: 0.8rem;
border-radius: 12px;
overflow: hidden;
background: var(--white);
}
.generated-image {
max-width: 100%;
max-height: 512px;
width: auto;
height: auto;
display: block;
border-radius: 8px;
}
.generated-image-actions {
display: flex;
gap: 0.8rem;
}
.image-action-btn {
background: linear-gradient(135deg, var(--pink-400), var(--pink-500));
color: var(--white);
border: none;
padding: 0.5rem 1.2rem;
border-radius: 10px;
cursor: pointer;
font-size: 0.85rem;
font-weight: 700;
transition: background 0.2s, transform 0.1s;
}
.image-action-btn:hover {
background: linear-gradient(135deg, var(--pink-500), var(--pink-600));
transform: translateY(-1px);
}
/* Dark mode image generation styles */
[data-theme="dark"] .image-gen-prompt-area {
background: var(--bg-card);
border-color: var(--border-primary);
box-shadow: 0 10px 25px rgba(0,0,0,0.3);
}
[data-theme="dark"] .image-gen-prompt-area label {
color: var(--text-heading-strong);
}
[data-theme="dark"] .image-gen-prompt-area textarea {
background: var(--bg-primary);
border-color: var(--border-primary);
color: var(--text-primary);
}
[data-theme="dark"] .image-gen-prompt-area textarea:focus {
border-color: var(--border-accent);
background: var(--bg-secondary);
}
[data-theme="dark"] .image-gen-prompt-area textarea::placeholder {
color: var(--text-secondary);
}
[data-theme="dark"] .generate-btn {
background: linear-gradient(135deg, var(--pink-600), #0f3460);
}
[data-theme="dark"] .generate-btn:hover:not(:disabled) {
background: linear-gradient(135deg, var(--pink-500), #1a1a3e);
}
[data-theme="dark"] .image-gen-results {
background: var(--bg-card);
border-color: var(--border-primary);
box-shadow: 0 10px 25px rgba(0,0,0,0.3);
}
[data-theme="dark"] .image-gen-placeholder p {
color: var(--text-secondary);
}
[data-theme="dark"] .image-gen-loading {
color: var(--pink-400);
}
[data-theme="dark"] .image-gen-error {
color: var(--pink-300);
}
[data-theme="dark"] .generated-image-card {
background: var(--bg-primary);
border-color: var(--border-primary);
}
[data-theme="dark"] .generated-image-prompt {
color: var(--text-heading);
border-bottom-color: var(--border-primary);
}
[data-theme="dark"] .generated-image-container {
background: var(--bg-secondary);
}
[data-theme="dark"] .image-action-btn {
background: linear-gradient(135deg, var(--pink-600), #0f3460);
}
[data-theme="dark"] .image-action-btn:hover {
background: linear-gradient(135deg, var(--pink-500), #1a1a3e);
}

View File

@@ -19,11 +19,15 @@
<a href="/pages/prompts.html">Prompt Guide</a> <a href="/pages/prompts.html">Prompt Guide</a>
<a href="/pages/math.html">Math & Concepts</a> <a href="/pages/math.html">Math & Concepts</a>
<a href="/pages/chat.html">Chat</a> <a href="/pages/chat.html">Chat</a>
<a href="/pages/image-gen.html">Image Gen</a>
</div> </div>
<button class="dark-toggle" id="darkToggle" aria-label="Toggle dark mode">🌙</button> <button class="dark-toggle" id="darkToggle" aria-label="Toggle dark mode">🌙</button>
</div> </div>
</nav> </nav>
<button class="menu-toggle" id="menuToggle" aria-label="Toggle menu"></button>
<div class="sidebar-backdrop" id="sidebarBackdrop"></div>
<script> <script>
(function(){ (function(){
var btn = document.getElementById('darkToggle'); var btn = document.getElementById('darkToggle');
@@ -44,6 +48,34 @@
localStorage.setItem('theme','dark'); localStorage.setItem('theme','dark');
} }
}); });
// Mobile sidebar wiring: the hamburger button toggles nav.sidebar-open
// (CSS slides the sidebar in/out) and keeps the dimming backdrop in sync.
var menuToggle = document.getElementById('menuToggle');
var nav = document.querySelector('nav');
var backdrop = document.getElementById('sidebarBackdrop');
if(menuToggle && nav){
// Hamburger click: flip the sidebar and swap the button glyph (☰ / ✕).
menuToggle.addEventListener('click', function(){
nav.classList.toggle('sidebar-open');
var isOpen = nav.classList.contains('sidebar-open');
menuToggle.textContent = isOpen ? '✕' : '☰';
if(backdrop){
// Second arg forces the class to match the open state exactly.
backdrop.classList.toggle('visible', isOpen);
}
});
// Tapping the dimmed backdrop closes the sidebar.
if(backdrop){
backdrop.addEventListener('click', function(){
nav.classList.remove('sidebar-open');
menuToggle.textContent = '☰';
backdrop.classList.remove('visible');
});
}
// Click-away close: any click outside the sidebar dismisses it. The
// toggle button is excluded so its own click handler isn't undone by
// the same event bubbling up to document.
document.addEventListener('click', function(e){
if(nav.classList.contains('sidebar-open') && !nav.contains(e.target) && e.target !== menuToggle){
nav.classList.remove('sidebar-open');
menuToggle.textContent = '☰';
if(backdrop) backdrop.classList.remove('visible');
}
});
}
})(); })();
</script> </script>
@@ -55,30 +87,42 @@
<div class="container"> <div class="container">
<h2 class="section-title">Browse Topics</h2> <h2 class="section-title">Browse Topics</h2>
<div class="cards"> <div class="cards">
<div class="card"> <a href="/pages/terminology.html" class="card-link">
<h3>📖 Terminology</h3> <div class="card">
<p>Key AI terms from ML and NLP — supervised learning, fine-tuning, tokens, embeddings, and more.</p> <h3>📖 Terminology</h3>
</div> <p>Key AI terms from ML and NLP — supervised learning, fine-tuning, tokens, embeddings, and more.</p>
<div class="card"> </div>
<h3>⚙️ Techniques</h3> </a>
<p>How AI models are trained and improved — backpropagation, RLHF, quantization, RAG, and more.</p> <a href="/pages/techniques.html" class="card-link">
</div> <div class="card">
<div class="card"> <h3>⚙️ Techniques</h3>
<h3>🎯 Use Cases</h3> <p>How AI models are trained and improved — backpropagation, RLHF, quantization, RAG, and more.</p>
<p>Where AI is used in the real world — healthcare, finance, creative work, customer support, and more.</p> </div>
</div> </a>
<div class="card"> <a href="/pages/use-cases.html" class="card-link">
<h3>🤖 Model Types</h3> <div class="card">
<p>LLMs, diffusion models, CNNs, GANs, transformers, and other AI architectures explained.</p> <h3>🎯 Use Cases</h3>
</div> <p>Where AI is used in the real world — healthcare, finance, creative work, customer support, and more.</p>
<div class="card"> </div>
<h3>✍️ Prompt Engineering</h3> </a>
<p>How to write effective prompts — zero-shot, few-shot, chain-of-thought, and structured prompts.</p> <a href="/pages/model-types.html" class="card-link">
</div> <div class="card">
<div class="card"> <h3>🤖 Model Types</h3>
<h3>📐 Math & Concepts</h3> <p>LLMs, diffusion models, CNNs, GANs, transformers, and other AI architectures explained.</p>
<p>Underlying concepts — loss functions, attention, temperature, perplexity, and accuracy metrics.</p> </div>
</div> </a>
<a href="/pages/prompts.html" class="card-link">
<div class="card">
<h3>✍️ Prompt Engineering</h3>
<p>How to write effective prompts — zero-shot, few-shot, chain-of-thought, and structured prompts.</p>
</div>
</a>
<a href="/pages/math.html" class="card-link">
<div class="card">
<h3>📐 Math & Concepts</h3>
<p>Underlying concepts — loss functions, attention, temperature, perplexity, and accuracy metrics.</p>
</div>
</a>
</div> </div>
<h2 class="section-title">Quick Start</h2> <h2 class="section-title">Quick Start</h2>
@@ -92,9 +136,80 @@
<h3>LLM vs Traditional ML</h3> <h3>LLM vs Traditional ML</h3>
<p>Traditional ML models are built for one specific task (e.g., classify spam). Large Language Models are general-purpose — trained on massive text corpora to understand and generate human language across countless tasks.</p> <p>Traditional ML models are built for one specific task (e.g., classify spam). Large Language Models are general-purpose — trained on massive text corpora to understand and generate human language across countless tasks.</p>
</div> </div>
<h2 class="section-title">Interactive</h2>
<div class="ask-question-section">
<div class="ask-question-box">
<h3>💬 Ask a Question</h3>
<p style="color: var(--pink-700); font-size: 0.95rem; margin-bottom: 1rem;">Ask anything about AI — terminology, concepts, techniques, or real-world applications. Powered by your configured LLM.</p>
<div class="ask-question-input-row">
<input class="ask-question-input" id="askInput" placeholder="Ask me anything about AI..." />
<button class="llm-btn" onclick="askQuestion()" style="padding: 0.6rem 1.5rem;"><span class="icon">Send</span></button>
</div>
<div class="ask-question-answer" id="askAnswer"></div>
</div>
</div>
<a href="/pages/chat.html" class="card-link" style="margin-top: 1.5rem; display: block;">
<div class="card">
<h3>💬 Full Chat</h3>
<p>Try AI right now — ask questions, brainstorm ideas, get explanations, or just experiment. Powered by a real LLM API.</p>
</div>
</a>
</div> </div>
<footer>AI Cheat Sheet &mdash; A learning reference for artificial intelligence</footer> <footer>AI Cheat Sheet &mdash; A learning reference for artificial intelligence</footer>
<script src="lib/llm.js"></script>
<script>
(function(){
  // Home-page "Ask a Question" widget. Sends the visitor's question,
  // prefixed with a condensed summary of the cheat sheet, to the shared
  // LLM module (lib/llm.js), which streams the rendered answer into
  // #askAnswer. Exposed on window because the Send button uses an
  // inline onclick handler.
  function askQuestion() {
    var input = document.getElementById('askInput');
    var answer = document.getElementById('askAnswer');
    var text = input.value.trim();
    if (!text) return;
    // Reveal the answer box immediately with a loading indicator;
    // LLM.chatWithHistory replaces this markup as soon as tokens arrive.
    answer.classList.add('visible');
    answer.innerHTML = '<span class="llm-loading">Thinking...</span>';
    var cheatSheetContext = `You are an AI educator answering questions based on this cheat sheet content:
TERMINOLOGY: Machine Learning (ML), Supervised Learning, Unsupervised Learning, Reinforcement Learning, Overfitting, Underfitting, NLP, Token, Embedding, Context Window, Paraphrasing, Sentiment Analysis, LLM, Pre-trained Model, Fine-tuning, Parameters, Inference, Weights. Acronyms: AI, ML, DL, NLP, LLM, RLHF, RAG, API, SFT, PoC, GAN, CNN, AGI, STT/ASR, TTS.
TECHNIQUES: Backpropagation, Epoch, Batch Size, Learning Rate, Transfer Learning, Data Augmentation, RLHF, SFT, Prompt Tuning, LoRA, Quantization, Distillation, Speculative Decoding, RAG, Agent/Tool Use, Chain-of-Thought.
USE CASES: Content Generation, Image Generation, Video & Audio, Summarization, Code Generation, Debugging & Review, Documentation, Code Translation, Chatbots & Assistants, Data Analysis, Research & Search, Translation, Email & Meeting Assistants, Document Processing, Healthcare, Finance, Automotive, Education, Manufacturing, Legal.
MODEL TYPES: LLMs (GPT-4, Claude, Gemini, Llama, Mistral), Encoder-Only (BERT, RoBERTa), Decoder-Only (GPT, Claude, Llama), Encoder-Decoder (T5, BART), CNN (ResNet, EfficientNet), ViT (CLIP, DINOv2), Diffusion (Stable Diffusion, DALL-E), GAN (StyleGAN), VQ-VAE, Flow Models, RNN/LSTM, MoE, Retrieval Models, SLMs (Phi-3, Gemma).
PROMPT ENGINEERING: Zero-Shot, Few-Shot, Chain-of-Thought, Role Prompting, Structured Output, Self-Consistency, ReAct. Tips: Be specific, use delimiters, provide context, iterate.
MATH & CONCEPTS: Attention, Self-Attention, Multi-Head Attention, Positional Encoding, FFN, Layer Norm, Loss Function, Gradient Descent, Adam, Gradient, Regularization, Batch Norm, Temperature, Top-K, Top-P, Greedy Decoding, Beam Search, Logits, Perplexity, Accuracy, Precision & Recall, F1 Score, BLEU/ROUGE, TPS. Formulas: Attention=softmax(QKᵀ/√dₖ)V, Cross-Entropy=-Σyᵢlog(pᵢ), Softmax=eˣⁱ/Σeˣʲ, ReLU=max(0,x), LayerNorm=(x-μ)/σ×γ+β, F1=2×(P×R)/(P+R), Perplexity=2^(cross-entropy).`;
    var messages = [
      { role: 'system', content: 'You are a helpful AI educator answering questions based on an AI cheat sheet. Use the context below to provide accurate, concise answers. If a question is outside the cheat sheet scope, say so but try to help anyway. Keep answers to 2-4 short paragraphs. Use formatting like bold text and code blocks where helpful.' },
      { role: 'user', content: cheatSheetContext + '\n\nQuestion: ' + text }
    ];
    // Fix: the previous .then() handler tried to strip the "Thinking..."
    // span out of answer.innerHTML after completion, but chatWithHistory
    // rewrites innerHTML on every streamed chunk, so that replace was
    // always a dead no-op. Nothing to clean up on success.
    LLM.chatWithHistory('askAnswer', messages)
      .catch(function() {
        // chatWithHistory already renders the error message into
        // #askAnswer; this catch only suppresses the unhandled rejection.
      });
  }
  var askInput = document.getElementById('askInput');
  if (askInput) {
    // Submit on Enter; preventDefault stops any implicit form submission.
    askInput.addEventListener('keydown', function(e) {
      if (e.key === 'Enter') {
        e.preventDefault();
        askQuestion();
      }
    });
  }
  window.askQuestion = askQuestion;
})();
</script>
</body> </body>
</html> </html>

180
lib/llm.js Normal file
View File

@@ -0,0 +1,180 @@
// Shared LLM module - provides streaming chat to any page
var LLM = (function() {
var defaultApiUrl = 'https://llama-instruct.reeselink.com/v1';
var defaultModel = 'instruct';
// Resolve the runtime LLM configuration. Each setting is read from
// localStorage and falls back to the module-level default (or an empty
// token) when the user has not saved an override.
function getConfig() {
  var savedUrl = localStorage.getItem('apiUrl');
  var savedToken = localStorage.getItem('apiToken');
  var savedModel = localStorage.getItem('modelName');
  return {
    apiUrl: savedUrl || defaultApiUrl,
    token: savedToken || '',
    model: savedModel || defaultModel
  };
}
// Stream a chat completion from an OpenAI-compatible endpoint.
// messages: array of {role, content}; onChunk(text) fires per content
// delta; onComplete() fires once at end of stream; onError(message)
// fires on network/HTTP failure. Parses Server-Sent-Events-style
// "data: {json}" lines terminated by a "data: [DONE]" sentinel.
function callAPI(messages, onChunk, onComplete, onError) {
var config = getConfig();
// Strip trailing slashes so the path join below can't double up.
var apiUrl = config.apiUrl.replace(/\/+$/, '');
var headers = { 'Content-Type': 'application/json' };
if (config.token) {
headers['Authorization'] = 'Bearer ' + config.token;
}
fetch(apiUrl + '/chat/completions', {
method: 'POST',
headers: headers,
body: JSON.stringify({
messages: messages,
model: config.model,
stream: true
})
})
.then(function(response) {
if (!response.ok) {
throw new Error('API error: ' + response.status + ' ' + response.statusText);
}
var reader = response.body.getReader();
var decoder = new TextDecoder();
// Holds any partial line left over between network chunks.
var buffer = '';
// Recursive pump: read one chunk, parse complete lines, recurse.
function read() {
return reader.read().then(function(result) {
var done = result.done;
var value = result.value;
if (done) {
// NOTE(review): any trailing data left in `buffer` is dropped
// here without a final decode/parse — in practice the stream
// ends with "[DONE]\n" so this appears benign; confirm.
onComplete();
return;
}
// stream:true keeps multi-byte chars split across chunks intact.
buffer += decoder.decode(value, { stream: true });
var lines = buffer.split('\n');
// Last element may be an incomplete line; keep it for next chunk.
buffer = lines.pop();
for (var i = 0; i < lines.length; i++) {
var line = lines[i].trim();
if (line.startsWith('data: ')) {
var data = line.slice(6);
if (data === '[DONE]') {
onComplete();
return;
}
try {
var json = JSON.parse(data);
var delta = json.choices && json.choices[0] && json.choices[0].delta;
if (delta && delta.content) {
onChunk(delta.content);
}
} catch (e) {
// skip malformed JSON
}
}
}
return read();
});
}
return read();
})
.catch(function(err) {
onError(err.message);
});
}
function chat(elementId, systemPrompt, userMessage) {
var messages = [];
if (systemPrompt) {
messages.push({ role: 'system', content: systemPrompt });
}
messages.push({ role: 'user', content: userMessage });
return new Promise(function(resolve, reject) {
var outputEl = document.getElementById(elementId);
if (!outputEl) { reject(new Error('Output element not found')); return; }
outputEl.innerHTML = '<span class="llm-loading">Thinking...</span>';
var fullText = '';
var history = systemPrompt ? messages : [];
callAPI(
history,
function(chunk) {
fullText += chunk;
outputEl.innerHTML = formatMarkdown(fullText);
outputEl.style.whiteSpace = 'pre-wrap';
},
function() {
resolve(fullText);
},
function(err) {
outputEl.innerHTML = '<span class="llm-error">Error: ' + escapeHTML(err) + '</span>';
reject(err);
}
);
});
}
function chatWithHistory(elementId, history, onDone) {
return new Promise(function(resolve, reject) {
var outputEl = document.getElementById(elementId);
if (!outputEl) { reject(new Error('Output element not found')); return; }
outputEl.innerHTML = '<span class="llm-loading">Thinking...</span>';
var fullText = '';
callAPI(
history,
function(chunk) {
fullText += chunk;
outputEl.innerHTML = formatMarkdown(fullText);
outputEl.style.whiteSpace = 'pre-wrap';
},
function() {
if (onDone) onDone();
resolve(fullText);
},
function(err) {
outputEl.innerHTML = '<span class="llm-error">Error: ' + escapeHTML(err) + '</span>';
reject(err);
}
);
});
}
function escapeHTML(str) {
var div = document.createElement('div');
div.textContent = str;
return div.innerHTML;
}
function formatMarkdown(text) {
// Extract code blocks first to protect them from escaping
var codeBlocks = [];
text = text.replace(/```([\s\S]*?)```/g, function(match, code) {
var placeholder = '%%CODEBLOCK' + codeBlocks.length + '%%';
codeBlocks.push('<pre class="llm-code-block"><code>' + escapeHTML(code) + '</code></pre>');
return placeholder;
});
// Escape remaining HTML
text = escapeHTML(text);
// Restore code blocks
for (var i = 0; i < codeBlocks.length; i++) {
text = text.replace('%%CODEBLOCK' + i + '%%', codeBlocks[i]);
}
// Inline code
text = text.replace(/`([^`]+)`/g, '<code class="llm-inline-code">$1</code>');
// Bold
text = text.replace(/\*\*([^*]+)\*\*/g, '<strong>$1</strong>');
// Line breaks
text = text.replace(/\n/g, '<br>');
return text;
}
return {
getConfig: getConfig,
callAPI: callAPI,
chat: chat,
chatWithHistory: chatWithHistory
};
})();

117
lib/modal.js Normal file
View File

@@ -0,0 +1,117 @@
// Shared modal for LLM explanations.
// Exposes open, update, done, error, and close on the global `LLMModal`.
// The modal DOM is created lazily on first open() and reused afterwards.
var LLMModal = (function() {
  var modalEl = null;   // backdrop element, created on demand
  var contentEl = null; // body element of the currently open modal
  var isOpen = false;

  // Build the modal DOM once and attach it (hidden) to <body>.
  // Closable via the X button, clicking the backdrop, or the Escape key.
  function createModal() {
    if (modalEl) return;
    modalEl = document.createElement('div');
    modalEl.className = 'llm-modal-backdrop';
    modalEl.style.cssText = 'display:none; position:fixed; top:0; left:0; width:100%; height:100%; z-index:9999; background:rgba(0,0,0,0.5); justify-content:center; align-items:center; padding:2rem;';
    var container = document.createElement('div');
    container.className = 'llm-modal-container';
    container.style.cssText = 'background:var(--white); border-radius:20px; max-width:700px; width:100%; max-height:80vh; display:flex; flex-direction:column; box-shadow:0 20px 60px rgba(0,0,0,0.3); overflow:hidden;';
    var header = document.createElement('div');
    header.className = 'llm-modal-header';
    header.style.cssText = 'display:flex; justify-content:space-between; align-items:center; padding:1.2rem 1.5rem; border-bottom:2px solid var(--pink-200);';
    var title = document.createElement('h3');
    title.className = 'llm-modal-title';
    title.style.cssText = 'color:var(--pink-600); font-size:1.1rem; margin:0;';
    var closeBtn = document.createElement('button');
    closeBtn.className = 'llm-modal-close';
    closeBtn.innerHTML = '&#x2715;';
    closeBtn.style.cssText = 'background:none; border:none; color:var(--pink-500); font-size:1.5rem; cursor:pointer; padding:0.2rem 0.5rem; line-height:1; transition:color 0.2s;';
    closeBtn.addEventListener('mouseenter', function() { this.style.color = 'var(--pink-700)'; });
    closeBtn.addEventListener('mouseleave', function() { this.style.color = 'var(--pink-500)'; });
    closeBtn.addEventListener('click', close);
    header.appendChild(title);
    header.appendChild(closeBtn);
    var body = document.createElement('div');
    body.className = 'llm-modal-body';
    body.id = 'llm-modal-content';
    body.style.cssText = 'padding:1.5rem; overflow-y:auto; flex:1; font-size:0.95rem; line-height:1.7; color:var(--pink-900); white-space:pre-wrap;';
    container.appendChild(header);
    container.appendChild(body);
    modalEl.appendChild(container);
    document.body.appendChild(modalEl);
    modalEl.addEventListener('click', function(e) {
      if (e.target === modalEl) close();
    });
    document.addEventListener('keydown', function(e) {
      if (e.key === 'Escape' && isOpen) close();
    });
    // BUGFIX: no premature show here. The original set display:'flex' at
    // creation time; open() is responsible for showing the modal.
  }

  // Show the modal with the given title and a loading indicator.
  function open(titleText) {
    if (!modalEl) createModal();
    var title = modalEl.querySelector('.llm-modal-title');
    title.textContent = titleText;
    contentEl = document.getElementById('llm-modal-content');
    contentEl.innerHTML = '<span class="llm-loading">Thinking...</span>';
    modalEl.style.display = 'flex';
    isOpen = true;
  }

  // Replace the modal body with rendered markdown (used while streaming).
  function update(text) {
    if (contentEl) {
      contentEl.innerHTML = formatMarkdown(text);
    }
  }

  function done() {
    // nothing to do, modal stays open
  }

  // Show an error message in the modal body (escaped).
  function error(msg) {
    if (contentEl) {
      contentEl.innerHTML = '<span class="llm-error">Error: ' + escapeHTML(msg) + '</span>';
    }
  }

  function close() {
    if (modalEl) {
      modalEl.style.display = 'none';
      isOpen = false;
    }
  }

  // HTML-escape a string by round-tripping it through a detached element.
  function escapeHTML(str) {
    var div = document.createElement('div');
    div.textContent = str;
    return div.innerHTML;
  }

  // Minimal markdown renderer: fenced code blocks, inline code, bold, and
  // line breaks.
  // BUGFIX: fenced code blocks are extracted into placeholders BEFORE
  // escaping, so their contents are not mangled by the inline-code, bold,
  // and <br> substitutions below (the previous version applied those rules
  // inside code blocks). This matches the renderer in lib/llm.js.
  function formatMarkdown(text) {
    var codeBlocks = [];
    text = text.replace(/```([\s\S]*?)```/g, function(match, code) {
      var placeholder = '%%CODEBLOCK' + codeBlocks.length + '%%';
      codeBlocks.push('<pre class="llm-code-block"><code>' + escapeHTML(code) + '</code></pre>');
      return placeholder;
    });
    // Escape remaining HTML
    text = escapeHTML(text);
    // Restore code blocks
    for (var i = 0; i < codeBlocks.length; i++) {
      text = text.replace('%%CODEBLOCK' + i + '%%', codeBlocks[i]);
    }
    // Inline code
    text = text.replace(/`([^`]+)`/g, '<code class="llm-inline-code">$1</code>');
    // Bold
    text = text.replace(/\*\*([^*]+)\*\*/g, '<strong>$1</strong>');
    // Line breaks
    text = text.replace(/\n/g, '<br>');
    return text;
  }

  return {
    open: open,
    update: update,
    done: done,
    error: error,
    close: close
  };
})();

View File

@@ -10,7 +10,7 @@
<nav> <nav>
<div class="nav-inner"> <div class="nav-inner">
<a href="index.html" class="nav-brand">AI Cheat Sheet</a> <a href="../index.html" class="nav-brand">AI Cheat Sheet</a>
<div class="nav-links"> <div class="nav-links">
<a href="/pages/terminology.html">Terminology</a> <a href="/pages/terminology.html">Terminology</a>
<a href="/pages/techniques.html">Techniques</a> <a href="/pages/techniques.html">Techniques</a>
@@ -19,11 +19,15 @@
<a href="/pages/prompts.html">Prompt Guide</a> <a href="/pages/prompts.html">Prompt Guide</a>
<a href="/pages/math.html">Math & Concepts</a> <a href="/pages/math.html">Math & Concepts</a>
<a href="/pages/chat.html" class="active">Chat</a> <a href="/pages/chat.html" class="active">Chat</a>
<a href="/pages/image-gen.html">Image Gen</a>
</div> </div>
<button class="dark-toggle" id="darkToggle" aria-label="Toggle dark mode">🌙</button> <button class="dark-toggle" id="darkToggle" aria-label="Toggle dark mode">🌙</button>
</div> </div>
</nav> </nav>
<button class="menu-toggle" id="menuToggle" aria-label="Toggle menu"></button>
<div class="sidebar-backdrop" id="sidebarBackdrop"></div>
<script> <script>
(function(){ (function(){
var btn = document.getElementById('darkToggle'); var btn = document.getElementById('darkToggle');
@@ -44,6 +48,34 @@
localStorage.setItem('theme','dark'); localStorage.setItem('theme','dark');
} }
}); });
var menuToggle = document.getElementById('menuToggle');
var nav = document.querySelector('nav');
var backdrop = document.getElementById('sidebarBackdrop');
if(menuToggle && nav){
menuToggle.addEventListener('click', function(){
nav.classList.toggle('sidebar-open');
var isOpen = nav.classList.contains('sidebar-open');
menuToggle.textContent = isOpen ? '✕' : '☰';
if(backdrop){
backdrop.classList.toggle('visible', isOpen);
}
});
if(backdrop){
backdrop.addEventListener('click', function(){
nav.classList.remove('sidebar-open');
menuToggle.textContent = '☰';
backdrop.classList.remove('visible');
});
}
document.addEventListener('click', function(e){
if(nav.classList.contains('sidebar-open') && !nav.contains(e.target) && e.target !== menuToggle){
nav.classList.remove('sidebar-open');
menuToggle.textContent = '☰';
if(backdrop) backdrop.classList.remove('visible');
}
});
}
})(); })();
</script> </script>

349
pages/image-gen.html Normal file
View File

@@ -0,0 +1,349 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Image Generation - AI Cheat Sheet</title>
<link rel="stylesheet" href="../css/style.css">
</head>
<body>
<nav>
<div class="nav-inner">
<a href="../index.html" class="nav-brand">AI Cheat Sheet</a>
<div class="nav-links">
<a href="/pages/terminology.html">Terminology</a>
<a href="/pages/techniques.html">Techniques</a>
<a href="/pages/use-cases.html">Use Cases</a>
<a href="/pages/model-types.html">Model Types</a>
<a href="/pages/prompts.html">Prompt Guide</a>
<a href="/pages/math.html">Math & Concepts</a>
<a href="/pages/chat.html">Chat</a>
<a href="/pages/image-gen.html" class="active">Image Gen</a>
</div>
<button class="dark-toggle" id="darkToggle" aria-label="Toggle dark mode">🌙</button>
</div>
</nav>
<button class="menu-toggle" id="menuToggle" aria-label="Toggle menu"></button>
<div class="sidebar-backdrop" id="sidebarBackdrop"></div>
<script>
(function(){
  // --- Dark mode toggle ---
  var darkBtn = document.getElementById('darkToggle');
  var storedTheme = localStorage.getItem('theme');
  var systemPrefersDark = window.matchMedia('(prefers-color-scheme: dark)').matches;

  // Apply the saved theme, or follow the OS preference when nothing is saved.
  if(storedTheme === 'dark' || (!storedTheme && systemPrefersDark)){
    document.documentElement.setAttribute('data-theme','dark');
    darkBtn.textContent = '☀️';
  }

  darkBtn.addEventListener('click', function(){
    var currentlyDark = document.documentElement.getAttribute('data-theme') === 'dark';
    if(currentlyDark){
      document.documentElement.removeAttribute('data-theme');
      darkBtn.textContent = '🌙';
      localStorage.setItem('theme','light');
    } else {
      document.documentElement.setAttribute('data-theme','dark');
      darkBtn.textContent = '☀️';
      localStorage.setItem('theme','dark');
    }
  });

  // --- Mobile sidebar toggle ---
  var hamburger = document.getElementById('menuToggle');
  var sidebar = document.querySelector('nav');
  var overlay = document.getElementById('sidebarBackdrop');
  if(hamburger && sidebar){
    // Collapse the sidebar and reset the hamburger/backdrop state.
    var closeSidebar = function(){
      sidebar.classList.remove('sidebar-open');
      hamburger.textContent = '☰';
      if(overlay) overlay.classList.remove('visible');
    };
    hamburger.addEventListener('click', function(){
      sidebar.classList.toggle('sidebar-open');
      var nowOpen = sidebar.classList.contains('sidebar-open');
      hamburger.textContent = nowOpen ? '✕' : '☰';
      if(overlay){
        overlay.classList.toggle('visible', nowOpen);
      }
    });
    if(overlay){
      overlay.addEventListener('click', closeSidebar);
    }
    // Clicking anywhere outside the open sidebar (except the hamburger
    // itself) also closes it.
    document.addEventListener('click', function(e){
      if(sidebar.classList.contains('sidebar-open') && !sidebar.contains(e.target) && e.target !== hamburger){
        closeSidebar();
      }
    });
  }
})();
</script>
<div class="hero">
<h1>Image Generation</h1>
<p>Generate images from text prompts using AI.</p>
</div>
<div class="container">
<div class="chat-config">
<div class="config-row">
<label for="apiUrl">API Endpoint</label>
<input type="text" id="apiUrl" value="https://image-gen.reeselink.com/v1">
</div>
<div class="config-row">
<label for="apiToken">API Token</label>
<input type="password" id="apiToken" placeholder="Enter your API token">
</div>
<div class="config-row">
<label for="modelName">Model</label>
<input type="text" id="modelName" value="flux">
</div>
</div>
<div class="image-gen-prompt-area">
<label for="promptInput">Prompt</label>
<textarea id="promptInput" placeholder="Describe the image you want to generate..." rows="3"></textarea>
<button id="generateBtn" class="generate-btn">Generate</button>
</div>
<div class="image-gen-results" id="imageResults">
<div class="image-gen-placeholder" id="imagePlaceholder">
<p>Your generated image will appear here.</p>
</div>
</div>
</div>
<footer>AI Cheat Sheet &mdash; A learning reference for artificial intelligence</footer>
<script>
(function(){
  // --- Element references ---
  var apiUrlInput = document.getElementById('apiUrl');
  var apiTokenInput = document.getElementById('apiToken');
  var modelNameInput = document.getElementById('modelName');
  var promptInput = document.getElementById('promptInput');
  var generateBtn = document.getElementById('generateBtn');
  var imageResults = document.getElementById('imageResults');
  var imagePlaceholder = document.getElementById('imagePlaceholder');

  // Restore saved settings (stored under image-gen-specific keys so they do
  // not collide with the chat page's localStorage keys).
  var savedToken = localStorage.getItem('imageGenToken');
  if(savedToken) apiTokenInput.value = savedToken;
  var savedModel = localStorage.getItem('imageGenModel');
  if(savedModel) modelNameInput.value = savedModel;
  var savedUrl = localStorage.getItem('imageGenUrl');
  if(savedUrl) apiUrlInput.value = savedUrl;

  var isGenerating = false; // guards against double-submits

  // Toggle the UI between idle and in-flight states.
  function setLoading(loading) {
    isGenerating = loading;
    generateBtn.disabled = loading;
    generateBtn.textContent = loading ? 'Generating...' : 'Generate';
    promptInput.disabled = loading;
  }

  // Remove any previously generated image cards (only one result is shown
  // at a time).
  function clearResults() {
    var existingImages = imageResults.querySelectorAll('.generated-image-card');
    for(var i = 0; i < existingImages.length; i++) {
      existingImages[i].remove();
    }
  }

  // Render a generated image (data: URL or remote URL) as a card with the
  // prompt text and Copy/Download actions.
  function showImage(dataUrl, prompt) {
    if(imagePlaceholder) {
      imagePlaceholder.style.display = 'none';
    }
    clearResults();
    var card = document.createElement('div');
    card.className = 'generated-image-card';
    var promptText = document.createElement('p');
    promptText.className = 'generated-image-prompt';
    promptText.textContent = prompt;
    var imgContainer = document.createElement('div');
    imgContainer.className = 'generated-image-container';
    var img = document.createElement('img');
    img.className = 'generated-image';
    img.src = dataUrl;
    img.alt = prompt;
    imgContainer.appendChild(img);
    var actions = document.createElement('div');
    actions.className = 'generated-image-actions';
    var copyBtn = document.createElement('button');
    copyBtn.className = 'image-action-btn';
    copyBtn.textContent = 'Copy Image';
    copyBtn.addEventListener('click', function() {
      copyToClipboard(dataUrl, copyBtn);
    });
    var downloadBtn = document.createElement('button');
    downloadBtn.className = 'image-action-btn';
    downloadBtn.textContent = 'Download';
    downloadBtn.addEventListener('click', function() {
      var a = document.createElement('a');
      a.href = dataUrl;
      a.download = 'generated-image-' + Date.now() + '.png';
      a.click();
    });
    actions.appendChild(copyBtn);
    actions.appendChild(downloadBtn);
    card.appendChild(promptText);
    card.appendChild(imgContainer);
    card.appendChild(actions);
    imageResults.appendChild(card);
    imageResults.scrollTop = imageResults.scrollHeight;
  }

  // Fetch the image bytes (works for both data: and https: URLs) and put
  // them on the clipboard as a PNG. Falls back to copying the URL text when
  // the async Clipboard API is unavailable or rejects.
  function copyToClipboard(dataUrl, copyBtn) {
    var xhr = new XMLHttpRequest();
    xhr.open('GET', dataUrl, true);
    xhr.responseType = 'blob';
    xhr.onload = function() {
      var blob = xhr.response;
      if(navigator.clipboard && navigator.clipboard.write) {
        navigator.clipboard.write([
          new ClipboardItem({ 'image/png': blob })
        ]).then(function() {
          copyBtn.textContent = 'Copied!';
          setTimeout(function() {
            copyBtn.textContent = 'Copy Image';
          }, 2000);
        }).catch(function() {
          fallbackCopy(dataUrl);
        });
      } else {
        fallbackCopy(dataUrl);
      }
    };
    xhr.send();
  }

  // Legacy text copy via a temporary textarea and execCommand (deprecated,
  // but still the widest-supported fallback).
  function fallbackCopy(dataUrl) {
    var textarea = document.createElement('textarea');
    textarea.value = dataUrl;
    document.body.appendChild(textarea);
    textarea.select();
    document.execCommand('copy');
    document.body.removeChild(textarea);
    alert('Image URL copied to clipboard.');
  }

  // Show an error message in the placeholder area.
  // SECURITY FIX: the message can contain raw API response body text, so it
  // is inserted with textContent (not innerHTML) to prevent an untrusted
  // server response from injecting markup into the page.
  function showError(message) {
    if(imagePlaceholder) {
      imagePlaceholder.style.display = 'block';
      imagePlaceholder.innerHTML = '';
      var p = document.createElement('p');
      p.className = 'image-gen-error';
      p.textContent = message;
      imagePlaceholder.appendChild(p);
    }
  }

  // Validate inputs, call the OpenAI-compatible /images/generations
  // endpoint, and render the result (url or b64_json response formats).
  function generate() {
    if(isGenerating) return;
    var prompt = promptInput.value.trim();
    if(!prompt) {
      alert('Please enter a prompt.');
      promptInput.focus();
      return;
    }
    var model = modelNameInput.value.trim();
    if(!model) {
      alert('Please enter a model name.');
      modelNameInput.focus();
      return;
    }
    setLoading(true);
    if(imagePlaceholder) {
      imagePlaceholder.style.display = 'block';
      imagePlaceholder.innerHTML = '<p class="image-gen-loading">Generating image... This may take a few seconds.</p>';
    }
    clearResults();
    var apiEndpoint = apiUrlInput.value.trim().replace(/\/+$/, ''); // tolerate trailing slashes
    var apiToken = apiTokenInput.value.trim();
    var headers = { 'Content-Type': 'application/json' };
    if(apiToken) {
      headers['Authorization'] = 'Bearer ' + apiToken;
    }
    fetch(apiEndpoint + '/images/generations', {
      method: 'POST',
      headers: headers,
      body: JSON.stringify({
        prompt: prompt,
        model: model,
        n: 1,
        size: '1024x1024'
      })
    })
    .then(function(response) {
      if(!response.ok) {
        // Include the response body in the error for easier debugging.
        return response.text().then(function(text) {
          throw new Error('API error ' + response.status + ': ' + (text || response.statusText));
        });
      }
      return response.json();
    })
    .then(function(data) {
      setLoading(false);
      if(data.data && data.data[0]) {
        var imageData = data.data[0];
        var dataUrl;
        if(imageData.url) {
          dataUrl = imageData.url;
        } else if(imageData.b64_json) {
          dataUrl = 'data:image/png;base64,' + imageData.b64_json;
        } else {
          showError('Unexpected response format from the API.');
          return;
        }
        showImage(dataUrl, prompt);
      } else {
        showError('No image returned from the API.');
      }
    })
    .catch(function(err) {
      setLoading(false);
      showError('Error: ' + err.message);
    });
  }

  // --- Event wiring ---
  generateBtn.addEventListener('click', generate);
  // Enter submits; Shift+Enter inserts a newline.
  promptInput.addEventListener('keydown', function(e) {
    if(e.key === 'Enter' && !e.shiftKey) {
      e.preventDefault();
      generate();
    }
  });
  // Auto-grow the prompt textarea up to 200px.
  promptInput.addEventListener('input', function() {
    this.style.height = 'auto';
    this.style.height = Math.min(this.scrollHeight, 200) + 'px';
  });
  // Persist settings whenever they change.
  apiTokenInput.addEventListener('change', function() {
    localStorage.setItem('imageGenToken', this.value);
  });
  modelNameInput.addEventListener('change', function() {
    localStorage.setItem('imageGenModel', this.value);
  });
  apiUrlInput.addEventListener('change', function() {
    localStorage.setItem('imageGenUrl', this.value);
  });
  promptInput.focus();
})();
</script>
</body>
</html>

View File

@@ -10,7 +10,7 @@
<nav> <nav>
<div class="nav-inner"> <div class="nav-inner">
<a href="/" class="nav-brand">AI Cheat Sheet</a> <a href="../index.html" class="nav-brand">AI Cheat Sheet</a>
<div class="nav-links"> <div class="nav-links">
<a href="/pages/terminology.html">Terminology</a> <a href="/pages/terminology.html">Terminology</a>
<a href="/pages/techniques.html">Techniques</a> <a href="/pages/techniques.html">Techniques</a>
@@ -19,11 +19,15 @@
<a href="/pages/prompts.html">Prompt Guide</a> <a href="/pages/prompts.html">Prompt Guide</a>
<a href="/pages/math.html" class="active">Math & Concepts</a> <a href="/pages/math.html" class="active">Math & Concepts</a>
<a href="/pages/chat.html">Chat</a> <a href="/pages/chat.html">Chat</a>
<a href="/pages/image-gen.html">Image Gen</a>
</div> </div>
<button class="dark-toggle" id="darkToggle" aria-label="Toggle dark mode">🌙</button> <button class="dark-toggle" id="darkToggle" aria-label="Toggle dark mode">🌙</button>
</div> </div>
</nav> </nav>
<button class="menu-toggle" id="menuToggle" aria-label="Toggle menu"></button>
<div class="sidebar-backdrop" id="sidebarBackdrop"></div>
<script> <script>
(function(){ (function(){
var btn = document.getElementById('darkToggle'); var btn = document.getElementById('darkToggle');
@@ -44,6 +48,34 @@
localStorage.setItem('theme','dark'); localStorage.setItem('theme','dark');
} }
}); });
var menuToggle = document.getElementById('menuToggle');
var nav = document.querySelector('nav');
var backdrop = document.getElementById('sidebarBackdrop');
if(menuToggle && nav){
menuToggle.addEventListener('click', function(){
nav.classList.toggle('sidebar-open');
var isOpen = nav.classList.contains('sidebar-open');
menuToggle.textContent = isOpen ? '✕' : '☰';
if(backdrop){
backdrop.classList.toggle('visible', isOpen);
}
});
if(backdrop){
backdrop.addEventListener('click', function(){
nav.classList.remove('sidebar-open');
menuToggle.textContent = '☰';
backdrop.classList.remove('visible');
});
}
document.addEventListener('click', function(e){
if(nav.classList.contains('sidebar-open') && !nav.contains(e.target) && e.target !== menuToggle){
nav.classList.remove('sidebar-open');
menuToggle.textContent = '☰';
if(backdrop) backdrop.classList.remove('visible');
}
});
}
})(); })();
</script> </script>
@@ -60,32 +92,38 @@
<h3>Attention Mechanism</h3> <h3>Attention Mechanism</h3>
<p>A way for the model to weigh the importance of different parts of the input when processing each token. "Attention is all you need" — the 2017 paper that launched the transformer revolution.</p> <p>A way for the model to weigh the importance of different parts of the input when processing each token. "Attention is all you need" — the 2017 paper that launched the transformer revolution.</p>
<div class="example"><strong>Analogy:</strong> When reading a sentence, you naturally pay more attention to certain words. "The cat that chased the mouse hid" — you attend to "cat" when processing "hid".</div> <div class="example"><strong>Analogy:</strong> When reading a sentence, you naturally pay more attention to certain words. "The cat that chased the mouse hid" — you attend to "cat" when processing "hid".</div>
<button class="llm-btn" onclick="walkThrough('📐 Attention Mechanism', 'Walk through attention with a concrete numerical example. If we have tokens [A, B, C] with embeddings [1,0,0], [0,1,0], [0,0,1], show how Q, K, V are computed and how attention weights are calculated step by step.')"><span class="icon">📐</span> Walk me through</button>
</div> </div>
<div class="def-card"> <div class="def-card">
<span class="category">Architecture</span> <span class="category">Architecture</span>
<h3>Self-Attention</h3> <h3>Self-Attention</h3>
<p>Each token in a sequence attends to every other token, creating rich contextual representations. The core of the transformer architecture.</p> <p>Each token in a sequence attends to every other token, creating rich contextual representations. The core of the transformer architecture.</p>
<div class="example"><strong>Math:</strong> Attention(Q, K, V) = softmax(QKᵀ / √dₖ) V</div> <div class="example"><strong>Math:</strong> Attention(Q, K, V) = softmax(QKᵀ / √dₖ) V</div>
<button class="llm-btn" onclick="walkThrough('📐 Self-Attention', 'Walk through the self-attention formula step by step with a simple example. Show how Q, K, V matrices are computed, how the dot product creates attention scores, how softmax normalizes them, and how the final output is a weighted sum of V.')"><span class="icon">📐</span> Walk me through</button>
</div> </div>
<div class="def-card"> <div class="def-card">
<span class="category">Architecture</span> <span class="category">Architecture</span>
<h3>Multi-Head Attention</h3> <h3>Multi-Head Attention</h3>
<p>Running multiple self-attention operations in parallel, each learning different types of relationships. Like having multiple "lenses" to view the input.</p> <p>Running multiple self-attention operations in parallel, each learning different types of relationships. Like having multiple "lenses" to view the input.</p>
<button class="llm-btn" onclick="walkThrough('📐 Multi-Head Attention', 'Walk through multi-head attention. If we have 4 heads with d_model=128, show how the model splits into 4 heads of dimension 32, runs self-attention on each, concatenates them, and projects back to dimension 128.')"><span class="icon">📐</span> Walk me through</button>
</div> </div>
<div class="def-card"> <div class="def-card">
<span class="category">Architecture</span> <span class="category">Architecture</span>
<h3>Positional Encoding</h3> <h3>Positional Encoding</h3>
<p>Since transformers process all tokens simultaneously (unlike RNNs), position information must be added explicitly so the model knows word order.</p> <p>Since transformers process all tokens simultaneously (unlike RNNs), position information must be added explicitly so the model knows word order.</p>
<button class="llm-btn" onclick="walkThrough('📐 Positional Encoding', 'Walk through positional encoding. Show how sinusoidal positional encodings work with a concrete example. If we have positions 0, 1, 2, 3 and dimension 4, show the actual encoding vectors and explain why sin/cos are used.')"><span class="icon">📐</span> Walk me through</button>
</div> </div>
<div class="def-card"> <div class="def-card">
<span class="category">Architecture</span> <span class="category">Architecture</span>
<h3>Feed-Forward Network (FFN)</h3> <h3>Feed-Forward Network (FFN)</h3>
<p>After attention, each token passes through a small neural network that transforms its representation. Usually two linear layers with a non-linearity in between.</p> <p>After attention, each token passes through a small neural network that transforms its representation. Usually two linear layers with a non-linearity in between.</p>
<button class="llm-btn" onclick="walkThrough('📐 Feed-Forward Network (FFN)', 'Walk through the FFN in a transformer. Show the dimensions: if d_model=512 and d_ff=2048, show how each token goes through Linear(512, 2048) → ReLU → Linear(2048, 512). Give a concrete numerical example.')"><span class="icon">📐</span> Walk me through</button>
</div> </div>
<div class="def-card"> <div class="def-card">
<span class="category">Architecture</span> <span class="category">Architecture</span>
<h3>Layer Normalization</h3> <h3>Layer Normalization</h3>
<p>A technique to stabilize training by normalizing the activations of each layer. Helps gradients flow more smoothly through deep networks.</p> <p>A technique to stabilize training by normalizing the activations of each layer. Helps gradients flow more smoothly through deep networks.</p>
<button class="llm-btn" onclick="walkThrough('📐 Layer Normalization', 'Walk through layer normalization step by step. If a layer outputs [2, -1, 3, 0] for a single token, show how to compute the mean, variance, normalize, and then apply the learnable parameters γ and β.')"><span class="icon">📐</span> Walk me through</button>
</div> </div>
<h2 class="section-title">Training Concepts</h2> <h2 class="section-title">Training Concepts</h2>
@@ -94,31 +132,37 @@
<h3>Loss Function</h3> <h3>Loss Function</h3>
<p>A mathematical measure of how far the model's predictions are from the correct answers. Training = minimizing this value. For language models, cross-entropy loss is standard.</p> <p>A mathematical measure of how far the model's predictions are from the correct answers. Training = minimizing this value. For language models, cross-entropy loss is standard.</p>
<div class="example"><strong>Example:</strong> If the correct next word is "cat" but the model assigns it 10% probability, the loss is high. If it assigns 90%, the loss is low.</div> <div class="example"><strong>Example:</strong> If the correct next word is "cat" but the model assigns it 10% probability, the loss is high. If it assigns 90%, the loss is low.</div>
<button class="llm-btn" onclick="walkThrough('📐 Loss Function (Cross-Entropy)', 'Walk through cross-entropy loss with a concrete example. If the true class is "cat" and the model outputs probabilities [dog: 0.5, cat: 0.3, bird: 0.2], show the exact calculation of -log(0.3) and explain what this number means.')"><span class="icon">📐</span> Walk me through</button>
</div> </div>
<div class="def-card"> <div class="def-card">
<span class="category">Training</span> <span class="category">Training</span>
<h3>Gradient Descent</h3> <h3>Gradient Descent</h3>
<p>The optimization algorithm that adjusts model weights in the direction that reduces loss. "Descent" because you're moving down the loss surface toward a minimum.</p> <p>The optimization algorithm that adjusts model weights in the direction that reduces loss. "Descent" because you're moving down the loss surface toward a minimum.</p>
<button class="llm-btn" onclick="walkThrough('📐 Gradient Descent', 'Walk through gradient descent with a concrete example. If loss(w) = (w - 3)², compute the gradient, show a weight update with learning rate 0.1 starting from w=0. Do 3 iterations and show how w approaches 3.')"><span class="icon">📐</span> Walk me through</button>
</div> </div>
<div class="def-card"> <div class="def-card">
<span class="category">Training</span> <span class="category">Training</span>
<h3>Adam Optimizer</h3> <h3>Adam Optimizer</h3>
<p>The most popular optimizer for training deep learning models. Combines momentum (acceleration) with adaptive learning rates (per-parameter tuning).</p> <p>The most popular optimizer for training deep learning models. Combines momentum (acceleration) with adaptive learning rates (per-parameter tuning).</p>
<button class="llm-btn" onclick="walkThrough('📐 Adam Optimizer', 'Walk through Adam optimizer step by step. Starting from gradient g=2 at step t=1, show how m (momentum) and v (variance) are computed with β1=0.9, β2=0.999, then show bias correction and the parameter update with lr=0.001.')"><span class="icon">📐</span> Walk me through</button>
</div> </div>
<div class="def-card"> <div class="def-card">
<span class="category">Training</span> <span class="category">Training</span>
<h3>Gradient</h3> <h3>Gradient</h3>
<p>A vector of partial derivatives showing the direction and rate of steepest increase of the loss. We move in the opposite direction to minimize loss.</p> <p>A vector of partial derivatives showing the direction and rate of steepest increase of the loss. We move in the opposite direction to minimize loss.</p>
<button class="llm-btn" onclick="walkThrough('📐 Gradient', 'Walk through what a gradient is with a concrete example. If loss = w1² + 2w2², compute ∂loss/∂w1 and ∂loss/∂w2 at the point (w1=1, w2=2). Show what the gradient vector looks like and what direction we move in.')"><span class="icon">📐</span> Walk me through</button>
</div> </div>
<div class="def-card"> <div class="def-card">
<span class="category">Training</span> <span class="category">Training</span>
<h3>Regularization</h3> <h3>Regularization</h3>
<p>Techniques to prevent overfitting: dropout (randomly deactivating neurons), weight decay (penalizing large weights), and early stopping.</p> <p>Techniques to prevent overfitting: dropout (randomly deactivating neurons), weight decay (penalizing large weights), and early stopping.</p>
<button class="llm-btn" onclick="walkThrough('📐 Regularization', 'Walk through L2 regularization (weight decay) with a concrete example. If loss = MSE + λ·Σw², show how the gradient changes with λ=0.01 vs λ=0. What happens to the weights over training?')"><span class="icon">📐</span> Walk me through</button>
</div> </div>
<div class="def-card"> <div class="def-card">
<span class="category">Training</span> <span class="category">Training</span>
<h3>Batch Normalization</h3> <h3>Batch Normalization</h3>
<p>Normalizing layer inputs across each mini-batch. Reduces internal covariate shift and allows higher learning rates.</p> <p>Normalizing layer inputs across each mini-batch. Reduces internal covariate shift and allows higher learning rates.</p>
<button class="llm-btn" onclick="walkThrough('📐 Batch Normalization', 'Walk through batch normalization step by step. If a mini-batch has activations [1, 3, 5, 7], show how to compute the batch mean and variance, normalize, and apply γ=1.5, β=0.5. Show the final output.')"><span class="icon">📐</span> Walk me through</button>
</div> </div>
<h2 class="section-title">Generation & Sampling</h2> <h2 class="section-title">Generation & Sampling</h2>
@@ -128,32 +172,38 @@
<p>Controls randomness in text generation. Low (0.2) = focused and deterministic. High (0.9) = creative and varied. 1.0 = standard sampling.</p> <p>Controls randomness in text generation. Low (0.2) = focused and deterministic. High (0.9) = creative and varied. 1.0 = standard sampling.</p>
<div class="example"><strong>Low temp:</strong> Technical documentation, code generation<br> <div class="example"><strong>Low temp:</strong> Technical documentation, code generation<br>
<strong>High temp:</strong> Creative writing, brainstorming</div> <strong>High temp:</strong> Creative writing, brainstorming</div>
<button class="llm-btn" onclick="walkThrough('📐 Temperature in Sampling', 'Walk through temperature with a concrete example. If a model outputs logits [2.0, 1.0, 0.0] for tokens [cat, dog, bird], show the probabilities at temperature=0.1, temperature=1.0, and temperature=2.0. Show the softmax math step by step.')"><span class="icon">📐</span> Walk me through</button>
</div> </div>
<div class="def-card"> <div class="def-card">
<span class="category">Sampling</span> <span class="category">Sampling</span>
<h3>Top-K Sampling</h3> <h3>Top-K Sampling</h3>
<p>At each step, only consider the K most likely next tokens. Reduces weird or irrelevant outputs.</p> <p>At each step, only consider the K most likely next tokens. Reduces weird or irrelevant outputs.</p>
<button class="llm-btn" onclick="walkThrough('📐 Top-K Sampling', 'Walk through Top-K sampling with an example. If a model outputs probabilities [cat: 0.4, dog: 0.25, bird: 0.15, fish: 0.1, car: 0.05, house: 0.03, tree: 0.02], show what happens with K=3 vs K=10. Show the renormalization.')"><span class="icon">📐</span> Walk me through</button>
</div> </div>
<div class="def-card"> <div class="def-card">
<span class="category">Sampling</span> <span class="category">Sampling</span>
<h3>Top-P (Nucleus) Sampling</h3> <h3>Top-P (Nucleus) Sampling</h3>
<p>Only consider tokens whose cumulative probability reaches P. More adaptive than Top-K — automatically adjusts the number of candidates.</p> <p>Only consider tokens whose cumulative probability reaches P. More adaptive than Top-K — automatically adjusts the number of candidates.</p>
<div class="example"><strong>Top-P = 0.9:</strong> Include the smallest set of tokens that together cover 90% probability mass.</div> <div class="example"><strong>Top-P = 0.9:</strong> Include the smallest set of tokens that together cover 90% probability mass.</div>
<button class="llm-btn" onclick="walkThrough('📐 Top-P (Nucleus) Sampling', 'Walk through Top-P sampling with the same example: [cat: 0.4, dog: 0.25, bird: 0.15, fish: 0.1, car: 0.05, house: 0.03, tree: 0.02]. Show which tokens are included at P=0.9 and P=0.95, and how the probabilities are renormalized.')"><span class="icon">📐</span> Walk me through</button>
</div> </div>
<div class="def-card"> <div class="def-card">
<span class="category">Sampling</span> <span class="category">Sampling</span>
<h3>Greedy Decoding</h3> <h3>Greedy Decoding</h3>
<p>Always pick the most likely next token. Fastest but can get stuck in repetitive loops. Often produces the most coherent output for factual tasks.</p> <p>Always pick the most likely next token. Fastest but can get stuck in repetitive loops. Often produces the most coherent output for factual tasks.</p>
<button class="llm-btn" onclick="walkThrough('📐 Greedy Decoding', 'Walk through greedy decoding vs beam search with a concrete example. If at step 1 the model outputs [the: 0.5, a: 0.3, I: 0.2], show what greedy picks. At step 2, if the continuation probabilities depend on the first token, show how greedy might get stuck vs beam search.')"><span class="icon">📐</span> Walk me through</button>
</div> </div>
<div class="def-card"> <div class="def-card">
<span class="category">Sampling</span> <span class="category">Sampling</span>
<h3>Beam Search</h3> <h3>Beam Search</h3>
<p>Instead of picking the single best token at each step, keep the top B sequences and pick the best overall. Better quality but slower.</p> <p>Instead of picking the single best token at each step, keep the top B sequences and pick the best overall. Better quality but slower.</p>
<button class="llm-btn" onclick="walkThrough('📐 Beam Search', 'Walk through beam search with beam size 2. Show how at each step, B sequences are expanded, pruned, and scored. Use a simple example with 3 possible tokens at each step. Show how beam search might find a better overall sequence than greedy.')"><span class="icon">📐</span> Walk me through</button>
</div> </div>
<div class="def-card"> <div class="def-card">
<span class="category">Sampling</span> <span class="category">Sampling</span>
<h3>Logits</h3> <h3>Logits</h3>
<p>The raw, unnormalized scores the model outputs for each token before softmax. Can be adjusted for bias correction, repetition penalties, and custom sampling.</p> <p>The raw, unnormalized scores the model outputs for each token before softmax. Can be adjusted for bias correction, repetition penalties, and custom sampling.</p>
<button class="llm-btn" onclick="walkThrough('📐 Logits', 'Walk through logits step by step. If the model outputs logits [2.0, 1.0, 0.0] for [cat, dog, bird], show: 1) softmax to get probabilities, 2) log to get log-probabilities, 3) how temperature scales logits, 4) how top-k filtering modifies them.')"><span class="icon">📐</span> Walk me through</button>
</div> </div>
<h2 class="section-title">Evaluation Metrics</h2> <h2 class="section-title">Evaluation Metrics</h2>
@@ -162,32 +212,32 @@
<h3>Perplexity</h3> <h3>Perplexity</h3>
<p>Measures how "surprised" the model is by test data. Lower is better. A perplexity of 100 means the model is as confused as choosing uniformly from 100 options.</p> <p>Measures how "surprised" the model is by test data. Lower is better. A perplexity of 100 means the model is as confused as choosing uniformly from 100 options.</p>
<div class="example"><strong>Example:</strong> Perplexity 5 on a language model means, on average, it's as uncertain as picking from 5 equally likely options at each step.</div> <div class="example"><strong>Example:</strong> Perplexity 5 on a language model means, on average, it's as uncertain as picking from 5 equally likely options at each step.</div>
<button class="llm-btn" onclick="walkThrough('📐 Perplexity', 'Walk through perplexity calculation step by step. If a model assigns probabilities [0.9, 0.8, 0.7, 0.6] to 4 correct tokens, show: 1) cross-entropy loss calculation, 2) how perplexity = 2^cross-entropy, 3) the final perplexity value and what it means.')"><span class="icon">📐</span> Walk me through</button>
</div> </div>
<div class="def-card"> <div class="def-card">
<span class="category">Metrics</span> <span class="category">Metrics</span>
<h3>Accuracy</h3> <h3>Accuracy</h3>
<p>Percentage of correct predictions. Simple but can be misleading for imbalanced datasets.</p> <p>Percentage of correct predictions. Simple but can be misleading for imbalanced datasets.</p>
<button class="llm-btn" onclick="walkThrough('📐 Accuracy', 'Walk through accuracy with a concrete example. If you have 1000 examples where 950 are class A and 50 are class B, and your model predicts everything as class A, what is the accuracy? Why is this misleading? Show a better metric.')"><span class="icon">📐</span> Walk me through</button>
</div> </div>
<div class="def-card"> <div class="def-card">
<span class="category">Metrics</span> <span class="category">Metrics</span>
<h3>Precision & Recall</h3> <h3>Precision & Recall</h3>
<p>Precision = of all positive predictions, how many were correct? Recall = of all actual positives, how many did we find?</p> <p>Precision = of all positive predictions, how many were correct? Recall = of all actual positives, how many did we find?</p>
<div class="example"><strong>Spam filter:</strong> High precision = few legitimate emails flagged. High recall = few spam emails missed.</div> <div class="example"><strong>Spam filter:</strong> High precision = few legitimate emails flagged. High recall = few spam emails missed.</div>
<button class="llm-btn" onclick="walkThrough('📐 Precision & Recall', 'Walk through precision and recall with a spam filter example. If the filter flags 100 emails as spam, and 80 are actually spam (20 are legitimate), and there are 10 total spam emails in the inbox, compute precision, recall, and F1.')"><span class="icon">📐</span> Walk me through</button>
</div> </div>
<div class="def-card"> <div class="def-card">
<span class="category">Metrics</span> <span class="category">Metrics</span>
<h3>F1 Score</h3> <h3>F1 Score</h3>
<p>The harmonic mean of precision and recall. A single metric that balances both.</p> <p>The harmonic mean of precision and recall. A single metric that balances both.</p>
<button class="llm-btn" onclick="walkThrough('📐 F1 Score', 'Walk through F1 score calculation. If precision = 0.8 and recall = 0.6, show: 1) the harmonic mean formula, 2) the step-by-step calculation, 3) why harmonic mean is used instead of arithmetic mean, 4) what F1 = 0.7 means.')"><span class="icon">📐</span> Walk me through</button>
</div> </div>
<div class="def-card"> <div class="def-card">
<span class="category">Metrics</span> <span class="category">Metrics</span>
<h3>BLEU / ROUGE</h3> <h3>BLEU / ROUGE</h3>
<p>Metrics for evaluating text generation quality by comparing model output to reference text. BLEU counts n-gram overlap (used for translation). ROUGE is similar but common for summarization.</p> <p>Metrics for evaluating text generation quality by comparing model output to reference text. BLEU counts n-gram overlap (used for translation). ROUGE is similar but common for summarization.</p>
</div> <button class="llm-btn" onclick="walkThrough('📐 BLEU Score', 'Walk through BLEU score calculation. If the model outputs "the cat sat on the mat" and the reference is "the cat sat on the mat", compute 1-gram, 2-gram, 3-gram, and 4-gram precision. Show the final BLEU score with the brevity penalty.')"><span class="icon">📐</span> Walk me through</button>
<div class="def-card">
<span class="category">Metrics</span>
<h3>Tokens per Second (TPS)</h3>
<p>How many tokens the model generates per second. Measures inference speed. Typical range: 20-100+ TPS depending on model size and hardware.</p>
</div> </div>
<h2 class="section-title">Key Formulas</h2> <h2 class="section-title">Key Formulas</h2>
@@ -210,5 +260,34 @@
<footer>AI Cheat Sheet &mdash; A learning reference for artificial intelligence</footer> <footer>AI Cheat Sheet &mdash; A learning reference for artificial intelligence</footer>
<script src="../lib/modal.js"></script>
<script src="../lib/llm.js"></script>
<script>
(function () {
  // Streams a step-by-step numeric walkthrough for a glossary concept into
  // the shared LLM modal. `title` becomes the modal heading; `prompt` is the
  // user message sent to the model.
  function walkThrough(title, prompt) {
    LLMModal.open(title);
    var conversation = [
      { role: 'system', content: 'You are an AI math tutor. Walk through the requested concept step by step with concrete numerical examples. Show every calculation explicitly. Use code blocks for math. Explain each step in plain language. Make it feel like a patient teacher working through a problem.' },
      { role: 'user', content: prompt }
    ];
    var accumulated = '';
    var handleChunk = function (chunk) {
      // Re-render the whole accumulated text on every streamed chunk.
      accumulated += chunk;
      LLMModal.update(accumulated);
    };
    var handleDone = function () {};
    var handleError = function (err) {
      LLMModal.error(err);
    };
    LLM.callAPI(conversation, handleChunk, handleDone, handleError);
  }
  // Exposed globally so the inline onclick handlers on the cards can call it.
  window.walkThrough = walkThrough;
})();
</script>
</body> </body>
</html> </html>

View File

@@ -10,7 +10,7 @@
<nav> <nav>
<div class="nav-inner"> <div class="nav-inner">
<a href="/" class="nav-brand">AI Cheat Sheet</a> <a href="../index.html" class="nav-brand">AI Cheat Sheet</a>
<div class="nav-links"> <div class="nav-links">
<a href="/pages/terminology.html">Terminology</a> <a href="/pages/terminology.html">Terminology</a>
<a href="/pages/techniques.html">Techniques</a> <a href="/pages/techniques.html">Techniques</a>
@@ -19,11 +19,15 @@
<a href="/pages/prompts.html">Prompt Guide</a> <a href="/pages/prompts.html">Prompt Guide</a>
<a href="/pages/math.html">Math & Concepts</a> <a href="/pages/math.html">Math & Concepts</a>
<a href="/pages/chat.html">Chat</a> <a href="/pages/chat.html">Chat</a>
<a href="/pages/image-gen.html">Image Gen</a>
</div> </div>
<button class="dark-toggle" id="darkToggle" aria-label="Toggle dark mode">🌙</button> <button class="dark-toggle" id="darkToggle" aria-label="Toggle dark mode">🌙</button>
</div> </div>
</nav> </nav>
<button class="menu-toggle" id="menuToggle" aria-label="Toggle menu">☰</button>
<div class="sidebar-backdrop" id="sidebarBackdrop"></div>
<script> <script>
(function(){ (function(){
var btn = document.getElementById('darkToggle'); var btn = document.getElementById('darkToggle');
@@ -44,6 +48,34 @@
localStorage.setItem('theme','dark'); localStorage.setItem('theme','dark');
} }
}); });
// Mobile sidebar wiring: the hamburger button toggles the nav open/closed;
// the backdrop click and any click outside the nav close it again.
var toggleBtn = document.getElementById('menuToggle');
var sidebar = document.querySelector('nav');
var overlay = document.getElementById('sidebarBackdrop');
if (toggleBtn && sidebar) {
  // Shared close path: hide the nav, restore the hamburger glyph, drop the backdrop.
  var closeSidebar = function () {
    sidebar.classList.remove('sidebar-open');
    toggleBtn.textContent = '☰';
    if (overlay) overlay.classList.remove('visible');
  };
  toggleBtn.addEventListener('click', function () {
    sidebar.classList.toggle('sidebar-open');
    var nowOpen = sidebar.classList.contains('sidebar-open');
    toggleBtn.textContent = nowOpen ? '✕' : '☰';
    if (overlay) {
      overlay.classList.toggle('visible', nowOpen);
    }
  });
  if (overlay) {
    overlay.addEventListener('click', closeSidebar);
  }
  document.addEventListener('click', function (e) {
    // Ignore clicks on the toggle itself — its own listener handles those.
    var clickedOutside = !sidebar.contains(e.target) && e.target !== toggleBtn;
    if (sidebar.classList.contains('sidebar-open') && clickedOutside) {
      closeSidebar();
    }
  });
}
})(); })();
</script> </script>
@@ -60,24 +92,28 @@
<h3>LLM (Large Language Model)</h3> <h3>LLM (Large Language Model)</h3>
<p>Neural networks based on the transformer architecture, trained on massive text corpora. They predict the next token given a sequence, enabling fluency in language tasks.</p> <p>Neural networks based on the transformer architecture, trained on massive text corpora. They predict the next token given a sequence, enabling fluency in language tasks.</p>
<div class="example"><strong>Examples:</strong> GPT-4, Claude, Gemini, Llama 3, Mistral, Qwen</div> <div class="example"><strong>Examples:</strong> GPT-4, Claude, Gemini, Llama 3, Mistral, Qwen</div>
<button class="llm-btn" onclick="explainModel('💬 LLM (Large Language Model)', 'Neural networks based on the transformer architecture, trained on massive text corpora. They predict the next token given a sequence.')"><span class="icon">💬</span> Explain</button>
</div> </div>
<div class="def-card"> <div class="def-card">
<span class="category">Transformer</span> <span class="category">Transformer</span>
<h3>Encoder-Only Models</h3> <h3>Encoder-Only Models</h3>
<p>Transformers designed to understand input (not generate text). Used for classification, sentiment analysis, and embedding generation.</p> <p>Transformers designed to understand input (not generate text). Used for classification, sentiment analysis, and embedding generation.</p>
<div class="example"><strong>Examples:</strong> BERT, RoBERTa, DeBERTa</div> <div class="example"><strong>Examples:</strong> BERT, RoBERTa, DeBERTa</div>
<button class="llm-btn" onclick="explainModel('💬 Encoder-Only Models', 'Transformers designed to understand input (not generate text). Used for classification, sentiment analysis, and embedding generation.')"><span class="icon">💬</span> Explain</button>
</div> </div>
<div class="def-card"> <div class="def-card">
<span class="category">Transformer</span> <span class="category">Transformer</span>
<h3>Decoder-Only Models</h3> <h3>Decoder-Only Models</h3>
<p>Transformers designed to generate text autoregressively — the dominant architecture for modern LLMs.</p> <p>Transformers designed to generate text autoregressively — the dominant architecture for modern LLMs.</p>
<div class="example"><strong>Examples:</strong> GPT series, Claude, Llama, Mistral</div> <div class="example"><strong>Examples:</strong> GPT series, Claude, Llama, Mistral</div>
<button class="llm-btn" onclick="explainModel('💬 Decoder-Only Models', 'Transformers designed to generate text autoregressively — the dominant architecture for modern LLMs.')"><span class="icon">💬</span> Explain</button>
</div> </div>
<div class="def-card"> <div class="def-card">
<span class="category">Transformer</span> <span class="category">Transformer</span>
<h3>Encoder-Decoder Models</h3> <h3>Encoder-Decoder Models</h3>
<p>Transformers with both encoder and decoder, used for tasks that transform input to output (translation, summarization).</p> <p>Transformers with both encoder and decoder, used for tasks that transform input to output (translation, summarization).</p>
<div class="example"><strong>Examples:</strong> T5, BART, Flan-T5</div> <div class="example"><strong>Examples:</strong> T5, BART, Flan-T5</div>
<button class="llm-btn" onclick="explainModel('💬 Encoder-Decoder Models', 'Transformers with both encoder and decoder, used for tasks that transform input to output.')"><span class="icon">💬</span> Explain</button>
</div> </div>
<h2 class="section-title">Vision Models</h2> <h2 class="section-title">Vision Models</h2>
@@ -86,24 +122,28 @@
<h3>CNN (Convolutional Neural Network)</h3> <h3>CNN (Convolutional Neural Network)</h3>
<p>Neural networks with layers that scan images with small filters, detecting edges, textures, and patterns hierarchically. The backbone of computer vision for years.</p> <p>Neural networks with layers that scan images with small filters, detecting edges, textures, and patterns hierarchically. The backbone of computer vision for years.</p>
<div class="example"><strong>Examples:</strong> ResNet, EfficientNet, VGG</div> <div class="example"><strong>Examples:</strong> ResNet, EfficientNet, VGG</div>
<button class="llm-btn" onclick="explainModel('💬 CNN', 'Neural networks with layers that scan images with small filters, detecting edges, textures, and patterns hierarchically.')"><span class="icon">💬</span> Explain</button>
</div> </div>
<div class="def-card"> <div class="def-card">
<span class="category">Vision</span> <span class="category">Vision</span>
<h3>ViT (Vision Transformer)</h3> <h3>ViT (Vision Transformer)</h3>
<p>Applying the transformer architecture to images by treating image patches as tokens. Often outperforms CNNs at scale.</p> <p>Applying the transformer architecture to images by treating image patches as tokens. Often outperforms CNNs at scale.</p>
<div class="example"><strong>Examples:</strong> CLIP, DINOv2, ViT-Base</div> <div class="example"><strong>Examples:</strong> CLIP, DINOv2, ViT-Base</div>
<button class="llm-btn" onclick="explainModel('💬 ViT', 'Applying the transformer architecture to images by treating image patches as tokens.')"><span class="icon">💬</span> Explain</button>
</div> </div>
<div class="def-card"> <div class="def-card">
<span class="category">Vision</span> <span class="category">Vision</span>
<h3>Diffusion Models</h3> <h3>Diffusion Models</h3>
<p>Models that generate images by iteratively denoising random noise. The architecture behind most state-of-the-art image generators.</p> <p>Models that generate images by iteratively denoising random noise. The architecture behind most state-of-the-art image generators.</p>
<div class="example"><strong>Examples:</strong> Stable Diffusion, DALL-E 3, Midjourney</div> <div class="example"><strong>Examples:</strong> Stable Diffusion, DALL-E 3, Midjourney</div>
<button class="llm-btn" onclick="explainModel('💬 Diffusion Models', 'Models that generate images by iteratively denoising random noise.')"><span class="icon">💬</span> Explain</button>
</div> </div>
<div class="def-card"> <div class="def-card">
<span class="category">Vision</span> <span class="category">Vision</span>
<h3>Multimodal Models</h3> <h3>Multimodal Models</h3>
<p>Models that process multiple input types — text, images, audio — and can generate outputs across modalities.</p> <p>Models that process multiple input types — text, images, audio — and can generate outputs across modalities.</p>
<div class="example"><strong>Examples:</strong> GPT-4V (vision), Claude 3, Gemini, Qwen-VL</div> <div class="example"><strong>Examples:</strong> GPT-4V (vision), Claude 3, Gemini, Qwen-VL</div>
<button class="llm-btn" onclick="explainModel('💬 Multimodal Models', 'Models that process multiple input types — text, images, audio — and can generate outputs across modalities.')"><span class="icon">💬</span> Explain</button>
</div> </div>
<h2 class="section-title">Generative Models</h2> <h2 class="section-title">Generative Models</h2>
@@ -112,18 +152,21 @@
<h3>GAN (Generative Adversarial Network)</h3> <h3>GAN (Generative Adversarial Network)</h3>
<p>Two networks compete: a generator creates fake data, and a discriminator tries to detect fakes. Over time, both improve until the generator is indistinguishable from real data.</p> <p>Two networks compete: a generator creates fake data, and a discriminator tries to detect fakes. Over time, both improve until the generator is indistinguishable from real data.</p>
<div class="example"><strong>Example:</strong> Creating photorealistic faces that don't exist (StyleGAN).</div> <div class="example"><strong>Example:</strong> Creating photorealistic faces that don't exist (StyleGAN).</div>
<button class="llm-btn" onclick="explainModel('💬 GAN', 'Two networks compete: a generator creates fake data, and a discriminator tries to detect fakes.')"><span class="icon">💬</span> Explain</button>
</div> </div>
<div class="def-card"> <div class="def-card">
<span class="category">Generative</span> <span class="category">Generative</span>
<h3>VQ-VAE (Vector Quantized VAE)</h3> <h3>VQ-VAE (Vector Quantized VAE)</h3>
<p>Combines autoencoders with discrete codebooks to learn compressed representations. Used as a foundation for autoregressive generation.</p> <p>Combines autoencoders with discrete codebooks to learn compressed representations. Used as a foundation for autoregressive generation.</p>
<div class="example"><strong>Example:</strong> MusicGen (music generation), SoundStream (audio compression)</div> <div class="example"><strong>Example:</strong> MusicGen (music generation), SoundStream (audio compression)</div>
<button class="llm-btn" onclick="explainModel('💬 VQ-VAE', 'Combines autoencoders with discrete codebooks to learn compressed representations.')"><span class="icon">💬</span> Explain</button>
</div> </div>
<div class="def-card"> <div class="def-card">
<span class="category">Generative</span> <span class="category">Generative</span>
<h3>Flow Models</h3> <h3>Flow Models</h3>
<p>Models that learn a reversible transformation between data and noise, enabling exact likelihood computation and fast generation.</p> <p>Models that learn a reversible transformation between data and noise, enabling exact likelihood computation and fast generation.</p>
<div class="example"><strong>Examples:</strong> Glow, RealNVP, Rectified Flow</div> <div class="example"><strong>Examples:</strong> Glow, RealNVP, Rectified Flow</div>
<button class="llm-btn" onclick="explainModel('💬 Flow Models', 'Models that learn a reversible transformation between data and noise.')"><span class="icon">💬</span> Explain</button>
</div> </div>
<h2 class="section-title">Other Architectures</h2> <h2 class="section-title">Other Architectures</h2>
@@ -132,24 +175,28 @@
<h3>RNN / LSTM</h3> <h3>RNN / LSTM</h3>
<p>Recurrent networks that process sequences step-by-step, maintaining a hidden state. Largely replaced by transformers but still used in some applications.</p> <p>Recurrent networks that process sequences step-by-step, maintaining a hidden state. Largely replaced by transformers but still used in some applications.</p>
<div class="example"><strong>Use case:</strong> Time series prediction, speech recognition</div> <div class="example"><strong>Use case:</strong> Time series prediction, speech recognition</div>
<button class="llm-btn" onclick="explainModel('💬 RNN/LSTM', 'Recurrent networks that process sequences step-by-step, maintaining a hidden state.')"><span class="icon">💬</span> Explain</button>
</div> </div>
<div class="def-card"> <div class="def-card">
<span class="category">Architecture</span> <span class="category">Architecture</span>
<h3>Mixture of Experts (MoE)</h3> <h3>Mixture of Experts (MoE)</h3>
<p>A model with multiple "expert" subnetworks. A routing mechanism selects which experts to use for each input, enabling large models that are computationally efficient at inference.</p> <p>A model with multiple "expert" subnetworks. A routing mechanism selects which experts to use for each input, enabling large models that are computationally efficient at inference.</p>
<div class="example"><strong>Examples:</strong> Mixtral 8x7B, Google's Switch Transformer, Grok-1</div> <div class="example"><strong>Examples:</strong> Mixtral 8x7B, Google's Switch Transformer, Grok-1</div>
<button class="llm-btn" onclick="explainModel('💬 Mixture of Experts (MoE)', 'A model with multiple expert subnetworks. A routing mechanism selects which experts to use for each input.')"><span class="icon">💬</span> Explain</button>
</div> </div>
<div class="def-card"> <div class="def-card">
<span class="category">Architecture</span> <span class="category">Architecture</span>
<h3>Retrieval Models</h3> <h3>Retrieval Models</h3>
<p>Models designed specifically for semantic search — finding the most relevant documents for a query from a large corpus.</p> <p>Models designed specifically for semantic search — finding the most relevant documents for a query from a large corpus.</p>
<div class="example"><strong>Examples:</strong> BGE, E5, Cohere embed models</div> <div class="example"><strong>Examples:</strong> BGE, E5, Cohere embed models</div>
<button class="llm-btn" onclick="explainModel('💬 Retrieval Models', 'Models designed specifically for semantic search — finding the most relevant documents for a query.')"><span class="icon">💬</span> Explain</button>
</div> </div>
<div class="def-card"> <div class="def-card">
<span class="category">Architecture</span> <span class="category">Architecture</span>
<h3>Small Language Models (SLMs)</h3> <h3>Small Language Models (SLMs)</h3>
<p>Compact language models (under 7B parameters) optimized for edge devices and low-latency applications. Getting remarkably capable.</p> <p>Compact language models (under 7B parameters) optimized for edge devices and low-latency applications. Getting remarkably capable.</p>
<div class="example"><strong>Examples:</strong> Phi-3, Gemma 2B, Qwen 1.5B, MicroLlama</div> <div class="example"><strong>Examples:</strong> Phi-3, Gemma 2B, Qwen 1.5B, MicroLlama</div>
<button class="llm-btn" onclick="explainModel('💬 Small Language Models (SLMs)', 'Compact language models (under 7B parameters) optimized for edge devices and low-latency applications.')"><span class="icon">💬</span> Explain</button>
</div> </div>
<h2 class="section-title">Model Comparison</h2> <h2 class="section-title">Model Comparison</h2>
@@ -171,9 +218,93 @@
</tbody> </tbody>
</table> </table>
<h2 class="section-title">AI Assistant</h2>
<div class="def-card">
<span class="category">Interactive</span>
<h3>🤖 Which Model Should I Use?</h3>
<p>Describe your task, constraints, and goals — the AI will recommend the best model architecture and specific model from the comparison table.</p>
<div class="llm-mini-chat" id="model-chat">
<div class="llm-mini-chat-header"><h4>🤖 Model Advisor</h4><button class="llm-close-btn" onclick="this.closest('.llm-mini-chat').classList.remove('visible')"></button></div>
<div class="llm-mini-chat-input-row">
<input class="llm-mini-chat-input" id="model-input" placeholder="e.g., I need to classify customer reviews by sentiment, processing 10,000 per day..." />
<button class="llm-mini-chat-send" onclick="askModelAdvisor()">Ask</button>
</div>
<div class="llm-mini-chat-output" id="model-output" style="margin-top: 0.8rem;"></div>
</div>
</div>
</div> </div>
<footer>AI Cheat Sheet &mdash; A learning reference for artificial intelligence</footer> <footer>AI Cheat Sheet &mdash; A learning reference for artificial intelligence</footer>
<script src="../lib/modal.js"></script>
<script src="../lib/llm.js"></script>
<script>
(function () {
  // Opens the shared modal and streams an architecture explanation for one
  // model card. `title` is the card heading; `definition` is its blurb.
  function explainModel(title, definition) {
    LLMModal.open(title);
    var conversation = [
      { role: 'system', content: 'You are an AI educator explaining model architectures. Explain how this model type works, what makes it unique, when to use it vs alternatives, and concrete examples. Use analogies and keep it practical.' },
      { role: 'user', content: 'Explain this AI model architecture: ' + title + '. ' + definition + '. Compare it to similar architectures and explain when you would choose this one over alternatives.' }
    ];
    var accumulated = '';
    LLM.callAPI(
      conversation,
      function (chunk) {
        accumulated += chunk;
        LLMModal.update(accumulated);
      },
      function () {},
      function (err) {
        LLMModal.error(err);
      }
    );
  }

  // Sends the user's task description plus the static comparison table to
  // the advisor and streams the recommendation into #model-output.
  function askModelAdvisor() {
    var input = document.getElementById('model-input');
    var output = document.getElementById('model-output');
    var text = input.value.trim();
    if (!text) return;
    output.innerHTML = '<span class="llm-loading">Analyzing...</span>';
    var comparisonTable = `
Model Comparison:
- GPT-4 / GPT-4o: Decoder LLM - General-purpose reasoning, coding, multimodal
- Claude 3.5: Decoder LLM - Long-context analysis, coding, writing
- Gemini 1.5 Pro: Decoder LLM - Massive context windows, multimodal
- Llama 3: Decoder LLM - Open-source, self-hosting, fine-tuning
- Mistral Large: Dense LLM - High-quality reasoning, multilingual
- Stable Diffusion: Diffusion - Image generation, open-source
- CLIP: Encoder (Vision+Text) - Image-text matching, embeddings
- BERT: Encoder - Text classification, search, NLU
- Whisper: Encoder-Decoder - Speech recognition, transcription
- TTS models: Decoder - Text-to-speech, voice synthesis`;
    var conversation = [
      { role: 'system', content: 'You are an AI model advisor. Help users choose the right AI model for their task. Consider: 1) The task type (text, image, audio, structured data), 2) Scale/volume requirements, 3) Latency needs, 4) Budget constraints, 5) Open-source vs proprietary, 6) Deployment environment. Recommend specific models from the comparison table and explain why.' },
      { role: 'user', content: 'I need help choosing an AI model for my task: ' + text + '. Here are the available model options:\n' + comparisonTable + '\nWhich model should I use and why?' }
    ];
    // Rejection is intentionally ignored here; presumably chatWithHistory
    // renders its own error state into the output element — TODO confirm
    // against lib/llm.js.
    LLM.chatWithHistory('model-output', conversation).catch(function () {});
  }

  // Submit the advisor question on Enter as well as via the Ask button.
  var advisorInput = document.getElementById('model-input');
  if (advisorInput) {
    advisorInput.addEventListener('keydown', function (e) {
      if (e.key === 'Enter') {
        e.preventDefault();
        askModelAdvisor();
      }
    });
  }

  // Exposed globally for the inline onclick handlers in the page markup.
  window.explainModel = explainModel;
  window.askModelAdvisor = askModelAdvisor;
})();
</script>
</body> </body>
</html> </html>

View File

@@ -10,7 +10,7 @@
<nav> <nav>
<div class="nav-inner"> <div class="nav-inner">
<a href="/" class="nav-brand">AI Cheat Sheet</a> <a href="../index.html" class="nav-brand">AI Cheat Sheet</a>
<div class="nav-links"> <div class="nav-links">
<a href="/pages/terminology.html">Terminology</a> <a href="/pages/terminology.html">Terminology</a>
<a href="/pages/techniques.html">Techniques</a> <a href="/pages/techniques.html">Techniques</a>
@@ -19,11 +19,15 @@
<a href="/pages/prompts.html" class="active">Prompt Guide</a> <a href="/pages/prompts.html" class="active">Prompt Guide</a>
<a href="/pages/math.html">Math & Concepts</a> <a href="/pages/math.html">Math & Concepts</a>
<a href="/pages/chat.html">Chat</a> <a href="/pages/chat.html">Chat</a>
<a href="/pages/image-gen.html">Image Gen</a>
</div> </div>
<button class="dark-toggle" id="darkToggle" aria-label="Toggle dark mode">🌙</button> <button class="dark-toggle" id="darkToggle" aria-label="Toggle dark mode">🌙</button>
</div> </div>
</nav> </nav>
<button class="menu-toggle" id="menuToggle" aria-label="Toggle menu">☰</button>
<div class="sidebar-backdrop" id="sidebarBackdrop"></div>
<script> <script>
(function(){ (function(){
var btn = document.getElementById('darkToggle'); var btn = document.getElementById('darkToggle');
@@ -44,6 +48,34 @@
localStorage.setItem('theme','dark'); localStorage.setItem('theme','dark');
} }
}); });
var menuToggle = document.getElementById('menuToggle');
var nav = document.querySelector('nav');
var backdrop = document.getElementById('sidebarBackdrop');
if(menuToggle && nav){
menuToggle.addEventListener('click', function(){
nav.classList.toggle('sidebar-open');
var isOpen = nav.classList.contains('sidebar-open');
menuToggle.textContent = isOpen ? '✕' : '☰';
if(backdrop){
backdrop.classList.toggle('visible', isOpen);
}
});
if(backdrop){
backdrop.addEventListener('click', function(){
nav.classList.remove('sidebar-open');
menuToggle.textContent = '☰';
backdrop.classList.remove('visible');
});
}
document.addEventListener('click', function(e){
if(nav.classList.contains('sidebar-open') && !nav.contains(e.target) && e.target !== menuToggle){
nav.classList.remove('sidebar-open');
menuToggle.textContent = '☰';
if(backdrop) backdrop.classList.remove('visible');
}
});
}
})(); })();
</script> </script>
@@ -174,9 +206,102 @@
Then rewrite it incorporating your feedback."</div> Then rewrite it incorporating your feedback."</div>
</div> </div>
<h2 class="section-title">Playground</h2>
<div class="def-card">
<span class="category">Interactive</span>
<h3>🧪 Test Your Prompts Live</h3>
<p>Try any prompt technique with your configured model. Paste a prompt template, fill in the variables, and see the result.</p>
<div class="llm-mini-chat visible" id="prompt-playground">
<div class="llm-mini-chat-header"><h4>🧪 Prompt Playground</h4><button class="llm-close-btn" onclick="this.closest('.llm-mini-chat').classList.remove('visible')"></button></div>
<div style="margin-bottom: 0.8rem;">
<label style="font-size: 0.8rem; font-weight: 700; color: var(--pink-600); text-transform: uppercase; letter-spacing: 0.5px; display: block; margin-bottom: 0.3rem;">Choose a technique</label>
<select id="prompt-technique" style="width: 100%; padding: 0.5rem 0.8rem; border: 2px solid var(--pink-200); border-radius: 10px; font-size: 0.88rem; color: var(--pink-900); background: var(--pink-50);">
<option value="zero-shot">Zero-Shot</option>
<option value="few-shot">Few-Shot</option>
<option value="chain-of-thought">Chain-of-Thought</option>
<option value="role">Role Prompting</option>
<option value="structured">Structured Output</option>
</select>
</div>
<div style="margin-bottom: 0.8rem;">
<label style="font-size: 0.8rem; font-weight: 700; color: var(--pink-600); text-transform: uppercase; letter-spacing: 0.5px; display: block; margin-bottom: 0.3rem;">Your prompt</label>
<textarea class="llm-mini-chat-input" id="prompt-input" rows="4" placeholder="Enter your prompt here..."></textarea>
</div>
<button class="llm-mini-chat-send" onclick="testPrompt()" style="width: 100%; padding: 0.7rem;">Send to LLM</button>
<div class="llm-mini-chat-output" id="prompt-output" style="margin-top: 0.8rem;"></div>
</div>
</div>
</div> </div>
<footer>AI Cheat Sheet &mdash; A learning reference for artificial intelligence</footer> <footer>AI Cheat Sheet &mdash; A learning reference for artificial intelligence</footer>
<script src="../lib/modal.js"></script>
<script src="../lib/llm.js"></script>
<script>
(function(){
  // Prompt templates for each technique. "{text}", "{task}" and "{domain}"
  // are placeholders that applyTechnique() fills with the user's input.
  // NOTE: 'zero-shot' deliberately has no placeholder — see applyTechnique.
  var techniquePrompts = {
    'zero-shot': 'Perform the task I describe. Do not add examples. Just answer directly.',
    'few-shot': 'Classify the sentiment of the following text. Here are examples:\n\n"I love this!" → Positive\n"This is terrible." → Negative\n"It\'s okay, I guess." → ?\n\n{text}\n→',
    'chain-of-thought': 'Solve this step by step. Think through each step carefully before giving your final answer.\n\n{task}',
    'role': 'You are an expert in {domain}. {task}',
    'structured': 'Analyze the following text and return results as JSON with these keys: summary, sentiment, key_topics, action_items.\n\n{text}'
  };
  // Builds the final prompt by substituting the user's input into the
  // selected technique template. Returns the bare template when the
  // input box is empty.
  function applyTechnique() {
    var technique = document.getElementById('prompt-technique').value;
    var basePrompt = techniquePrompts[technique];
    var userPrompt = document.getElementById('prompt-input').value.trim();
    if (!userPrompt) return basePrompt;
    var filled = basePrompt.replace('{text}', userPrompt).replace('{task}', userPrompt).replace('{domain}', 'general');
    // Bug fix: templates with no placeholder (e.g. zero-shot) previously
    // discarded the user's input entirely. If no substitution happened,
    // append the input after the template instead of dropping it.
    if (filled === basePrompt) {
      filled = basePrompt + '\n\n' + userPrompt;
    }
    return filled;
  }
  // Click handler for the playground "Send to LLM" button: validates the
  // input, opens the shared modal, and streams the model's response into it.
  function testPrompt() {
    var userPrompt = document.getElementById('prompt-input').value.trim();
    var technique = document.getElementById('prompt-technique').value;
    if (!userPrompt) {
      LLMModal.open('🧪 Prompt Playground');
      LLMModal.error('Please enter a prompt to test.');
      return;
    }
    // Human-readable labels for the modal title.
    var techniqueNames = {
      'zero-shot': 'Zero-Shot',
      'few-shot': 'Few-Shot',
      'chain-of-thought': 'Chain-of-Thought',
      'role': 'Role Prompting',
      'structured': 'Structured Output'
    };
    LLMModal.open('🧪 Testing: ' + techniqueNames[technique]);
    var finalPrompt = applyTechnique();
    var systemPrompt = 'You are a helpful AI assistant. Respond to the prompt below using the specified technique.';
    var messages = [
      { role: 'system', content: systemPrompt },
      { role: 'user', content: finalPrompt }
    ];
    // Accumulate streamed chunks so the modal always shows the full text.
    var fullText = '';
    LLM.callAPI(
      messages,
      function(chunk) {
        fullText += chunk;
        LLMModal.update(fullText);
      },
      function() {},
      function(err) {
        LLMModal.error(err);
      }
    );
  }
  // Exposed globally because the button uses an inline onclick attribute.
  window.testPrompt = testPrompt;
})();
</script>
</body> </body>
</html> </html>

View File

@@ -10,7 +10,7 @@
<nav> <nav>
<div class="nav-inner"> <div class="nav-inner">
<a href="/" class="nav-brand">AI Cheat Sheet</a> <a href="../index.html" class="nav-brand">AI Cheat Sheet</a>
<div class="nav-links"> <div class="nav-links">
<a href="/pages/terminology.html">Terminology</a> <a href="/pages/terminology.html">Terminology</a>
<a href="/pages/techniques.html" class="active">Techniques</a> <a href="/pages/techniques.html" class="active">Techniques</a>
@@ -19,11 +19,15 @@
<a href="/pages/prompts.html">Prompt Guide</a> <a href="/pages/prompts.html">Prompt Guide</a>
<a href="/pages/math.html">Math & Concepts</a> <a href="/pages/math.html">Math & Concepts</a>
<a href="/pages/chat.html">Chat</a> <a href="/pages/chat.html">Chat</a>
<a href="/pages/image-gen.html">Image Gen</a>
</div> </div>
<button class="dark-toggle" id="darkToggle" aria-label="Toggle dark mode">🌙</button> <button class="dark-toggle" id="darkToggle" aria-label="Toggle dark mode">🌙</button>
</div> </div>
</nav> </nav>
<button class="menu-toggle" id="menuToggle" aria-label="Toggle menu">☰</button>
<div class="sidebar-backdrop" id="sidebarBackdrop"></div>
<script> <script>
(function(){ (function(){
var btn = document.getElementById('darkToggle'); var btn = document.getElementById('darkToggle');
@@ -44,6 +48,34 @@
localStorage.setItem('theme','dark'); localStorage.setItem('theme','dark');
} }
}); });
var menuToggle = document.getElementById('menuToggle');
var nav = document.querySelector('nav');
var backdrop = document.getElementById('sidebarBackdrop');
if(menuToggle && nav){
menuToggle.addEventListener('click', function(){
nav.classList.toggle('sidebar-open');
var isOpen = nav.classList.contains('sidebar-open');
menuToggle.textContent = isOpen ? '✕' : '☰';
if(backdrop){
backdrop.classList.toggle('visible', isOpen);
}
});
if(backdrop){
backdrop.addEventListener('click', function(){
nav.classList.remove('sidebar-open');
menuToggle.textContent = '☰';
backdrop.classList.remove('visible');
});
}
document.addEventListener('click', function(e){
if(nav.classList.contains('sidebar-open') && !nav.contains(e.target) && e.target !== menuToggle){
nav.classList.remove('sidebar-open');
menuToggle.textContent = '☰';
if(backdrop) backdrop.classList.remove('visible');
}
});
}
})(); })();
</script> </script>
@@ -60,32 +92,38 @@
<h3>Backpropagation</h3> <h3>Backpropagation</h3>
<p>The core algorithm for training neural networks. It calculates the gradient of the loss function with respect to each weight by chain rule, then adjusts weights to minimize error.</p> <p>The core algorithm for training neural networks. It calculates the gradient of the loss function with respect to each weight by chain rule, then adjusts weights to minimize error.</p>
<div class="example"><strong>Analogy:</strong> Like adjusting a radio dial — you turn it slightly, check if the signal is clearer, and keep adjusting in the right direction.</div> <div class="example"><strong>Analogy:</strong> Like adjusting a radio dial — you turn it slightly, check if the signal is clearer, and keep adjusting in the right direction.</div>
<button class="llm-btn" onclick="demoTechnique('🧪 Backpropagation', 'Explain backpropagation in neural networks using a simple analogy. Walk through a numerical example: if the loss is 0.5 and the gradient is -0.03, what happens to the weight? Show the math.')"><span class="icon">🧪</span> Try it</button>
</div> </div>
<div class="def-card"> <div class="def-card">
<span class="category">Training</span> <span class="category">Training</span>
<h3>Epoch</h3> <h3>Epoch</h3>
<p>One complete pass through the entire training dataset. Models typically train for many epochs.</p> <p>One complete pass through the entire training dataset. Models typically train for many epochs.</p>
<button class="llm-btn" onclick="demoTechnique('🧪 Epochs', 'Explain what an epoch is in training. Walk through a concrete example: if you have 10,000 training examples and a batch size of 32, how many steps per epoch? How many total steps for 10 epochs?')"><span class="icon">🧪</span> Try it</button>
</div> </div>
<div class="def-card"> <div class="def-card">
<span class="category">Training</span> <span class="category">Training</span>
<h3>Batch Size</h3> <h3>Batch Size</h3>
<p>The number of training examples processed before the model's weights are updated. Larger batches are more stable but use more memory.</p> <p>The number of training examples processed before the model's weights are updated. Larger batches are more stable but use more memory.</p>
<button class="llm-btn" onclick="demoTechnique('🧪 Batch Size', 'Explain batch size in training. Compare batch sizes of 1, 32, and 256 — what are the tradeoffs for each in terms of training speed, memory, and convergence quality?')"><span class="icon">🧪</span> Try it</button>
</div> </div>
<div class="def-card"> <div class="def-card">
<span class="category">Training</span> <span class="category">Training</span>
<h3>Learning Rate</h3> <h3>Learning Rate</h3>
<p>A hyperparameter that controls how much to adjust weights during each update. Too high → unstable training; too low → slow convergence.</p> <p>A hyperparameter that controls how much to adjust weights during each update. Too high → unstable training; too low → slow convergence.</p>
<button class="llm-btn" onclick="demoTechnique('🧪 Learning Rate', 'Explain learning rate with a visual analogy. Show what happens numerically: if the learning rate is 0.1 vs 0.001, and the gradient is 5, what is the weight update in each case?')"><span class="icon">🧪</span> Try it</button>
</div> </div>
<div class="def-card"> <div class="def-card">
<span class="category">Training</span> <span class="category">Training</span>
<h3>Transfer Learning</h3> <h3>Transfer Learning</h3>
<p>Using a model trained on one task as the starting point for a model on a second task. Saves time and data.</p> <p>Using a model trained on one task as the starting point for a model on a second task. Saves time and data.</p>
<div class="example"><strong>Example:</strong> A model trained on Wikipedia text is fine-tuned for legal document analysis.</div> <div class="example"><strong>Example:</strong> A model trained on Wikipedia text is fine-tuned for legal document analysis.</div>
<button class="llm-btn" onclick="demoTechnique('🧪 Transfer Learning', 'Explain transfer learning with a concrete example. If someone wanted to build a sentiment analysis model for restaurant reviews using transfer learning, walk through the steps from pre-trained model to final fine-tuned model.')"><span class="icon">🧪</span> Try it</button>
</div> </div>
<div class="def-card"> <div class="def-card">
<span class="category">Training</span> <span class="category">Training</span>
<h3>Data Augmentation</h3> <h3>Data Augmentation</h3>
<p>Artificially expanding a training dataset by applying transformations (e.g., rotation, flipping, synonym replacement) to create new training examples.</p> <p>Artificially expanding a training dataset by applying transformations (e.g., rotation, flipping, synonym replacement) to create new training examples.</p>
<button class="llm-btn" onclick="demoTechnique('🧪 Data Augmentation', 'Explain data augmentation. Give me 5 concrete examples of how you would augment a dataset of 100 product reviews to create 500 training examples. Include both text and non-text techniques.')"><span class="icon">🧪</span> Try it</button>
</div> </div>
<h2 class="section-title">Alignment & Improvement</h2> <h2 class="section-title">Alignment & Improvement</h2>
@@ -94,22 +132,26 @@
<h3>RLHF (Reinforcement Learning from Human Feedback)</h3> <h3>RLHF (Reinforcement Learning from Human Feedback)</h3>
<p>A technique to align model outputs with human preferences. Humans rank model responses, and a reward model is trained on those rankings. The main model is then fine-tuned to maximize the reward.</p> <p>A technique to align model outputs with human preferences. Humans rank model responses, and a reward model is trained on those rankings. The main model is then fine-tuned to maximize the reward.</p>
<div class="example"><strong>Used by:</strong> ChatGPT, Claude, and other conversational AI systems to make them more helpful and harmless.</div> <div class="example"><strong>Used by:</strong> ChatGPT, Claude, and other conversational AI systems to make them more helpful and harmless.</div>
<button class="llm-btn" onclick="demoTechnique('🧪 RLHF', 'Walk me through RLHF step by step with a concrete example. Start with a raw model that gives bad answers, show how human feedback improves it, and explain the reward model training process.')"><span class="icon">🧪</span> Try it</button>
</div> </div>
<div class="def-card"> <div class="def-card">
<span class="category">Alignment</span> <span class="category">Alignment</span>
<h3>SFT (Supervised Fine-Tuning)</h3> <h3>SFT (Supervised Fine-Tuning)</h3>
<p>Fine-tuning a model on a dataset of input-output pairs to teach it a specific format or style of response.</p> <p>Fine-tuning a model on a dataset of input-output pairs to teach it a specific format or style of response.</p>
<div class="example"><strong>Example:</strong> Training a model to respond in JSON format for API integration.</div> <div class="example"><strong>Example:</strong> Training a model to respond in JSON format for API integration.</div>
<button class="llm-btn" onclick="demoTechnique('🧪 SFT', 'Show me an example of SFT (Supervised Fine-Tuning). Give me 5 example input-output pairs for fine-tuning a model to act as a customer support agent, then explain what the model learns from these.')"><span class="icon">🧪</span> Try it</button>
</div> </div>
<div class="def-card"> <div class="def-card">
<span class="category">Alignment</span> <span class="category">Alignment</span>
<h3>Prompt Tuning</h3> <h3>Prompt Tuning</h3>
<p>Instead of changing model weights, carefully crafting prompts to guide the model's behavior. Zero-cost and reversible.</p> <p>Instead of changing model weights, carefully crafting prompts to guide the model's behavior. Zero-cost and reversible.</p>
<button class="llm-btn" onclick="demoTechnique('🧪 Prompt Tuning', 'Compare prompt tuning vs fine-tuning. Show 3 examples where the same task (e.g., sentiment analysis) is handled with just a prompt vs with fine-tuning — what are the pros and cons of each?')"><span class="icon">🧪</span> Try it</button>
</div> </div>
<div class="def-card"> <div class="def-card">
<span class="category">Alignment</span> <span class="category">Alignment</span>
<h3>LoRA (Low-Rank Adaptation)</h3> <h3>LoRA (Low-Rank Adaptation)</h3>
<p>An efficient fine-tuning technique that adds small trainable matrices to a frozen pre-trained model, drastically reducing compute and memory needs.</p> <p>An efficient fine-tuning technique that adds small trainable matrices to a frozen pre-trained model, drastically reducing compute and memory needs.</p>
<button class="llm-btn" onclick="demoTechnique('🧪 LoRA', 'Explain LoRA (Low-Rank Adaptation) simply. If a model has 7 billion parameters, how many parameters does LoRA actually train? Walk through the math and explain why this is so much more efficient than full fine-tuning.')"><span class="icon">🧪</span> Try it</button>
</div> </div>
<h2 class="section-title">Deployment & Optimization</h2> <h2 class="section-title">Deployment & Optimization</h2>
@@ -118,39 +160,74 @@
<h3>Quantization</h3> <h3>Quantization</h3>
<p>Reducing the precision of model weights (e.g., from 32-bit to 8-bit) to shrink model size and speed up inference with minimal accuracy loss.</p> <p>Reducing the precision of model weights (e.g., from 32-bit to 8-bit) to shrink model size and speed up inference with minimal accuracy loss.</p>
<div class="example"><strong>Example:</strong> A 13GB model quantized to 4-bit becomes ~3.5GB, fitting on consumer GPUs.</div> <div class="example"><strong>Example:</strong> A 13GB model quantized to 4-bit becomes ~3.5GB, fitting on consumer GPUs.</div>
<button class="llm-btn" onclick="demoTechnique('🧪 Quantization', 'Explain quantization with concrete numbers. If a model has 7 billion parameters stored as 32-bit floats, what is the total size? What happens at 16-bit, 8-bit, and 4-bit? Show the accuracy tradeoffs.')"><span class="icon">🧪</span> Try it</button>
</div> </div>
<div class="def-card"> <div class="def-card">
<span class="category">Optimization</span> <span class="category">Optimization</span>
<h3>Distillation</h3> <h3>Distillation</h3>
<p>Training a smaller "student" model to mimic the behavior of a larger "teacher" model, capturing its knowledge in a more compact form.</p> <p>Training a smaller "student" model to mimic the behavior of a larger "teacher" model, capturing its knowledge in a more compact form.</p>
<button class="llm-btn" onclick="demoTechnique('🧪 Knowledge Distillation', 'Explain knowledge distillation with a concrete example. If GPT-4 is the teacher and a 350M parameter model is the student, how does the student learn? Show a specific example of soft vs hard labels.')"><span class="icon">🧪</span> Try it</button>
</div> </div>
<div class="def-card"> <div class="def-card">
<span class="category">Optimization</span> <span class="category">Optimization</span>
<h3>Speculative Decoding</h3> <h3>Speculative Decoding</h3>
<p>Using a small model to draft multiple tokens, then having the large model verify them in parallel — speeding up generation.</p> <p>Using a small model to draft multiple tokens, then having the large model verify them in parallel — speeding up generation.</p>
<button class="llm-btn" onclick="demoTechnique('🧪 Speculative Decoding', 'Explain speculative decoding with a step-by-step example. If the small model drafts ["the", "cat"] and the large model verifies both, how many forward passes does this save? Walk through a full sequence.')"><span class="icon">🧪</span> Try it</button>
</div> </div>
<div class="def-card"> <div class="def-card">
<span class="category">Architecture</span> <span class="category">Architecture</span>
<h3>RAG (Retrieval-Augmented Generation)</h3> <h3>RAG (Retrieval-Augmented Generation)</h3>
<p>Augmenting a language model with an external knowledge retrieval step. The model first searches a knowledge base, then generates a response using both the retrieved info and its own training.</p> <p>Augmenting a language model with an external knowledge retrieval step. The model first searches a knowledge base, then generates a response using both the retrieved info and its own training.</p>
<div class="example"><strong>Example:</strong> A customer support bot that searches your product docs before answering questions — no fine-tuning needed.</div> <div class="example"><strong>Example:</strong> A customer support bot that searches your product docs before answering questions — no fine-tuning needed.</div>
<button class="llm-btn" onclick="demoTechnique('🧪 RAG', 'Walk through a complete RAG pipeline step by step: given a user question "How do I reset my password?", show how the retrieval system works, what gets retrieved, and how the LLM generates the final answer using the retrieved docs.')"><span class="icon">🧪</span> Try it</button>
</div> </div>
<div class="def-card"> <div class="def-card">
<span class="category">Architecture</span> <span class="category">Architecture</span>
<h3>Agent / Tool Use</h3> <h3>Agent / Tool Use</h3>
<p>Giving an LLM the ability to call external tools (search, calculators, APIs) to accomplish multi-step tasks.</p> <p>Giving an LLM the ability to call external tools (search, calculators, APIs) to accomplish multi-step tasks.</p>
<div class="example"><strong>Example:</strong> An AI that searches the web, summarizes results, and writes a report — all autonomously.</div> <div class="example"><strong>Example:</strong> An AI that searches the web, summarizes results, and writes a report — all autonomously.</div>
<button class="llm-btn" onclick="demoTechnique('🧪 Agent / Tool Use', 'Show me a concrete example of an AI agent with tool use. Walk through how an agent plans and executes the task &quot;Find today\'s weather in Tokyo and suggest what to wear&quot; show each tool call, observation, and thought step.')"><span class="icon">🧪</span> Try it</button>
</div> </div>
<div class="def-card"> <div class="def-card">
<span class="category">Architecture</span> <span class="category">Architecture</span>
<h3>Chain-of-Thought</h3> <h3>Chain-of-Thought</h3>
<p>Asking a model to show its reasoning step-by-step before giving an answer. Dramatically improves performance on reasoning tasks.</p> <p>Asking a model to show its reasoning step-by-step before giving an answer. Dramatically improves performance on reasoning tasks.</p>
<div class="example"><strong>Prompt:</strong> "Let's think step by step. First, ..."</div> <div class="example"><strong>Prompt:</strong> "Let's think step by step. First, ..."</div>
<button class="llm-btn" onclick="demoTechnique('🧪 Chain-of-Thought', 'Demonstrate chain-of-thought prompting. Give me a math word problem, then show: 1) the answer without CoT, 2) the answer with CoT showing each reasoning step. Explain why CoT improves accuracy.')"><span class="icon">🧪</span> Try it</button>
</div> </div>
</div> </div>
<footer>AI Cheat Sheet &mdash; A learning reference for artificial intelligence</footer> <footer>AI Cheat Sheet &mdash; A learning reference for artificial intelligence</footer>
<script src="../lib/modal.js"></script>
<script src="../lib/llm.js"></script>
<script>
(function(){
  /**
   * Streams a live LLM walkthrough for a technique card into the shared modal.
   * @param {string} title  Modal heading (includes the 🧪 emoji).
   * @param {string} prompt User-role prompt describing the demo to run.
   */
  function demoTechnique(title, prompt) {
    LLMModal.open(title);
    var conversation = [
      { role: 'system', content: 'You are an AI educator running a live demo. Walk through the concept step by step with concrete numerical examples, analogies, and clear explanations. Use formatting like bold text and code blocks where helpful.' },
      { role: 'user', content: prompt }
    ];
    // Accumulated stream so the modal always renders the full text so far.
    var streamed = '';
    var handleChunk = function(piece) {
      streamed += piece;
      LLMModal.update(streamed);
    };
    var handleDone = function() {};
    var handleError = function(problem) {
      LLMModal.error(problem);
    };
    LLM.callAPI(conversation, handleChunk, handleDone, handleError);
  }
  // Exposed globally because the cards invoke it via inline onclick attributes.
  window.demoTechnique = demoTechnique;
})();
</script>
</body> </body>
</html> </html>

View File

@@ -10,7 +10,7 @@
<nav> <nav>
<div class="nav-inner"> <div class="nav-inner">
<a href="/" class="nav-brand">AI Cheat Sheet</a> <a href="../index.html" class="nav-brand">AI Cheat Sheet</a>
<div class="nav-links"> <div class="nav-links">
<a href="/pages/terminology.html" class="active">Terminology</a> <a href="/pages/terminology.html" class="active">Terminology</a>
<a href="/pages/techniques.html">Techniques</a> <a href="/pages/techniques.html">Techniques</a>
@@ -19,11 +19,15 @@
<a href="/pages/prompts.html">Prompt Guide</a> <a href="/pages/prompts.html">Prompt Guide</a>
<a href="/pages/math.html">Math & Concepts</a> <a href="/pages/math.html">Math & Concepts</a>
<a href="/pages/chat.html">Chat</a> <a href="/pages/chat.html">Chat</a>
<a href="/pages/image-gen.html">Image Gen</a>
</div> </div>
<button class="dark-toggle" id="darkToggle" aria-label="Toggle dark mode">🌙</button> <button class="dark-toggle" id="darkToggle" aria-label="Toggle dark mode">🌙</button>
</div> </div>
</nav> </nav>
<button class="menu-toggle" id="menuToggle" aria-label="Toggle menu">☰</button>
<div class="sidebar-backdrop" id="sidebarBackdrop"></div>
<script> <script>
(function(){ (function(){
var btn = document.getElementById('darkToggle'); var btn = document.getElementById('darkToggle');
@@ -44,6 +48,34 @@
localStorage.setItem('theme','dark'); localStorage.setItem('theme','dark');
} }
}); });
var menuToggle = document.getElementById('menuToggle');
var nav = document.querySelector('nav');
var backdrop = document.getElementById('sidebarBackdrop');
if(menuToggle && nav){
menuToggle.addEventListener('click', function(){
nav.classList.toggle('sidebar-open');
var isOpen = nav.classList.contains('sidebar-open');
menuToggle.textContent = isOpen ? '✕' : '☰';
if(backdrop){
backdrop.classList.toggle('visible', isOpen);
}
});
if(backdrop){
backdrop.addEventListener('click', function(){
nav.classList.remove('sidebar-open');
menuToggle.textContent = '☰';
backdrop.classList.remove('visible');
});
}
document.addEventListener('click', function(e){
if(nav.classList.contains('sidebar-open') && !nav.contains(e.target) && e.target !== menuToggle){
nav.classList.remove('sidebar-open');
menuToggle.textContent = '☰';
if(backdrop) backdrop.classList.remove('visible');
}
});
}
})(); })();
</script> </script>
@@ -59,34 +91,40 @@
<span class="category">ML</span> <span class="category">ML</span>
<h3>Machine Learning (ML)</h3> <h3>Machine Learning (ML)</h3>
<p>A subset of AI where systems learn patterns from data to make decisions or predictions without being explicitly programmed for each task.</p> <p>A subset of AI where systems learn patterns from data to make decisions or predictions without being explicitly programmed for each task.</p>
<button class="llm-btn" onclick="explainTerm('Machine Learning (ML)', 'A subset of AI where systems learn patterns from data to make decisions or predictions without being explicitly programmed for each task.')"><span class="icon">💬</span> Explain deeper</button>
</div> </div>
<div class="def-card"> <div class="def-card">
<span class="category">ML</span> <span class="category">ML</span>
<h3>Supervised Learning</h3> <h3>Supervised Learning</h3>
<p>Training a model on labeled data — each example has an input and a known correct output. The model learns to map inputs to outputs.</p> <p>Training a model on labeled data — each example has an input and a known correct output. The model learns to map inputs to outputs.</p>
<div class="example"><strong>Example:</strong> Training on emails labeled "spam" or "not spam" to build a spam filter.</div> <div class="example"><strong>Example:</strong> Training on emails labeled "spam" or "not spam" to build a spam filter.</div>
<button class="llm-btn" onclick="explainTerm('Supervised Learning', 'Training a model on labeled data — each example has an input and a known correct output. The model learns to map inputs to outputs.')"><span class="icon">💬</span> Explain deeper</button>
</div> </div>
<div class="def-card"> <div class="def-card">
<span class="category">ML</span> <span class="category">ML</span>
<h3>Unsupervised Learning</h3> <h3>Unsupervised Learning</h3>
<p>Training on unlabeled data — the model finds hidden patterns or groupings on its own.</p> <p>Training on unlabeled data — the model finds hidden patterns or groupings on its own.</p>
<div class="example"><strong>Example:</strong> Grouping customers by purchasing behavior without pre-defined categories.</div> <div class="example"><strong>Example:</strong> Grouping customers by purchasing behavior without pre-defined categories.</div>
<button class="llm-btn" onclick="explainTerm('Unsupervised Learning', 'Training on unlabeled data — the model finds hidden patterns or groupings on its own.')"><span class="icon">💬</span> Explain deeper</button>
</div> </div>
<div class="def-card"> <div class="def-card">
<span class="category">ML</span> <span class="category">ML</span>
<h3>Reinforcement Learning</h3> <h3>Reinforcement Learning</h3>
<p>An agent learns by interacting with an environment, receiving rewards for good actions and penalties for bad ones, optimizing for maximum cumulative reward.</p> <p>An agent learns by interacting with an environment, receiving rewards for good actions and penalties for bad ones, optimizing for maximum cumulative reward.</p>
<div class="example"><strong>Example:</strong> An AI learning to play chess by playing millions of games against itself.</div> <div class="example"><strong>Example:</strong> An AI learning to play chess by playing millions of games against itself.</div>
<button class="llm-btn" onclick="explainTerm('Reinforcement Learning', 'An agent learns by interacting with an environment, receiving rewards for good actions and penalties for bad ones, optimizing for maximum cumulative reward.')"><span class="icon">💬</span> Explain deeper</button>
</div> </div>
<div class="def-card"> <div class="def-card">
<span class="category">ML</span> <span class="category">ML</span>
<h3>Overfitting</h3> <h3>Overfitting</h3>
<p>When a model learns the training data too well — including noise and outliers — and performs poorly on new, unseen data.</p> <p>When a model learns the training data too well — including noise and outliers — and performs poorly on new, unseen data.</p>
<button class="llm-btn" onclick="explainTerm('Overfitting', 'When a model learns the training data too well — including noise and outliers — and performs poorly on new, unseen data.')"><span class="icon">💬</span> Explain deeper</button>
</div> </div>
<div class="def-card"> <div class="def-card">
<span class="category">ML</span> <span class="category">ML</span>
<h3>Underfitting</h3> <h3>Underfitting</h3>
<p>When a model is too simple to capture the patterns in the data, performing poorly on both training and test data.</p> <p>When a model is too simple to capture the patterns in the data, performing poorly on both training and test data.</p>
<button class="llm-btn" onclick="explainTerm('Underfitting', 'When a model is too simple to capture the patterns in the data, performing poorly on both training and test data.')"><span class="icon">💬</span> Explain deeper</button>
</div> </div>
<h2 class="section-title">Natural Language Processing</h2> <h2 class="section-title">Natural Language Processing</h2>
@@ -94,35 +132,41 @@
<span class="category">NLP</span> <span class="category">NLP</span>
<h3>NLP (Natural Language Processing)</h3> <h3>NLP (Natural Language Processing)</h3>
<p>A field of AI focused on enabling computers to understand, interpret, and generate human language.</p> <p>A field of AI focused on enabling computers to understand, interpret, and generate human language.</p>
<button class="llm-btn" onclick="explainTerm('NLP - Natural Language Processing', 'A field of AI focused on enabling computers to understand, interpret, and generate human language.')"><span class="icon">💬</span> Explain deeper</button>
</div> </div>
<div class="def-card"> <div class="def-card">
<span class="category">NLP</span> <span class="category">NLP</span>
<h3>Token</h3> <h3>Token</h3>
<p>The smallest unit of text a model processes. Tokens can be words, subwords, or characters. A single word may be split into multiple tokens.</p> <p>The smallest unit of text a model processes. Tokens can be words, subwords, or characters. A single word may be split into multiple tokens.</p>
<div class="example"><strong>Example:</strong> "unhappiness" might become ["un", "happiness"] — 2 tokens.</div> <div class="example"><strong>Example:</strong> "unhappiness" might become ["un", "happiness"] — 2 tokens.</div>
<button class="llm-btn" onclick="explainTerm('Token', 'The smallest unit of text a model processes. Tokens can be words, subwords, or characters. A single word may be split into multiple tokens.')"><span class="icon">💬</span> Explain deeper</button>
</div> </div>
<div class="def-card"> <div class="def-card">
<span class="category">NLP</span> <span class="category">NLP</span>
<h3>Embedding</h3> <h3>Embedding</h3>
<p>A numerical representation of text (or other data) in a continuous vector space, where similar items are closer together.</p> <p>A numerical representation of text (or other data) in a continuous vector space, where similar items are closer together.</p>
<div class="example"><strong>Example:</strong> "king", "queen", "man", "woman" are embedded so that queen - woman + man ≈ king.</div> <div class="example"><strong>Example:</strong> "king", "queen", "man", "woman" are embedded so that queen - woman + man ≈ king.</div>
<button class="llm-btn" onclick="explainTerm('Embedding', 'A numerical representation of text (or other data) in a continuous vector space, where similar items are closer together.')"><span class="icon">💬</span> Explain deeper</button>
</div> </div>
<div class="def-card"> <div class="def-card">
<span class="category">NLP</span> <span class="category">NLP</span>
<h3>Context Window</h3> <h3>Context Window</h3>
<p>The maximum number of tokens a model can process at once — both input and output combined.</p> <p>The maximum number of tokens a model can process at once — both input and output combined.</p>
<div class="example"><strong>Example:</strong> A 128K context window means the model can read ~100,000 words in a single prompt.</div> <div class="example"><strong>Example:</strong> A 128K context window means the model can read ~100,000 words in a single prompt.</div>
<button class="llm-btn" onclick="explainTerm('Context Window', 'The maximum number of tokens a model can process at once — both input and output combined.')"><span class="icon">💬</span> Explain deeper</button>
</div> </div>
<div class="def-card"> <div class="def-card">
<span class="category">NLP</span> <span class="category">NLP</span>
<h3>Paraphrasing</h3> <h3>Paraphrasing</h3>
<p>Restating text in different words while preserving the original meaning. LLMs excel at this task.</p> <p>Restating text in different words while preserving the original meaning. LLMs excel at this task.</p>
<button class="llm-btn" onclick="explainTerm('Paraphrasing', 'Restating text in different words while preserving the original meaning. LLMs excel at this task.')"><span class="icon">💬</span> Explain deeper</button>
</div> </div>
<div class="def-card"> <div class="def-card">
<span class="category">NLP</span> <span class="category">NLP</span>
<h3>Sentiment Analysis</h3> <h3>Sentiment Analysis</h3>
<p>Determining the emotional tone behind text — positive, negative, or neutral.</p> <p>Determining the emotional tone behind text — positive, negative, or neutral.</p>
<div class="example"><strong>Example:</strong> "This product is amazing!" → Positive</div> <div class="example"><strong>Example:</strong> "This product is amazing!" → Positive</div>
<button class="llm-btn" onclick="explainTerm('Sentiment Analysis', 'Determining the emotional tone behind text — positive, negative, or neutral.')"><span class="icon">💬</span> Explain deeper</button>
</div> </div>
<h2 class="section-title">Model Concepts</h2> <h2 class="section-title">Model Concepts</h2>
@@ -130,33 +174,39 @@
<span class="category">Model</span> <span class="category">Model</span>
<h3>LLM (Large Language Model)</h3> <h3>LLM (Large Language Model)</h3>
<p>A neural network with billions of parameters trained on massive text corpora to understand and generate human language. Examples: GPT-4, Claude, Gemini, Llama.</p> <p>A neural network with billions of parameters trained on massive text corpora to understand and generate human language. Examples: GPT-4, Claude, Gemini, Llama.</p>
<button class="llm-btn" onclick="explainTerm('LLM - Large Language Model', 'A neural network with billions of parameters trained on massive text corpora to understand and generate human language.')"><span class="icon">💬</span> Explain deeper</button>
</div> </div>
<div class="def-card"> <div class="def-card">
<span class="category">Model</span> <span class="category">Model</span>
<h3>Pre-trained Model</h3> <h3>Pre-trained Model</h3>
<p>A model that has already been trained on a large dataset and can be used as-is or fine-tuned for specific tasks.</p> <p>A model that has already been trained on a large dataset and can be used as-is or fine-tuned for specific tasks.</p>
<button class="llm-btn" onclick="explainTerm('Pre-trained Model', 'A model that has already been trained on a large dataset and can be used as-is or fine-tuned for specific tasks.')"><span class="icon">💬</span> Explain deeper</button>
</div> </div>
<div class="def-card"> <div class="def-card">
<span class="category">Model</span> <span class="category">Model</span>
<h3>Fine-tuning</h3> <h3>Fine-tuning</h3>
<p>Taking a pre-trained model and continuing to train it on a smaller, task-specific dataset to adapt its behavior.</p> <p>Taking a pre-trained model and continuing to train it on a smaller, task-specific dataset to adapt its behavior.</p>
<div class="example"><strong>Example:</strong> Fine-tuning GPT-4 on medical texts so it answers healthcare questions more accurately.</div> <div class="example"><strong>Example:</strong> Fine-tuning GPT-4 on medical texts so it answers healthcare questions more accurately.</div>
<button class="llm-btn" onclick="explainTerm('Fine-tuning', 'Taking a pre-trained model and continuing to train it on a smaller, task-specific dataset to adapt its behavior.')"><span class="icon">💬</span> Explain deeper</button>
</div> </div>
<div class="def-card"> <div class="def-card">
<span class="category">Model</span> <span class="category">Model</span>
<h3>Parameters</h3> <h3>Parameters</h3>
<p>The internal variables of a model that are adjusted during training. More parameters generally mean greater capacity to learn complex patterns.</p> <p>The internal variables of a model that are adjusted during training. More parameters generally mean greater capacity to learn complex patterns.</p>
<div class="example"><strong>Example:</strong> GPT-4 is estimated to have trillions of parameters.</div> <div class="example"><strong>Example:</strong> GPT-4 is estimated to have trillions of parameters.</div>
<button class="llm-btn" onclick="explainTerm('Parameters', 'The internal variables of a model that are adjusted during training. More parameters generally mean greater capacity to learn complex patterns.')"><span class="icon">💬</span> Explain deeper</button>
</div> </div>
<div class="def-card"> <div class="def-card">
<span class="category">Model</span> <span class="category">Model</span>
<h3>Inference</h3> <h3>Inference</h3>
<p>The process of using a trained model to generate outputs for new inputs (as opposed to training the model).</p> <p>The process of using a trained model to generate outputs for new inputs (as opposed to training the model).</p>
<button class="llm-btn" onclick="explainTerm('Inference', 'The process of using a trained model to generate outputs for new inputs (as opposed to training the model).')"><span class="icon">💬</span> Explain deeper</button>
</div> </div>
<div class="def-card"> <div class="def-card">
<span class="category">Model</span> <span class="category">Model</span>
<h3>Weights</h3> <h3>Weights</h3>
<p>The numerical values learned during training that determine how input signals are transformed as they pass through the network.</p> <p>The numerical values learned during training that determine how input signals are transformed as they pass through the network.</p>
<button class="llm-btn" onclick="explainTerm('Weights', 'The numerical values learned during training that determine how input signals are transformed as they pass through the network.')"><span class="icon">💬</span> Explain deeper</button>
</div> </div>
<h2 class="section-title">Common Acronyms</h2> <h2 class="section-title">Common Acronyms</h2>
@@ -187,5 +237,34 @@
<footer>AI Cheat Sheet &mdash; A learning reference for artificial intelligence</footer> <footer>AI Cheat Sheet &mdash; A learning reference for artificial intelligence</footer>
<script src="../lib/modal.js"></script>
<script src="../lib/llm.js"></script>
<script>
/* Wire up the glossary "Explain deeper" buttons: stream an LLM-generated
   explanation of a term into the shared modal (lib/modal.js + lib/llm.js). */
(function(){
  // Shared system prompt for every term explanation.
  var SYSTEM_PROMPT = 'You are an AI educator explaining technical terms simply. Keep explanations to 2-3 short paragraphs with a practical example. Use the definition provided as your starting point.';

  /**
   * Open the modal for a term and stream an explanation into it.
   * @param {string} title - Term name shown in the modal header.
   * @param {string} definition - Short definition used to seed the prompt.
   */
  function explainTerm(title, definition) {
    LLMModal.open('💬 ' + title);

    var conversation = [
      { role: 'system', content: SYSTEM_PROMPT },
      { role: 'user', content: 'Explain this AI term in simple, practical terms: ' + title + '. Definition: ' + definition + '.' }
    ];

    var streamed = '';
    var handleChunk = function(chunk) {
      // Accumulate and re-render on every chunk so the modal streams live.
      streamed += chunk;
      LLMModal.update(streamed);
    };
    var handleDone = function() {};
    var handleError = function(err) {
      LLMModal.error(err);
    };

    LLM.callAPI(conversation, handleChunk, handleDone, handleError);
  }

  // Exposed globally so the inline onclick handlers in the markup can call it.
  window.explainTerm = explainTerm;
})();
</script>
</body> </body>
</html> </html>

View File

@@ -10,7 +10,7 @@
<nav> <nav>
<div class="nav-inner"> <div class="nav-inner">
<a href="/" class="nav-brand">AI Cheat Sheet</a> <a href="../index.html" class="nav-brand">AI Cheat Sheet</a>
<div class="nav-links"> <div class="nav-links">
<a href="/pages/terminology.html">Terminology</a> <a href="/pages/terminology.html">Terminology</a>
<a href="/pages/techniques.html">Techniques</a> <a href="/pages/techniques.html">Techniques</a>
@@ -19,11 +19,15 @@
<a href="/pages/prompts.html">Prompt Guide</a> <a href="/pages/prompts.html">Prompt Guide</a>
<a href="/pages/math.html">Math & Concepts</a> <a href="/pages/math.html">Math & Concepts</a>
<a href="/pages/chat.html">Chat</a> <a href="/pages/chat.html">Chat</a>
<a href="/pages/image-gen.html">Image Gen</a>
</div> </div>
<button class="dark-toggle" id="darkToggle" aria-label="Toggle dark mode">🌙</button> <button class="dark-toggle" id="darkToggle" aria-label="Toggle dark mode">🌙</button>
</div> </div>
</nav> </nav>
<button class="menu-toggle" id="menuToggle" aria-label="Toggle menu"></button>
<div class="sidebar-backdrop" id="sidebarBackdrop"></div>
<script> <script>
(function(){ (function(){
var btn = document.getElementById('darkToggle'); var btn = document.getElementById('darkToggle');
@@ -44,6 +48,34 @@
localStorage.setItem('theme','dark'); localStorage.setItem('theme','dark');
} }
}); });
// Mobile sidebar wiring: the hamburger button toggles the nav sidebar,
// and the backdrop (or any click outside the nav) closes it again.
var menuToggle = document.getElementById('menuToggle');
var nav = document.querySelector('nav');
var backdrop = document.getElementById('sidebarBackdrop');
if(menuToggle && nav){
// Toggle open/closed and keep the button glyph in sync (✕ when open, ☰ when closed).
menuToggle.addEventListener('click', function(){
nav.classList.toggle('sidebar-open');
var isOpen = nav.classList.contains('sidebar-open');
menuToggle.textContent = isOpen ? '✕' : '☰';
if(backdrop){
// Second arg forces the class on/off to match the sidebar state.
backdrop.classList.toggle('visible', isOpen);
}
});
// Tapping the dimmed backdrop closes the sidebar.
if(backdrop){
backdrop.addEventListener('click', function(){
nav.classList.remove('sidebar-open');
menuToggle.textContent = '☰';
backdrop.classList.remove('visible');
});
}
// Close on any click outside the nav. The toggle button is excluded so its
// own click (which bubbles here after toggling) does not immediately re-close.
document.addEventListener('click', function(e){
if(nav.classList.contains('sidebar-open') && !nav.contains(e.target) && e.target !== menuToggle){
nav.classList.remove('sidebar-open');
menuToggle.textContent = '☰';
if(backdrop) backdrop.classList.remove('visible');
}
});
}
})(); })();
</script> </script>
@@ -147,31 +179,52 @@
<div class="icon">🏥</div> <div class="icon">🏥</div>
<h3>Healthcare</h3> <h3>Healthcare</h3>
<p>Medical image analysis, drug discovery, clinical note generation, symptom triage, and personalized treatment plans.</p> <p>Medical image analysis, drug discovery, clinical note generation, symptom triage, and personalized treatment plans.</p>
<button class="llm-btn" onclick="brainstormUseCase('💡 Healthcare AI Use Cases', 'Give me 5 specific, actionable AI use cases for a mid-size hospital system. For each, describe the AI technique, expected impact, and implementation complexity.')"><span class="icon">💡</span> Brainstorm</button>
</div> </div>
<div class="use-card"> <div class="use-card">
<div class="icon">💰</div> <div class="icon">💰</div>
<h3>Finance</h3> <h3>Finance</h3>
<p>Fraud detection, algorithmic trading, risk assessment, credit scoring, and compliance monitoring.</p> <p>Fraud detection, algorithmic trading, risk assessment, credit scoring, and compliance monitoring.</p>
<button class="llm-btn" onclick="brainstormUseCase('💡 Finance AI Use Cases', 'Give me 5 specific, actionable AI use cases for a fintech startup. For each, describe the AI technique, expected impact, and key data needed.')"><span class="icon">💡</span> Brainstorm</button>
</div> </div>
<div class="use-card"> <div class="use-card">
<div class="icon">🚗</div> <div class="icon">🚗</div>
<h3>Automotive</h3> <h3>Automotive</h3>
<p>Autonomous driving, predictive maintenance, route optimization, and in-car voice assistants.</p> <p>Autonomous driving, predictive maintenance, route optimization, and in-car voice assistants.</p>
<button class="llm-btn" onclick="brainstormUseCase('💡 Automotive AI Use Cases', 'Give me 5 specific, actionable AI use cases for an automotive company. For each, describe the AI technique, expected impact, and implementation complexity.')"><span class="icon">💡</span> Brainstorm</button>
</div> </div>
<div class="use-card"> <div class="use-card">
<div class="icon">🎓</div> <div class="icon">🎓</div>
<h3>Education</h3> <h3>Education</h3>
<p>Personalized tutoring, automated grading, curriculum design, and interactive learning experiences.</p> <p>Personalized tutoring, automated grading, curriculum design, and interactive learning experiences.</p>
<button class="llm-btn" onclick="brainstormUseCase('💡 Education AI Use Cases', 'Give me 5 specific, actionable AI use cases for an online education platform. For each, describe the AI technique, expected impact, and key data needed.')"><span class="icon">💡</span> Brainstorm</button>
</div> </div>
<div class="use-card"> <div class="use-card">
<div class="icon">🏭</div> <div class="icon">🏭</div>
<h3>Manufacturing</h3> <h3>Manufacturing</h3>
<p>Quality inspection via computer vision, supply chain optimization, predictive maintenance, and digital twins.</p> <p>Quality inspection via computer vision, supply chain optimization, predictive maintenance, and digital twins.</p>
<button class="llm-btn" onclick="brainstormUseCase('💡 Manufacturing AI Use Cases', 'Give me 5 specific, actionable AI use cases for a manufacturing company. For each, describe the AI technique, expected impact, and implementation complexity.')"><span class="icon">💡</span> Brainstorm</button>
</div> </div>
<div class="use-card"> <div class="use-card">
<div class="icon">⚖️</div> <div class="icon">⚖️</div>
<h3>Legal</h3> <h3>Legal</h3>
<p>Contract review, legal research, case prediction, document drafting, and compliance analysis.</p> <p>Contract review, legal research, case prediction, document drafting, and compliance analysis.</p>
<button class="llm-btn" onclick="brainstormUseCase('💡 Legal AI Use Cases', 'Give me 5 specific, actionable AI use cases for a law firm. For each, describe the AI technique, expected impact, and key considerations.')"><span class="icon">💡</span> Brainstorm</button>
</div>
</div>
<h2 class="section-title">AI Assistant</h2>
<div class="def-card">
<span class="category">Interactive</span>
<h3>🤖 Brainstorm AI Use Cases</h3>
<p>Describe your industry, project, or problem — and the AI will suggest specific, actionable use cases with implementation details.</p>
<div class="llm-mini-chat" id="brainstorm-chat">
<div class="llm-mini-chat-header"><h4>💡 Brainstorm Assistant</h4><button class="llm-close-btn" onclick="this.closest('.llm-mini-chat').classList.remove('visible')"></button></div>
<div class="llm-mini-chat-input-row">
<input class="llm-mini-chat-input" id="brainstorm-input" placeholder="Describe your industry, project, or problem..." />
<button class="llm-mini-chat-send" onclick="doBrainstorm()">Go</button>
</div>
<div class="llm-mini-chat-output" id="brainstorm-output" style="margin-top: 0.8rem;"></div>
</div> </div>
</div> </div>
@@ -179,5 +232,61 @@
<footer>AI Cheat Sheet &mdash; A learning reference for artificial intelligence</footer> <footer>AI Cheat Sheet &mdash; A learning reference for artificial intelligence</footer>
<script src="../lib/modal.js"></script>
<script src="../lib/llm.js"></script>
<script>
/* Use-case brainstorming helpers: per-card "Brainstorm" buttons open the
   shared modal, and the mini-chat panel streams tailored suggestions. */
(function(){
  /**
   * Open the shared modal and stream a canned brainstorm prompt
   * (used by the per-industry "Brainstorm" buttons).
   * @param {string} title - Modal heading, e.g. '💡 Healthcare AI Use Cases'.
   * @param {string} prompt - Fully-formed user prompt for the LLM.
   */
  function brainstormUseCase(title, prompt) {
    LLMModal.open(title);
    var messages = [
      { role: 'system', content: 'You are an AI consultant helping professionals brainstorm practical AI use cases. For each use case, describe: 1) The specific problem it solves, 2) The AI technique/model needed, 3) Expected impact (quantified if possible), 4) Implementation complexity (Easy/Medium/Hard), 5) Key data or resources needed. Be concrete and actionable.' },
      { role: 'user', content: prompt }
    ];
    var fullText = '';
    LLM.callAPI(
      messages,
      function(chunk) {
        // Accumulate and re-render so the modal streams live.
        fullText += chunk;
        LLMModal.update(fullText);
      },
      function() {}, // nothing extra to do on completion
      function(err) {
        LLMModal.error(err);
      }
    );
  }

  /**
   * Read the free-form industry/problem description from the mini-chat
   * input and stream tailored use cases into the output panel.
   */
  function doBrainstorm() {
    var input = document.getElementById('brainstorm-input');
    var output = document.getElementById('brainstorm-output');
    var text = input.value.trim();
    if (!text) return; // ignore empty submissions
    output.innerHTML = '<span class="llm-loading">Brainstorming...</span>';
    var messages = [
      { role: 'system', content: 'You are an AI consultant helping professionals brainstorm practical AI use cases. For each use case, describe: 1) The specific problem it solves, 2) The AI technique or model needed, 3) Expected impact, 4) Implementation complexity (Easy/Medium/Hard), 5) Key data or resources needed. Be concrete and actionable. Give 5-7 use cases.' },
      { role: 'user', content: 'I work in ' + text + '. Brainstorm specific, actionable AI use cases for my context. Tailor each one to my industry, company size, and available resources. For each use case, describe the problem it solves, the AI technique needed, expected impact, implementation complexity, and key data or resources needed.' }
    ];
    // FIX: the previous .catch(function() {}) swallowed failures silently,
    // leaving the "Brainstorming..." spinner up forever. Surface the error
    // in the output panel instead (textContent, so nothing is interpreted as HTML).
    LLM.chatWithHistory('brainstorm-output', messages).catch(function(err) {
      output.textContent = 'Request failed: ' + (err && err.message ? err.message : String(err));
    });
  }

  // Submit on Enter from the mini-chat input.
  var brainstormInput = document.getElementById('brainstorm-input');
  if (brainstormInput) {
    brainstormInput.addEventListener('keydown', function(e) {
      if (e.key === 'Enter') {
        e.preventDefault();
        doBrainstorm();
      }
    });
  }

  // Exposed globally so the inline onclick handlers in the markup can call them.
  window.brainstormUseCase = brainstormUseCase;
  window.doBrainstorm = doBrainstorm;
})();
</script>
</body> </body>
</html> </html>