checkpoint commit
All checks were successful
Podman DDNS Image / build-and-push-ddns (push) Successful in 1m3s

This commit is contained in:
2026-05-05 06:26:40 -04:00
parent e43c534ceb
commit f2015e2c71
76 changed files with 4265 additions and 235 deletions

View File

@@ -4,6 +4,7 @@
- [Reeseapps vs Reeselink](#reeseapps-vs-reeselink)
- [Reeselink Addresses](#reeselink-addresses)
- [Reeseapps Addresses](#reeseapps-addresses)
- [Converting Unifi Records to AWS Records](#converting-unifi-records-to-aws-records)
## Reeseapps vs Reeselink
@@ -28,3 +29,14 @@ aws route53 change-resource-record-sets --hosted-zone-id $(cat active/aws_route5
```bash
aws route53 change-resource-record-sets --hosted-zone-id $(cat active/aws_route53/secrets/reeseapps-zoneid) --change-batch file://active/aws_route53/secrets/reeseapps.json
```
## Converting Unifi Records to AWS Records
The script `unifi_to_aws.py` will create a file at
`secrets/unifi_reeselink_records.json` which contains all `reeselink.com`
domains in the unifi server converted to AWS route53 batch format. Simply run
the script and then use that file to update reeselink.com records.
```bash
python active/aws_route53/unifi_to_aws.py
```

View File

@@ -0,0 +1,76 @@
#!/bin/bash
# Regenerate the Unifi-derived Route53 change batch and apply it to AWS.
#
# Steps:
#   1. Verify the python script and zone-id secret exist.
#   2. Run unifi_to_aws.py to produce the JSON change batch.
#   3. Apply the batch with the AWS CLI.
# Exits non-zero (via error_exit) on the first failure.

# --- Configuration ---
PYTHON_SCRIPT="active/aws_route53/unifi_to_aws.py"
ZONE_ID_FILE="active/aws_route53/secrets/reeselink-zoneid"
RECORDS_FILE="active/aws_route53/secrets/unifi_reeselink_records.json"

# --- Colors for logging ---
GREEN='\033[0;32m'
RED='\033[0;31m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

# --- Logging helpers ---
# log MESSAGE: timestamped message to stdout (color escapes interpreted).
log() {
    echo -e "[$(date +'%Y-%m-%d %H:%M:%S')] $1"
}

# error_exit MESSAGE: timestamped red error to stderr, then exit 1.
error_exit() {
    echo -e "[$(date +'%Y-%m-%d %H:%M:%S')] ${RED}ERROR: $1${NC}" >&2
    exit 1
}

# --- 1. Pre-flight Checks ---
log "${YELLOW}Starting Route53 update process...${NC}"

[[ -f "$PYTHON_SCRIPT" ]] || error_exit "Python script not found at $PYTHON_SCRIPT"
[[ -f "$ZONE_ID_FILE" ]] || error_exit "Zone ID file not found at $ZONE_ID_FILE"

# --- 2. Run Python Script ---
log "Running $PYTHON_SCRIPT to generate JSON records..."

# Test the command directly instead of inspecting $? on a later line —
# checking $? separately is fragile (any statement inserted in between
# silently clobbers it).
if python "$PYTHON_SCRIPT"; then
    log "${GREEN}Python script executed successfully.${NC}"
else
    error_exit "Python script failed. Aborting AWS update to prevent corrupting DNS."
fi

# Verify the output file actually exists after the python run
[[ -f "$RECORDS_FILE" ]] || error_exit "Python script reported success, but $RECORDS_FILE was not found."

# --- 3. Update Route53 ---
# Read the Zone ID from the secret file, stripping newlines/CRs/spaces.
# (tr reads the file directly; no need to pipe through cat.)
ZONE_ID=$(tr -d '\n\r ' < "$ZONE_ID_FILE")
if [[ -z "$ZONE_ID" ]]; then
    error_exit "Zone ID file is empty or could not be read."
fi

log "Updating Route53 records for Zone ID: $ZONE_ID..."

# Run the AWS CLI command.
# Using file:// prefix as required by AWS CLI for local files.
if aws route53 change-resource-record-sets \
    --hosted-zone-id "$ZONE_ID" \
    --change-batch "file://$RECORDS_FILE"; then
    log "${GREEN}Route53 records updated successfully!${NC}"
else
    error_exit "AWS CLI command failed. Check your AWS credentials and JSON formatting."
fi

log "${GREEN}Process complete.${NC}"

View File

@@ -0,0 +1,113 @@
import json
import os
import sys
from typing import Any, Dict, List
import requests
# Configuration
# Unifi API key; read from the environment so it is never committed.
# NOTE(review): if API_KEY is unset this is None and the X-API-Key header
# below is sent as None — the request will then fail with a non-200 status.
API_KEY = os.environ.get("API_KEY")
# Unifi controller endpoints: static DNS device entries and site DNS policies.
URL_DEVICES = "https://10.1.0.1/proxy/network/v2/api/site/default/static-dns/devices"
URL_POLICIES = "https://10.1.0.1/proxy/network/integration/v1/sites/88f7af54-98f8-306a-a1c7-c9349722b1f6/dns/policies"
# Destination for the generated Route53 change batch (consumed by the AWS CLI).
OUTPUT_FILE = "active/aws_route53/secrets/unifi_reeselink_records.json"
# Only records for this domain are converted.
ALLOWED_DOMAIN = "reeselink.com"
# TTL (seconds) applied to every generated A record.
FIXED_TTL = 60

# Headers sent with every Unifi API request.
headers = {"Accept": "application/json", "X-API-Key": API_KEY}
def fetch_json(url: str, timeout: float = 30.0) -> Any:
    """Perform an authenticated GET against the Unifi API and return parsed JSON.

    Exits the process with status 1 on any non-200 response, so callers may
    assume a successful payload.

    Args:
        url: Full URL to request.
        timeout: Seconds to wait for the controller before aborting.
            (Added: the original call had no timeout, so an unreachable
            controller would hang the script forever.)

    Returns:
        The decoded JSON body (list or dict, depending on the endpoint).
    """
    response = requests.get(
        url,
        headers=headers,
        verify=False,  # -k: Don't verify SSL certificate (self-signed controller cert)
        allow_redirects=True,  # -L: Follow redirects
        timeout=timeout,  # fail fast instead of blocking indefinitely
    )
    if response.status_code != 200:
        print(f"Error: Received status code {response.status_code} from {url}")
        print(f"Response: {response.text}")
        sys.exit(1)
    return response.json()
def main() -> None:
    """Fetch Unifi DNS entries and write them as a Route53 UPSERT change batch.

    Reads the devices and policies endpoints, keeps only names under
    ALLOWED_DOMAIN, and writes the combined batch to OUTPUT_FILE. Exits with
    status 1 if the output file cannot be written.
    """
    all_changes: List[Dict[str, Any]] = []

    def _matches_domain(name: str) -> bool:
        # Accept the apex domain or true subdomains only. A bare
        # name.endswith(ALLOWED_DOMAIN) would also match unrelated names
        # such as "evilreeselink.com".
        return name == ALLOWED_DOMAIN or name.endswith("." + ALLOWED_DOMAIN)

    def _upsert(name: str, ip: str) -> Dict[str, Any]:
        # Build a Route53 UPSERT for a single A record with the fixed TTL.
        return {
            "Action": "UPSERT",
            "ResourceRecordSet": {
                "Name": name,
                "Type": "A",
                "TTL": FIXED_TTL,
                "ResourceRecords": [{"Value": ip}],
            },
        }

    # 1. Process Devices API
    # devices_data is expected to be a list: [{hostname: ..., ip_address: ...}, ...]
    devices_data = fetch_json(URL_DEVICES)
    devices_count = 0
    for device in devices_data:
        hostname = device.get("hostname", "")
        ip = device.get("ip_address", "")
        # Skip entries without an IP: an empty Value would make the whole
        # AWS change batch fail.
        if ip and _matches_domain(hostname):
            all_changes.append(_upsert(hostname, ip))
            devices_count += 1

    # 2. Process Policies API
    # policies_response is expected to be a dict:
    # {"data": [{domain: ..., ipv4Address: ...}, ...]}
    policies_response = fetch_json(URL_POLICIES)
    policies_count = 0
    for policy in policies_response.get("data", []):
        domain = policy.get("domain", "")
        ip = policy.get("ipv4Address", "")
        if ip and _matches_domain(domain):
            all_changes.append(_upsert(domain, ip))
            policies_count += 1

    # Construct Final AWS Payload
    final_payload = {
        "Comment": "Combined records from Unifi devices and policies",
        "Changes": all_changes,
    }

    # Write to file
    try:
        # Ensure directory exists
        os.makedirs(os.path.dirname(OUTPUT_FILE), exist_ok=True)
        with open(OUTPUT_FILE, "w") as f:
            json.dump(final_payload, f, indent=4)
    except OSError as e:
        # Narrowed from `except Exception`: only filesystem errors are
        # expected here; anything else should surface as a traceback.
        print(f"Error writing to file: {e}")
        sys.exit(1)

    # Print Summary
    print("Successfully processed records:")
    print(f" - devices: {devices_count}")
    print(f" - policies: {policies_count}")
    print(f"Total records in file: {len(all_changes)}")
    print(f"Saved to {OUTPUT_FILE}")
if __name__ == "__main__":
    # Suppress the InsecureRequestWarning emitted because fetch_json uses
    # verify=False against the controller's self-signed certificate.
    requests.packages.urllib3.disable_warnings()  # type: ignore
    main()

View File

@@ -154,7 +154,7 @@ curl -L -X POST 'https://aipi.reeseapps.com/v1/chat/completions' \
-H 'Content-Type: application/json' \
-H 'Authorization: Bearer sk-1234' \
-d '{
"model": "gpt-4o-mini", # 👈 REPLACE with 'public model name' for any db-model
"model": "driveripper/think",
"messages": [
{
"content": "Hey, how's it going",

View File

@@ -70,11 +70,6 @@ active/container_caddy/install_caddy_proxy.yaml
ansible-playbook \
-i ansible/inventory.yaml \
active/container_caddy/install_caddy_deskwork.yaml
# Toybox (AI) Proxy
ansible-playbook \
-i ansible/inventory.yaml \
active/container_caddy/install_caddy_toybox.yaml
```
See ansible playbook [install_caddy.yaml](/active/container_caddy/install_caddy.yaml)

View File

@@ -58,7 +58,7 @@ Now you can install the DDNS service with something like:
```bash
ansible-playbook \
-i ansible/inventory.yaml \
-l proxy \
-l proxy-root \
active/container_ddns/install_ddns.yaml
```

View File

@@ -3,6 +3,7 @@
- [Driveripper](#driveripper)
- [General Principles](#general-principles)
- [Important Locations](#important-locations)
- [Backups](#backups)
- [Monitoring Scripts](#monitoring-scripts)
- [Quick Ansible Commands](#quick-ansible-commands)
- [Quickstart VM](#quickstart-vm)
@@ -12,7 +13,7 @@
- [Best Practices](#best-practices)
- [OSBuild Composer](#osbuild-composer)
- [Retired Disks](#retired-disks)
- [Sending emails](#sending-emails)
- [Sending emails](#sending-emails)
## General Principles
@@ -27,6 +28,28 @@
- `/etc/luks-keys`: luks keys
- `/usr/local/scripts`: admin scripts
## Backups
```bash
# smb
rsync -av --progress \
--exclude .snapshots \
/srv/smb/ \
/srv/backup/smb/
# archive
rsync -av --progress \
--exclude .snapshots \
/srv/archive/ \
/srv/backup/archive/
# vm
rsync -av --progress \
--exclude .snapshots \
/srv/vm/ \
/srv/backup/vm/
```
## Monitoring Scripts
```bash
@@ -196,7 +219,7 @@ Retired 12-19-2025
![alt text](image-1.png)
### Sending emails
## Sending emails
```bash
# s-nail is mailx

Binary file not shown.

View File

@@ -8,6 +8,7 @@
- [Adding a New Device](#adding-a-new-device)
- [Controlling Home Assistant](#controlling-home-assistant)
- [Configuration Sections](#configuration-sections)
- [Symbols](#symbols)
- [esphome](#esphome)
- [esp32](#esp32-1)
- [logger](#logger)
@@ -63,7 +64,8 @@ uv venv
uv pip install esphome
source .venv/bin/activate
esphome run m5stack-atom-echo.yaml
# grep for debug lines only (helpful for filtering noise)
esphome run tab1.yaml | grep -E '\[D\]'
```
## Adding a New Device
@@ -78,6 +80,13 @@ esphome run m5stack-atom-echo.yaml
<https://esphome.io/components/>
## Symbols
You can display the embedded symbols among the text by their codepoint address
preceded by \u. For example: \uF00C :
![alt text](image.png)
### esphome
### esp32
@@ -145,6 +154,22 @@ data:
media_content_id: "media-source://media_source/local/wake_word_triggered.wav"
```
Playing arbitrary sound:
```yaml
audio_file:
- id: beep_sound
file: "beep.wav"
media_source:
- platform: audio_file
id: file_source
- media_player.speaker.play_on_device_media_file:
media_file: beep_sound
announcement: true
```
### voice assistant
<https://esphome.io/components/voice_assistant/>

View File

@@ -54,24 +54,24 @@ i2c:
id: bus_1
sensor:
- platform: sonic_i2c
i2c_id: bus_1
address: 0x57
name: "Ultrasonic Sensor 1"
id: ultrasonic1
unit_of_measurement: mm
update_interval: 5s
filters:
- filter_out: nan
- lambda: |-
if (x == 0) {
return {}; // This filters out the reading
} else {
return x; // This passes the reading through
}
- sliding_window_moving_average:
window_size: 10
send_every: 20
- platform: sonic_i2c
i2c_id: bus_1
address: 0x57
name: "Ultrasonic Sensor 1"
id: ultrasonic1
unit_of_measurement: mm
update_interval: 5s
filters:
- filter_out: nan
- lambda: |-
if (x == 0) {
return {}; // This filters out the reading
} else {
return x; // This passes the reading through
}
- sliding_window_moving_average:
window_size: 10
send_every: 20
button:
- platform: factory_reset
@@ -99,7 +99,7 @@ speaker:
dac_type: external
bits_per_sample: 16bit
sample_rate: 16000
channel: stereo # The Echo has poor playback audio quality when using mon audio
channel: stereo # The Echo has poor playback audio quality when using mono audio
buffer_duration: 60ms
media_player:
@@ -197,7 +197,7 @@ voice_assistant:
- delay: 2s
- script.execute: reset_led
on_client_connected:
- delay: 2s # Give the api server time to settle
- delay: 2s # Give the api server time to settle
- script.execute: start_wake_word
on_client_disconnected:
- script.execute: stop_wake_word
@@ -355,24 +355,24 @@ switch:
on_turn_off:
# Turn off the repeat mode and disable the pause between playlist items
- lambda: |-
id(echo_media_player)
->make_call()
.set_command(media_player::MediaPlayerCommand::MEDIA_PLAYER_COMMAND_REPEAT_OFF)
.set_announcement(true)
.perform();
id(echo_media_player)->set_playlist_delay_ms(speaker::AudioPipelineType::ANNOUNCEMENT, 0);
id(echo_media_player)
->make_call()
.set_command(media_player::MediaPlayerCommand::MEDIA_PLAYER_COMMAND_REPEAT_OFF)
.set_announcement(true)
.perform();
id(echo_media_player)->set_playlist_delay_ms(speaker::AudioPipelineType::ANNOUNCEMENT, 0);
# Stop playing the alarm
- media_player.stop:
announcement: true
on_turn_on:
# Turn on the repeat mode and pause for 1000 ms between playlist items/repeats
- lambda: |-
id(echo_media_player)
->make_call()
.set_command(media_player::MediaPlayerCommand::MEDIA_PLAYER_COMMAND_REPEAT_ONE)
.set_announcement(true)
.perform();
id(echo_media_player)->set_playlist_delay_ms(speaker::AudioPipelineType::ANNOUNCEMENT, 1000);
id(echo_media_player)
->make_call()
.set_command(media_player::MediaPlayerCommand::MEDIA_PLAYER_COMMAND_REPEAT_ONE)
.set_announcement(true)
.perform();
id(echo_media_player)->set_playlist_delay_ms(speaker::AudioPipelineType::ANNOUNCEMENT, 1000);
- media_player.speaker.play_on_device_media_file:
media_file: timer_finished_wave_file
announcement: true

Binary file not shown.

After

Width:  |  Height:  |  Size: 94 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 96 KiB

Binary file not shown.

Binary file not shown.

Before

Width:  |  Height:  |  Size: 277 KiB

After

Width:  |  Height:  |  Size: 115 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 431 KiB

After

Width:  |  Height:  |  Size: 144 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 430 KiB

After

Width:  |  Height:  |  Size: 109 KiB

View File

@@ -0,0 +1,997 @@
---
substitutions:
name: pyramid1
friendly_name: Pyramid 1
# Casita images
loading_illustration_file: https://github.com/esphome/wake-word-voice-assistants/raw/main/casita/loading_320_240.png
idle_illustration_file: https://github.com/esphome/wake-word-voice-assistants/raw/main/casita/idle_320_240.png
listening_illustration_file: https://github.com/esphome/wake-word-voice-assistants/raw/main/casita/listening_320_240.png
thinking_illustration_file: https://github.com/esphome/wake-word-voice-assistants/raw/main/casita/thinking_320_240.png
replying_illustration_file: https://github.com/esphome/wake-word-voice-assistants/raw/main/casita/replying_320_240.png
error_illustration_file: https://github.com/esphome/wake-word-voice-assistants/raw/main/casita/error_320_240.png
error_no_wifi_illustration_file: https://github.com/esphome/wake-word-voice-assistants/raw/main/error_box_illustrations/error-no-wifi.png
error_no_ha_illustration_file: https://github.com/esphome/wake-word-voice-assistants/raw/main/error_box_illustrations/error-no-ha.png
# Fonts
mdi_webfont_file: https://raw.githubusercontent.com/Templarian/MaterialDesign-Webfont/master/fonts/materialdesignicons-webfont.ttf
# Audio files
wake_word_trigger_sound_file: wake_word_triggered.wav
# timer_finished_sound_file: https://github.com/esphome/home-assistant-voice-pe/raw/dev/sounds/timer_finished.flac
# error_cloud_expired_file: https://github.com/esphome/home-assistant-voice-pe/raw/dev/sounds/error_cloud_expired.mp3
# Micro wake word models
pick_pig: https://raw.githubusercontent.com/esphome/micro-wake-word-models/refs/heads/main/models/v2/experiments/hey_peppa_pig.json
stop_model_file: https://github.com/kahrendt/microWakeWord/releases/download/stop/stop.json
# Background colors
loading_illustration_background_color: "000000"
idle_illustration_background_color: "000000"
listening_illustration_background_color: "FFFFFF"
thinking_illustration_background_color: "FFFFFF"
replying_illustration_background_color: "FFFFFF"
error_illustration_background_color: "000000"
# Phases of the Voice Assistant
# The voice assistant is ready to be triggered by a wake word
voice_assist_idle_phase_id: "1"
# The voice assistant is listening for a voice command
voice_assist_listening_phase_id: "2"
# The voice assistant is currently processing the command
voice_assist_thinking_phase_id: "3"
# The voice assistant is replying to the command
voice_assist_replying_phase_id: "4"
# The voice assistant is not ready
voice_assist_not_ready_phase_id: "10"
# The voice assistant encountered an error
voice_assist_error_phase_id: "11"
# Muted phase
voice_assist_muted_phase_id: "12"
# Finished timer phase
voice_assist_timer_finished_phase_id: "20"
esphome:
name: pyramid1
friendly_name: Pyramid 1
min_version: 2025.11.3
on_boot:
priority: 600
then:
- delay: 30s
- if:
condition:
lambda: return id(init_in_progress);
then:
- lambda: id(init_in_progress) = false;
esp32:
variant: esp32s3
flash_size: 8MB
cpu_frequency: 240MHz
framework:
type: esp-idf
api:
encryption:
key: "innoIL7I6ZfRekL58F65REjeYNLW1Hp/Q/Kv9SEjnNA="
ota:
- platform: esphome
password: "22de00dcf5c2701a25d2fe719d596123"
wifi:
ssid: !secret wifi_ssid
password: !secret wifi_password
ap:
ssid: "Echo-Pyramid Fallback Hotspot"
password: "uSTvJjVzweZp"
# Enable logging
logger:
level: INFO
logs:
sensor: WARN
captive_portal:
button:
- platform: factory_reset
id: factory_reset_btn
internal: true
binary_sensor:
- platform: gpio
pin:
number: GPIO41
mode: INPUT_PULLUP
inverted: true
id: user_button
internal: true
on_multi_click:
- timing:
- ON for at least 50ms
- OFF for at least 50ms
then:
- switch.turn_off: timer_ringing
- timing:
- ON for at least 10s
then:
- button.press: factory_reset_btn
external_components:
- source: github://m5stack/esphome-yaml/components
components: [aw87559, si5351, lp5562, pyramidrgb, pyramidtouch]
refresh: 0s
# I2C Bus Configuration
i2c:
- id: bsp_bus
sda: GPIO45
scl: GPIO0
scan: true
- id: ext_bus # used on atomic echo base
sda: GPIO38
scl: GPIO39
# Echo Base GPIO Expander
pi4ioe5v6408:
- id: pi4ioe5v6408_hub
i2c_id: ext_bus
address: 0x43
aw87559:
id: audio_amp
i2c_id: ext_bus
address: 0x5B
si5351:
id: clock_gen
i2c_id: ext_bus
address: 0x60
# I2S Bus Configuration
i2s_audio:
- id: i2s_audio_bus
i2s_lrclk_pin: GPIO8
i2s_bclk_pin: GPIO6
spi:
clk_pin: GPIO15
mosi_pin: GPIO21
# miso_pin is not used
audio_dac:
- platform: es8311
id: es8311_dac
i2c_id: ext_bus
bits_per_sample: 16bit
sample_rate: 16000
audio_adc:
- platform: es7210
id: es7210_adc
i2c_id: ext_bus
address: 0x40
bits_per_sample: 16bit
sample_rate: 16000
microphone:
- platform: i2s_audio
id: i2s_mic
sample_rate: 16000
i2s_din_pin: GPIO5
bits_per_sample: 16bit
adc_type: external
channel: stereo
speaker:
- platform: i2s_audio
id: i2s_speaker
i2s_dout_pin: GPIO7
dac_type: external
bits_per_sample: 16bit
sample_rate: 16000
channel: mono
audio_dac: es8311_dac
media_player:
- platform: speaker
name: "Echo Pyramid Player"
id: echo_pyramid_player
volume_min: 0.0
volume_max: 1.0
volume_initial: 0.10
buffer_size: 6000
announcement_pipeline:
speaker: i2s_speaker
format: WAV
# sample_rate: 48000
# num_channels: 1
codec_support_enabled: false
files:
- id: wake_word_triggered_sound
file: ${wake_word_trigger_sound_file}
# - id: timer_finished_sound
# file: ${timer_finished_sound_file}
# - id: error_cloud_expired
# file: ${error_cloud_expired_file}
on_state:
- logger.log: "State updated!"
on_play:
- logger.log: "Playback started!"
on_announcement:
- logger.log: "Announcing!"
# Stop the wake word (mWW or VA) if the mic is capturing
- if:
condition:
- microphone.is_capturing:
then:
- script.execute: stop_wake_word
# Ensure VA stops before moving on
- if:
condition:
- lambda: |-
return id(wake_word_engine_location).current_option() == "In Home Assistant";
then:
- wait_until:
- not:
voice_assistant.is_running:
# Since VA isn't running, this is user-initiated media playback. Draw the mute display
- if:
condition:
not:
voice_assistant.is_running:
then:
- lambda: id(voice_assistant_phase) = ${voice_assist_muted_phase_id};
- script.execute: draw_display
on_idle:
# Since VA isn't running, this is the end of user-initiated media playback. Restart the wake word.
- if:
condition:
not:
voice_assistant.is_running:
then:
- script.execute: start_wake_word
- script.execute: set_idle_or_mute_phase
- script.execute: draw_display
switch:
# NS4150B
- platform: gpio
name: Speaker Enable
pin:
pi4ioe5v6408: pi4ioe5v6408_hub
number: 0
mode:
output: true
icon: "mdi:volume-high"
restore_mode: RESTORE_DEFAULT_ON
- platform: template
name: Mute Microphone
id: mute
icon: "mdi:microphone-off"
optimistic: true
restore_mode: RESTORE_DEFAULT_OFF
entity_category: config
on_turn_off:
- microphone.unmute:
- lambda: id(voice_assistant_phase) = ${voice_assist_idle_phase_id};
on_turn_on:
- microphone.mute:
- lambda: id(voice_assistant_phase) = ${voice_assist_muted_phase_id};
- platform: template
id: timer_ringing
optimistic: true
internal: true
restore_mode: ALWAYS_OFF
on_turn_off:
# Turn off the repeat mode and disable the pause between playlist items
- lambda: |-
id(echo_pyramid_player)
->make_call()
.set_command(media_player::MediaPlayerCommand::MEDIA_PLAYER_COMMAND_REPEAT_OFF)
.set_announcement(true)
.perform();
id(echo_pyramid_player)->set_playlist_delay_ms(speaker::AudioPipelineType::ANNOUNCEMENT, 0);
# Stop playing the alarm
- media_player.stop:
announcement: true
- script.execute: start_wake_word
on_turn_on:
- script.execute: stop_wake_word
# Turn on the repeat mode and pause for 1000 ms between playlist items/repeats
- lambda: |-
id(echo_pyramid_player)
->make_call()
.set_command(media_player::MediaPlayerCommand::MEDIA_PLAYER_COMMAND_REPEAT_ONE)
.set_announcement(true)
.perform();
id(echo_pyramid_player)->set_playlist_delay_ms(speaker::AudioPipelineType::ANNOUNCEMENT, 1000);
# - media_player.speaker.play_on_device_media_file:
# media_file: timer_finished_sound
# announcement: true
- delay: 15min
- switch.turn_off: timer_ringing
select:
- platform: template
entity_category: config
name: Wake word engine location
id: wake_word_engine_location
icon: "mdi:account-voice"
optimistic: true
restore_value: true
options:
- In Home Assistant
- On device
initial_option: On device
on_value:
- if:
condition:
lambda: return !id(init_in_progress);
then:
- wait_until:
lambda: return id(voice_assistant_phase) == ${voice_assist_muted_phase_id} || id(voice_assistant_phase) == ${voice_assist_idle_phase_id};
- if:
condition:
lambda: return x == "In Home Assistant";
then:
- micro_wake_word.stop
- delay: 500ms
- if:
condition:
switch.is_off: mute
then:
- lambda: id(va).set_use_wake_word(true);
- voice_assistant.start_continuous:
- if:
condition:
lambda: return x == "On device";
then:
- lambda: id(va).set_use_wake_word(false);
- voice_assistant.stop
- delay: 500ms
- if:
condition:
switch.is_off: mute
then:
- micro_wake_word.start
- platform: template
name: "Wake word sensitivity"
optimistic: true
initial_option: Slightly sensitive
restore_value: true
entity_category: config
options:
- Slightly sensitive
- Moderately sensitive
- Very sensitive
on_value:
# Sets specific wake word probabilities computed for each particular model
# Note probability cutoffs are set as a quantized uint8 value, each comment has the corresponding floating point cutoff
# False Accepts per Hour values are tested against all units and channels from the Dinner Party Corpus.
# These cutoffs apply only to the specific models included in the firmware: okay_nabu@20241226.3, hey_jarvis@v2, hey_mycroft@v2
lambda: |-
if (x == "Slightly sensitive") {
id(okay_nabu).set_probability_cutoff(217); // 0.85 -> 0.000 FAPH on DipCo (Manifest's default)
id(hey_jarvis).set_probability_cutoff(247); // 0.97 -> 0.563 FAPH on DipCo (Manifest's default)
id(hey_mycroft).set_probability_cutoff(253); // 0.99 -> 0.567 FAPH on DipCo
} else if (x == "Moderately sensitive") {
id(okay_nabu).set_probability_cutoff(176); // 0.69 -> 0.376 FAPH on DipCo
id(hey_jarvis).set_probability_cutoff(235); // 0.92 -> 0.939 FAPH on DipCo
id(hey_mycroft).set_probability_cutoff(242); // 0.95 -> 1.502 FAPH on DipCo (Manifest's default)
} else if (x == "Very sensitive") {
id(okay_nabu).set_probability_cutoff(143); // 0.56 -> 0.751 FAPH on DipCo
id(hey_jarvis).set_probability_cutoff(212); // 0.83 -> 1.502 FAPH on DipCo
id(hey_mycroft).set_probability_cutoff(237); // 0.93 -> 1.878 FAPH on DipCo
}
micro_wake_word:
id: mww
microphone: i2s_mic
models:
- model: okay_nabu
id: okay_nabu
- model: hey_jarvis
id: hey_jarvis
- model: hey_mycroft
id: hey_mycroft
- model: https://raw.githubusercontent.com/esphome/micro-wake-word-models/refs/heads/main/models/v2/experiments/hey_peppa_pig.json
id: hey_peppa_pig
- model: ${stop_model_file}
id: stop
internal: true
vad:
on_wake_word_detected:
- script.execute:
id: play_sound
priority: true
sound_file: !lambda return id(wake_word_triggered_sound);
- wait_until:
condition:
- media_player.is_announcing:
timeout: 0.5s
# Announcement is finished and the I2S bus is free
- wait_until:
- and:
- not:
media_player.is_announcing:
- not:
speaker.is_playing:
- voice_assistant.start:
wake_word: !lambda return wake_word;
voice_assistant:
id: va
microphone: i2s_mic
media_player: echo_pyramid_player
micro_wake_word: mww
noise_suppression_level: 2
auto_gain: 31dBFS
volume_multiplier: 2.0
on_listening:
- lambda: id(voice_assistant_phase) = ${voice_assist_listening_phase_id};
- script.execute: draw_display
on_stt_vad_end:
- lambda: id(voice_assistant_phase) = ${voice_assist_thinking_phase_id};
- script.execute: draw_display
on_tts_start:
- lambda: id(voice_assistant_phase) = ${voice_assist_replying_phase_id};
- script.execute: draw_display
on_end:
# Wait a short amount of time to see if an announcement starts
- wait_until:
condition:
- media_player.is_announcing:
timeout: 0.5s
# Announcement is finished and the I2S bus is free
- wait_until:
- and:
- not:
media_player.is_announcing:
- not:
speaker.is_playing:
# Restart only mWW if enabled; streaming wake words automatically restart
- if:
condition:
- lambda: |-
return id(wake_word_engine_location).current_option() == "On device";
then:
- lambda: id(va).set_use_wake_word(false);
- micro_wake_word.start:
- script.execute: set_idle_or_mute_phase
- script.execute: draw_display
on_error:
# Only set the error phase if the error code is different than duplicate_wake_up_detected or stt-no-text-recognized
# These two are ignored for a better user experience
- if:
condition:
and:
- lambda: return !id(init_in_progress);
- lambda: return code != "duplicate_wake_up_detected";
- lambda: return code != "stt-no-text-recognized";
then:
- lambda: id(voice_assistant_phase) = ${voice_assist_error_phase_id};
- script.execute: draw_display
- delay: 1s
- if:
condition:
switch.is_off: mute
then:
- lambda: id(voice_assistant_phase) = ${voice_assist_idle_phase_id};
else:
- lambda: id(voice_assistant_phase) = ${voice_assist_muted_phase_id};
# If the error code is cloud-auth-failed, serve a local audio file guiding the user.
- if:
condition:
- lambda: return code == "cloud-auth-failed";
then:
# - script.execute:
# id: play_sound
# priority: true
# sound_file: !lambda return id(error_cloud_expired);
- script.execute: draw_display
on_client_connected:
- lambda: id(init_in_progress) = false;
- script.execute: start_wake_word
- script.execute: set_idle_or_mute_phase
- script.execute: draw_display
on_client_disconnected:
- script.execute: stop_wake_word
- lambda: id(voice_assistant_phase) = ${voice_assist_not_ready_phase_id};
- script.execute: draw_display
on_timer_finished:
- switch.turn_on: timer_ringing
- wait_until:
media_player.is_announcing:
- lambda: id(voice_assistant_phase) = ${voice_assist_timer_finished_phase_id};
globals:
- id: init_in_progress
type: bool
restore_value: false
initial_value: "true"
- id: voice_assistant_phase
type: int
restore_value: false
initial_value: ${voice_assist_not_ready_phase_id}
- id: current_volume
type: float
restore_value: true
initial_value: "0.3"
sensor:
- platform: pyramidtouch
address: 0x1A
i2c_id: ext_bus
update_interval: 50ms
publish_swipe_event: true
swipe_timeout_ms: 500
touch1:
name: "Touch 1"
touch2:
name: "Touch 2"
touch3:
name: "Touch 3"
touch4:
name: "Touch 4"
swipe_event:
name: "Touch Swipe Event"
entity_category: diagnostic
on_value:
then:
- lambda: |-
// Swipe codes:
// 1 = Left Up (volume up)
// 2 = Left Down (volume down)
// 3 = Right Up (brightness up)
// 4 = Right Down (brightness down)
const float volume_step = 0.05f; // 5% volume per gesture
const float brightness_step = 5.0f; // 5% brightness per gesture
const int ev = (int) x;
if (ev == 1 || ev == 2) {
// Left side: control volume (0.0 - 1.0)
float v = id(current_volume);
if (ev == 1) {
v = std::min(1.0f, v + volume_step);
} else {
v = std::max(0.0f, v - volume_step);
}
auto call = id(echo_pyramid_player).make_call();
call.set_volume(v);
call.perform();
id(current_volume) = v;
} else if (ev == 3 || ev == 4) {
// Right side: control RGB brightness (0 - 100)
float b = id(rgb_master_brightness).state;
if (ev == 3) {
b = std::min(100.0f, b + brightness_step);
} else {
b = std::max(0.0f, b - brightness_step);
}
uint8_t b8 = (uint8_t) b;
id(pyramid_rgb1).set_strip_brightness(1, b8);
id(pyramid_rgb2).set_strip_brightness(2, b8);
id(rgb_master_brightness).publish_state(b);
} else {
return;
}
lp5562:
id: lp5562_led
i2c_id: bsp_bus
use_internal_clk: true
# power_save_mode: true
# high_pwm_freq: true
# logarithmic_dimming: true
white_current: 17.5
pyramidrgb:
- id: pyramid_rgb1
i2c_id: ext_bus
address: 0x1A
strip: 1
brightness: 80
- id: pyramid_rgb2
i2c_id: ext_bus
address: 0x1A
strip: 2
brightness: 80
number:
# Master media player volume (0.0–1.0)
- platform: template
name: "Master Volume"
id: master_volume
icon: "mdi:volume-high"
min_value: 0.0
max_value: 0.4
step: 0.01
restore_value: true
initial_value: 0.3
optimistic: true
set_action:
- lambda: |-
float v = x;
auto call = id(echo_pyramid_player).make_call();
call.set_volume(v);
call.perform();
id(current_volume) = v;
  # Master RGB brightness (applies to both strips, 0-100%)
- platform: template
name: "RGB Master Brightness"
id: rgb_master_brightness
icon: "mdi:brightness-6"
min_value: 0
max_value: 100
step: 1
restore_value: true
initial_value: 100
optimistic: true
set_action:
- lambda: |-
uint8_t b = (uint8_t) x;
id(pyramid_rgb1).set_strip_brightness(1, b);
id(pyramid_rgb2).set_strip_brightness(2, b);
output:
- platform: lp5562
id: lp5562_white_channel
lp5562_id: lp5562_led
channel: white
- platform: pyramidrgb
id: rgb1_ch0_red
pyramidrgb_id: pyramid_rgb1
channel: 0
color: red
- platform: pyramidrgb
id: rgb1_ch0_green
pyramidrgb_id: pyramid_rgb1
channel: 0
color: green
- platform: pyramidrgb
id: rgb1_ch0_blue
pyramidrgb_id: pyramid_rgb1
channel: 0
color: blue
# Strip 1, Channel 1 (Group 2)
- platform: pyramidrgb
id: rgb1_ch1_red
pyramidrgb_id: pyramid_rgb1
channel: 1
color: red
- platform: pyramidrgb
id: rgb1_ch1_green
pyramidrgb_id: pyramid_rgb1
channel: 1
color: green
- platform: pyramidrgb
id: rgb1_ch1_blue
pyramidrgb_id: pyramid_rgb1
channel: 1
color: blue
# Strip 2, Channel 2 (Group 1)
- platform: pyramidrgb
id: rgb2_ch2_red
pyramidrgb_id: pyramid_rgb2
channel: 2
color: red
- platform: pyramidrgb
id: rgb2_ch2_green
pyramidrgb_id: pyramid_rgb2
channel: 2
color: green
- platform: pyramidrgb
id: rgb2_ch2_blue
pyramidrgb_id: pyramid_rgb2
channel: 2
color: blue
# Strip 2, Channel 3 (Group 2)
- platform: pyramidrgb
id: rgb2_ch3_red
pyramidrgb_id: pyramid_rgb2
channel: 3
color: red
- platform: pyramidrgb
id: rgb2_ch3_green
pyramidrgb_id: pyramid_rgb2
channel: 3
color: green
- platform: pyramidrgb
id: rgb2_ch3_blue
pyramidrgb_id: pyramid_rgb2
channel: 3
color: blue
light:
- platform: monochromatic
name: "LCD Backlight"
output: lp5562_white_channel
icon: "mdi:television"
restore_mode: RESTORE_DEFAULT_ON
- platform: rgb
name: "Strip1 Group1"
red: rgb1_ch0_red
green: rgb1_ch0_green
blue: rgb1_ch0_blue
restore_mode: RESTORE_DEFAULT_ON
- platform: rgb
name: "Strip1 Group2"
red: rgb1_ch1_red
green: rgb1_ch1_green
blue: rgb1_ch1_blue
restore_mode: RESTORE_DEFAULT_ON
- platform: rgb
name: "Strip2 Group1"
red: rgb2_ch2_red
green: rgb2_ch2_green
blue: rgb2_ch2_blue
restore_mode: RESTORE_DEFAULT_ON
- platform: rgb
name: "Strip2 Group2"
red: rgb2_ch3_red
green: rgb2_ch3_green
blue: rgb2_ch3_blue
restore_mode: RESTORE_DEFAULT_ON
display:
- platform: mipi_spi
id: atoms3r_lcd
model: ST7789V
dc_pin: GPIO42
reset_pin: GPIO48
cs_pin: GPIO14
data_rate: 40MHz
dimensions:
height: 128
width: 128
offset_width: 2
offset_height: 1
invert_colors: true
rotation: 180°
pages:
- id: idle_page
lambda: |-
it.fill(id(idle_color));
it.image((it.get_width() / 2), (it.get_height() / 2), id(casita_idle), ImageAlign::CENTER);
- id: listening_page
lambda: |-
it.fill(id(listening_color));
it.image((it.get_width() / 2), (it.get_height() / 2), id(casita_listening), ImageAlign::CENTER);
- id: thinking_page
lambda: |-
it.fill(id(thinking_color));
it.image((it.get_width() / 2), (it.get_height() / 2), id(casita_thinking), ImageAlign::CENTER);
- id: replying_page
lambda: |-
it.fill(id(replying_color));
it.image((it.get_width() / 2), (it.get_height() / 2), id(casita_replying), ImageAlign::CENTER);
- id: error_page
lambda: |-
it.fill(id(error_color));
it.image((it.get_width() / 2), (it.get_height() / 2), id(casita_error), ImageAlign::CENTER);
- id: no_ha_page
lambda: |-
it.image((it.get_width() / 2), (it.get_height() / 2), id(error_no_ha), ImageAlign::CENTER);
- id: no_wifi_page
lambda: |-
it.image((it.get_width() / 2), (it.get_height() / 2), id(error_no_wifi), ImageAlign::CENTER);
- id: initializing_page
lambda: |-
it.fill(id(loading_color));
it.image((it.get_width() / 2), (it.get_height() / 2), id(casita_initializing), ImageAlign::CENTER);
- id: muted_page
lambda: |-
it.fill(Color::BLACK);
it.printf(0, 0, id(mdi_icon_128), Color::WHITE, "%s", "\U000F036D");
script:
# Starts either mWW or the streaming wake word, depending on the configured location
- id: start_wake_word
then:
- if:
condition:
and:
- not:
- voice_assistant.is_running:
- lambda: |-
return id(wake_word_engine_location).current_option() == "On device";
then:
- lambda: id(va).set_use_wake_word(false);
- micro_wake_word.start:
- if:
condition:
and:
- not:
- voice_assistant.is_running:
- lambda: |-
return id(wake_word_engine_location).current_option() == "In Home Assistant";
then:
- lambda: id(va).set_use_wake_word(true);
- voice_assistant.start_continuous:
# Stops either mWW or the streaming wake word, depending on the configured location
- id: stop_wake_word
then:
- if:
condition:
lambda: |-
return id(wake_word_engine_location).current_option() == "In Home Assistant";
then:
- lambda: id(va).set_use_wake_word(false);
- voice_assistant.stop:
- if:
condition:
lambda: |-
return id(wake_word_engine_location).current_option() == "On device";
then:
- micro_wake_word.stop:
# Set the voice assistant phase to idle or muted, depending on if the software mute switch is activated
- id: set_idle_or_mute_phase
then:
- if:
condition:
switch.is_off: mute
then:
- lambda: id(voice_assistant_phase) = ${voice_assist_idle_phase_id};
else:
- lambda: id(voice_assistant_phase) = ${voice_assist_muted_phase_id};
- id: play_sound
parameters:
priority: bool
sound_file: "audio::AudioFile*"
then:
- lambda: |-
if (priority) {
id(echo_pyramid_player)
->make_call()
.set_command(media_player::MediaPlayerCommand::MEDIA_PLAYER_COMMAND_STOP)
.set_announcement(true)
.perform();
}
if ( (id(echo_pyramid_player).state != media_player::MediaPlayerState::MEDIA_PLAYER_STATE_ANNOUNCING ) || priority) {
id(echo_pyramid_player)
->play_file(sound_file, true, false);
}
- id: draw_display
then:
- if:
condition:
lambda: return !id(init_in_progress);
then:
- if:
condition:
wifi.connected:
then:
- if:
condition:
api.connected:
then:
- lambda: |
switch(id(voice_assistant_phase)) {
case ${voice_assist_listening_phase_id}:
id(atoms3r_lcd).show_page(listening_page);
id(atoms3r_lcd).update();
break;
case ${voice_assist_thinking_phase_id}:
id(atoms3r_lcd).show_page(thinking_page);
id(atoms3r_lcd).update();
break;
case ${voice_assist_replying_phase_id}:
id(atoms3r_lcd).show_page(replying_page);
id(atoms3r_lcd).update();
break;
case ${voice_assist_error_phase_id}:
id(atoms3r_lcd).show_page(error_page);
id(atoms3r_lcd).update();
break;
case ${voice_assist_muted_phase_id}:
id(atoms3r_lcd).show_page(muted_page);
id(atoms3r_lcd).update();
break;
case ${voice_assist_not_ready_phase_id}:
id(atoms3r_lcd).show_page(no_ha_page);
id(atoms3r_lcd).update();
break;
default:
id(atoms3r_lcd).show_page(idle_page);
id(atoms3r_lcd).update();
}
else:
- display.page.show: no_ha_page
- component.update: atoms3r_lcd
else:
- display.page.show: no_wifi_page
- component.update: atoms3r_lcd
else:
- display.page.show: initializing_page
- component.update: atoms3r_lcd
image:
- file: ${error_illustration_file}
id: casita_error
resize: 160x120
type: RGB
transparency: alpha_channel
- file: ${idle_illustration_file}
id: casita_idle
resize: 160x120
type: RGB
transparency: alpha_channel
- file: ${listening_illustration_file}
id: casita_listening
resize: 160x120
type: RGB
transparency: alpha_channel
- file: ${thinking_illustration_file}
id: casita_thinking
resize: 160x120
type: RGB
transparency: alpha_channel
- file: ${replying_illustration_file}
id: casita_replying
resize: 160x120
type: RGB
transparency: alpha_channel
- file: ${loading_illustration_file}
id: casita_initializing
resize: 160x120
type: RGB
transparency: alpha_channel
- file: ${error_no_wifi_illustration_file}
id: error_no_wifi
resize: 160x120
type: RGB
transparency: alpha_channel
- file: ${error_no_ha_illustration_file}
id: error_no_ha
resize: 160x120
type: RGB
transparency: alpha_channel
font:
- file: ${mdi_webfont_file}
id: mdi_icon_128
size: 128
bpp: 4
glyphs:
- "\U000F036D" # mdi:mic-mute
color:
- id: idle_color
hex: ${idle_illustration_background_color}
- id: listening_color
hex: ${listening_illustration_background_color}
- id: thinking_color
hex: ${thinking_illustration_background_color}
- id: replying_color
hex: ${replying_illustration_background_color}
- id: loading_color
hex: ${loading_illustration_background_color}
- id: error_color
hex: ${error_illustration_background_color}

View File

@@ -0,0 +1,599 @@
esphome:
name: tab1
friendly_name: M5Stack Tab5 1
on_boot:
# Set the charging icon to the correct state on boot
- then:
- logger.log: "Delaying backlight initialization"
- delay: 2s
- logger.log: "End delay"
- if:
condition:
lambda: return id(charging).state;
then:
- lvgl.widget.show:
id: charging_icon_widget
else:
- lvgl.widget.hide:
id: charging_icon_widget
esp32:
board: esp32-p4-evboard
flash_size: 16MB
framework:
type: esp-idf
advanced:
enable_idf_experimental_features: true
esp32_hosted:
variant: esp32c6
active_high: true
clk_pin: GPIO12
cmd_pin: GPIO13
d0_pin: GPIO11
d1_pin: GPIO10
d2_pin: GPIO9
d3_pin: GPIO8
reset_pin: GPIO15
slot: 1
logger:
hardware_uart: USB_SERIAL_JTAG
level: DEBUG
psram:
mode: hex
speed: 200MHz
api:
# Touchscreen support
external_components:
- source: github://pr#12075
components: [st7123]
refresh: 1h
ota:
platform: esphome
wifi:
ssid: !secret wifi_ssid
password: !secret wifi_password
fast_connect: true
on_connect:
- lvgl.label.update:
id: lbl_status
text: "IDLE"
- select.set:
id: dac_output
option: "LINE1"
- lvgl.label.update:
id: lbl_ip
text: !lambda return id(ip_addr).state;
- lvgl.label.update:
id: lbl_ap
text: !lambda return id(ssid).state;
on_disconnect:
- lvgl.label.update:
id: lbl_status
text: "DISCONNECTED"
text_sensor:
- platform: wifi_info
ip_address:
id: ip_addr
name: Device IP Address
address_0:
name: Device IP Address 0
address_1:
name: Device IP Address 1
address_2:
name: Device IP Address 2
address_3:
name: Device IP Address 3
address_4:
name: Device IP Address 4
ssid:
id: ssid
name: Device Connected SSID
bssid:
name: Device Connected BSSID
mac_address:
name: Device Mac Wifi Address
scan_results:
name: Device Latest Scan Results
dns_address:
name: Device DNS Address
power_save_mode:
name: Device Wifi Power Save Mode
time:
- platform: sntp
id: sntp_time
timezone: America/New_York
servers:
- 0.pool.ntp.org
- 1.pool.ntp.org
- 2.pool.ntp.org
# wireguard:
# address: !secret tab1_wg_ip
# private_key: !secret tab1_wg_pk
# peer_endpoint: !secret wg_host
# peer_public_key: !secret wg_pubkey
# # Optional keepalive (disabled by default)
# peer_persistent_keepalive: 25s
i2c:
- id: bsp_bus
sda: GPIO31
scl: GPIO32
frequency: 400kHz
pi4ioe5v6408:
- id: pi4ioe1
address: 0x43
# 0: O - wifi_antenna_int_ext
# 1: O - speaker_enable
# 2: O - external_5v_power
# 3: NC
# 4: O - lcd reset
# 5: O - touch panel reset
# 6: O - camera reset
# 7: I - headphone detect
- id: pi4ioe2
address: 0x44
# 0: O - wifi_power
# 1: NC
# 2: NC
# 3: O - usb_5v_power
# 4: O - poweroff pulse
# 5: O - quick charge enable (inverted)
# 6: I - charging status
# 7: O - charge enable
button:
- platform: restart
name: "Restart Tablet"
switch:
- platform: gpio
id: wifi_power
name: "WiFi Power"
pin:
pi4ioe5v6408: pi4ioe2
number: 0
restore_mode: ALWAYS_ON
- platform: gpio
id: usb_5v_power
name: "USB Power"
pin:
pi4ioe5v6408: pi4ioe2
number: 3
- platform: gpio
id: quick_charge
name: "Quick Charge"
pin:
pi4ioe5v6408: pi4ioe2
number: 5
inverted: true
- platform: gpio
id: charge_enable
name: "Charge Enable"
pin:
pi4ioe5v6408: pi4ioe2
number: 7
restore_mode: ALWAYS_ON
- platform: gpio
id: wifi_antenna_int_ext
pin:
pi4ioe5v6408: pi4ioe1
number: 0
- platform: gpio
id: speaker_enable
name: "Speaker Enable"
pin:
pi4ioe5v6408: pi4ioe1
number: 1
restore_mode: ALWAYS_ON
- platform: gpio
id: external_5v_power
name: "External 5V Power"
pin:
pi4ioe5v6408: pi4ioe1
number: 2
binary_sensor:
- platform: gpio
id: charging
name: "Charging Status"
pin:
pi4ioe5v6408: pi4ioe2
number: 6
mode: INPUT_PULLDOWN
on_state:
then:
- if:
condition:
lambda: return id(charging).state;
then:
- lvgl.widget.show:
id: charging_icon_widget
else:
- lvgl.widget.hide:
id: charging_icon_widget
- platform: gpio
id: headphone_detect
name: "Headphone Detect"
pin:
pi4ioe5v6408: pi4ioe1
number: 7
- platform: lvgl
widget: volume_up_widget
name: Volume Up Button
on_press:
then:
- logger.log: "Button pressed"
- media_player.volume_up:
id: tab5_media_player
- delay: 100ms
- lvgl.label.update:
id: lbl_volume
text: !lambda return to_string(int(id(tab5_media_player).volume * 100));
- light.turn_on:
id: backlight
brightness: !lambda |-
float current_value = id(backlight).current_values.get_brightness();
return current_value < 0.6 ? 0.6 : current_value + 0.2;
- platform: lvgl
widget: volume_down_widget
name: Volume Down Button
on_press:
then:
- logger.log: "Button pressed"
- media_player.volume_down:
id: tab5_media_player
- lvgl.label.update:
id: lbl_volume
text: !lambda return to_string(int(id(tab5_media_player).volume * 100));
- light.turn_on:
id: backlight
brightness: !lambda |-
float current_value = id(backlight).current_values.get_brightness();
return current_value < 0.6 ? 0.6 : current_value - 0.2;
sensor:
- platform: ina226
address: 0x41
adc_averaging: 16
max_current: 8.192A
shunt_resistance: 0.005ohm
bus_voltage:
id: battery_voltage
name: "Battery Voltage"
current:
id: battery_current
name: "Battery Current"
# Positive means discharging
# Negative means charging
# Tab5 built-in battery discharges from full (8.23 V) to shutdown threshold (6.0 V)
- platform: template
name: "Battery Percentage"
lambda: |-
float voltage = id(battery_voltage).state;
// Adjust these values based on your battery's actual min/max voltage
float min_voltage = 6.75; // Discharged voltage
float max_voltage = 8.2; // Fully charged voltage
float percentage = (voltage - min_voltage) / (max_voltage - min_voltage) * 100.0;
if (percentage > 100.0) return 100.0;
if (percentage < 0.0) return 0.0;
return percentage;
update_interval: 60s
unit_of_measurement: "%"
accuracy_decimals: 1
id: battery_percent
on_value:
then:
- lvgl.label.update:
id: lbl_battery
text:
format: "Battery: %.1f%%"
args: ["id(battery_percent).state"]
touchscreen:
- platform: st7123
i2c_id: bsp_bus
interrupt_pin: GPIO23
display: lcd
update_interval: never
reset_pin:
pi4ioe5v6408: pi4ioe1
number: 5
calibration:
x_min: 0
x_max: 720
y_min: 0
y_max: 1280
id: touch
on_touch:
- logger.log: "LVGL resuming"
- lvgl.resume:
- light.turn_on: backlight
# on_release:
# - media_player.stop:
esp_ldo:
- voltage: 2.5V
channel: 3
display:
- platform: mipi_dsi
id: lcd
dimensions:
height: 1280
width: 720
model: M5STACK-TAB5-V2
reset_pin:
pi4ioe5v6408: pi4ioe1
number: 4
output:
- platform: ledc
pin: GPIO22
id: backlight_pwm
frequency: 1000Hz
light:
- platform: monochromatic
output: backlight_pwm
name: "Display Backlight"
id: backlight
restore_mode: ALWAYS_ON
default_transition_length: 250ms
initial_state:
brightness: "50%"
image:
defaults:
type: rgb565
transparency: alpha_channel
resize: 512x512
byte_order: little_endian
images:
- file: "images/va_idle.png"
id: va_idle
- file: "images/va_listen.png"
id: va_listen
- file: "images/va_speak.png"
id: va_speak
- file: "images/charging.png"
id: charging_icon
resize: 64x64
lvgl:
byte_order: little_endian
on_idle:
timeout: 120s
then:
- logger.log: "LVGL is idle"
- light.turn_off:
id: backlight
transition_length: 15s
- lvgl.pause:
widgets:
- obj:
align: TOP_MID
width: 100%
height: 100%
layout:
type: flex
flex_flow: column
flex_align_main: START
flex_align_track: center
flex_align_cross: center
widgets:
- label:
align: TOP_MID
id: lbl_status
text_font: montserrat_48
text: "CONNECTING..."
- label:
align: TOP_MID
id: lbl_ap
text_font: montserrat_22
text: "CONNECTING..."
- label:
align: TOP_MID
id: lbl_ip
text_font: montserrat_22
text: "CONNECTING..."
- image:
id: listen_icon_widget
src: va_idle
align: CENTER
- label:
align: BOTTOM_LEFT
id: lbl_version
text_font: montserrat_12
text: "v0.6"
- label:
align: BOTTOM_RIGHT
id: lbl_battery
text_font: montserrat_28
text: Loading...
- image:
id: charging_icon_widget
src: charging_icon
align: TOP_RIGHT
- button:
id: volume_up_widget
widgets:
- label:
text: "\uF028"
text_font: montserrat_48
text_align: CENTER
align: CENTER
x: 20
y: 20
width: 100
height: 100
pad_all: 8
- button:
id: volume_down_widget
widgets:
- label:
text: "\uF027"
text_font: montserrat_48
text_align: CENTER
align: CENTER
x: 20
y: 140
width: 100
height: 100
pad_all: 8
- label:
x: 20
y: 260
id: lbl_volume
text_font: montserrat_28
      text: !lambda return str_sprintf("%.1f", id(tab5_media_player).volume);
# The DAC Output select needs to be manually (or with an automation) changed to `LINE1` for the onboard speaker
select:
- platform: es8388
dac_output:
name: DAC Output
id: dac_output
adc_input_mic:
name: ADC Input Mic
id: adc_input
- platform: template
id: wifi_antenna_select
name: "WiFi Antenna"
options:
- "Internal"
- "External"
optimistic: true
on_value:
- if:
condition:
lambda: return i == 0;
then:
- switch.turn_off: wifi_antenna_int_ext
else:
- switch.turn_on: wifi_antenna_int_ext
i2s_audio:
- id: mic_bus
i2s_lrclk_pin: GPIO29
i2s_bclk_pin: GPIO27
i2s_mclk_pin: GPIO30
audio_adc:
- platform: es7210
id: es7210_adc
bits_per_sample: 16bit
sample_rate: 16000
microphone:
- platform: i2s_audio
id: tab5_microphone
i2s_din_pin: GPIO28
sample_rate: 16000
bits_per_sample: 16bit
adc_type: external
audio_dac:
- platform: es8388
id: es8388_dac
speaker:
- platform: i2s_audio
id: tab5_speaker
i2s_dout_pin: GPIO26
audio_dac: es8388_dac
dac_type: external
channel: mono
buffer_duration: 100ms
bits_per_sample: 16bit
sample_rate: 48000
media_player:
- platform: speaker
name: None
id: tab5_media_player
announcement_pipeline:
speaker: tab5_speaker
format: WAV
micro_wake_word:
id: mww
models:
- okay_nabu
- hey_mycroft
- hey_jarvis
on_wake_word_detected:
- voice_assistant.start:
wake_word: !lambda return wake_word;
voice_assistant:
id: va
microphone: tab5_microphone
media_player: tab5_media_player
micro_wake_word: mww
on_listening:
- logger.log: "LVGL resuming"
- lvgl.resume:
- light.turn_on: backlight
- lvgl.image.update:
id: listen_icon_widget
src: va_listen
- lvgl.label.update:
id: lbl_status
text: "LISTENING"
on_stt_vad_end:
- lvgl.label.update:
id: lbl_status
text: "PROCESSING"
- lvgl.image.update:
id: listen_icon_widget
src: va_idle
on_tts_start:
- lvgl.label.update:
id: lbl_status
text: "RESPONDING"
- lvgl.image.update:
id: listen_icon_widget
src: va_speak
on_end:
# Wait a short amount of time to see if an announcement starts
- wait_until:
condition:
- media_player.is_announcing:
timeout: 0.5s
# Announcement is finished and the I2S bus is free
- wait_until:
- and:
- not:
media_player.is_announcing:
- not:
speaker.is_playing:
- micro_wake_word.start:
- lvgl.label.update:
id: lbl_status
text: "IDLE"
- lvgl.image.update:
id: listen_icon_widget
src: va_idle
- light.turn_off:
id: backlight
transition_length: 15s
on_client_connected:
- micro_wake_word.start:
on_client_disconnected:
- micro_wake_word.stop:

View File

@@ -1,6 +1,18 @@
esphome:
name: tab1
friendly_name: M5Stack Tab5 1
name: tab2
friendly_name: M5Stack Tab5 2
on_boot:
# Set the charging icon to the correct state on boot
- then:
- if:
condition:
lambda: return id(charging).state;
then:
- lvgl.widget.show:
id: charging_icon_widget
else:
- lvgl.widget.hide:
id: charging_icon_widget
esp32:
board: esp32-p4-evboard
@@ -43,6 +55,7 @@ ota:
wifi:
ssid: !secret wifi_ssid
password: !secret wifi_password
fast_connect: true
on_connect:
- lvgl.label.update:
id: lbl_status
@@ -54,6 +67,29 @@ wifi:
- lvgl.label.update:
id: lbl_status
text: "DISCONNECTED"
# ap:
# password: !secret hotspot_password
# ap_timeout: 90s
# captive_portal:
time:
- platform: sntp
id: sntp_time
timezone: America/New_York
servers:
- 0.pool.ntp.org
- 1.pool.ntp.org
- 2.pool.ntp.org
# wireguard:
# address: !secret tab1_wg_ip
# private_key: !secret tab1_wg_pk
# peer_endpoint: !secret wg_host
# peer_public_key: !secret wg_pubkey
# # Optional keepalive (disabled by default)
# peer_persistent_keepalive: 25s
i2c:
- id: bsp_bus
@@ -142,6 +178,17 @@ binary_sensor:
pi4ioe5v6408: pi4ioe2
number: 6
mode: INPUT_PULLDOWN
on_state:
then:
- if:
condition:
lambda: return id(charging).state;
then:
- lvgl.widget.show:
id: charging_icon_widget
else:
- lvgl.widget.hide:
id: charging_icon_widget
- platform: gpio
id: headphone_detect
@@ -186,7 +233,7 @@ sensor:
- lvgl.label.update:
id: lbl_battery
text:
format: "Battery: %.1f%"
format: "Battery: %.1f%%"
args: ["id(battery_percent).state"]
touchscreen:
@@ -255,6 +302,9 @@ image:
id: va_listen
- file: "images/va_speak.png"
id: va_speak
- file: "images/charging.png"
id: charging_icon
resize: 64x64
lvgl:
byte_order: little_endian
@@ -287,6 +337,25 @@ lvgl:
id: lbl_battery
text_font: montserrat_28
text: Loading...
- image:
id: charging_icon_widget
src: charging_icon
align: TOP_RIGHT
- slider:
id: backlight_slider
x: 20
y: 50
width: 30
height: 220
pad_all: 8
min_value: 0
max_value: 255
on_release:
- homeassistant.action:
action: light.turn_on
data:
entity_id: light.backlight
brightness: !lambda return int(x);
# The DAC Output select needs to be manually (or with an automation) changed to `LINE1` for the onboard speaker
select:

View File

@@ -0,0 +1,13 @@
rabbitmq:
host: "rabbitmq.reeselink.com"
port: 5672
virtual_host: "/"
username: "user"
password: "password"
# Which *exchange* (topic) you actually want to listen to.
# The program will create a temporary queue, bind it to this exchange
# with the routing key supplied in `routing_key`.
subscriber:
exchange: "nic" # ← change to “reese” or any other exchange
routing_key: "add" # ← could be “add”, “delete”, or any pattern

View File

@@ -0,0 +1,136 @@
#!/usr/bin/env python3
"""
RabbitMQ setup & consumer using pika.
- Creates two **topic exchanges**: nic, reese
- For each exchange creates two queues: add, delete
- Binds the queues with routing keys “add” and “delete”
- Subscribes (consumes) from a **single** exchange/queue pair that is
supplied via a tiny config file (config.yaml).
Run:
python3 rabbit_demo.py
"""
import logging
import sys
from pathlib import Path
import pika # type: ignore
import yaml
# ----------------------------------------------------------------------
# 1. Load configuration
# ----------------------------------------------------------------------
# Default config written on first run; users edit the generated YAML file.
DEFAULT_CFG = """
rabbitmq:
  host: "localhost"
  port: 5672
  virtual_host: "/"
  username: "guest"
  password: "guest"
# Which *exchange* (topic) you actually want to listen to.
# The program will create a temporary queue, bind it to this exchange
# with the routing key supplied in `routing_key`.
subscriber:
  exchange: "nic" # ← change to “reese” or any other exchange
  routing_key: "add" # ← could be “add”, “delete”, or any pattern
"""
CONFIG_PATH = Path("active/device_unifi/config.yaml")
# Seed the config file with the defaults so there is always a file to parse.
if not CONFIG_PATH.exists():
    CONFIG_PATH.write_text(DEFAULT_CFG)
with CONFIG_PATH.open() as f:
    cfg = yaml.safe_load(f)
# ----------------------------------------------------------------------
# 2. Build connection parameters
# ----------------------------------------------------------------------
cred = pika.PlainCredentials(cfg["rabbitmq"]["username"], cfg["rabbitmq"]["password"])
params = pika.ConnectionParameters(
    host=cfg["rabbitmq"]["host"],
    port=cfg["rabbitmq"]["port"],
    virtual_host=cfg["rabbitmq"]["virtual_host"],
    credentials=cred,
)
# ----------------------------------------------------------------------
# 3⃣ Helper to declare exchanges / queues
# ----------------------------------------------------------------------
def declare_topology(channel):
    """Declare the static RabbitMQ topology on *channel*.

    Sets up the two durable topic exchanges ("nic" and "reese") and, for
    each, a durable "add" and "delete" queue bound with the matching
    routing key (queue names are e.g. ``nic_add``, ``reese_delete``).
    """
    for exchange_name in ("nic", "reese"):
        channel.exchange_declare(
            exchange=exchange_name, exchange_type="topic", durable=True
        )
        for binding_key in ("add", "delete"):
            queue = f"{exchange_name}_{binding_key}"  # e.g. nic_add, reese_delete
            channel.queue_declare(queue=queue, durable=True)
            # Bind the queue to its exchange using the bare routing key.
            channel.queue_bind(
                queue=queue, exchange=exchange_name, routing_key=binding_key
            )
            logging.info(
                f"Declared queue {queue} bound to {exchange_name} with key '{binding_key}'"
            )
# ----------------------------------------------------------------------
# 4⃣ Consumer callback
# ----------------------------------------------------------------------
def on_message(ch, method, properties, body):
    """Consumer callback: log the delivery details, then acknowledge it."""
    summary = (
        f"Received from exchange '{method.exchange}' "
        f"routing_key='{method.routing_key}': {body!r}"
    )
    logging.info(summary)
    # Positive ack so the broker removes the message from the queue.
    ch.basic_ack(delivery_tag=method.delivery_tag)
# ----------------------------------------------------------------------
# 5⃣ Main routine
# ----------------------------------------------------------------------
def main():
    """Declare the topology, subscribe per the config file, and consume forever.

    Blocks inside ``start_consuming()`` until Ctrl+C; the ``with`` block then
    closes the AMQP connection on exit.
    """
    logging.basicConfig(
        level=logging.INFO, format="%(asctime)s %(levelname)s %(message)s"
    )
    with pika.BlockingConnection(params) as conn:
        channel = conn.channel()
        # Declare the static topology (exchanges + durable queues).
        declare_topology(channel)
        # Set up a *temporary*, exclusive queue for the subscriber defined in
        # the config; the broker names it and deletes it on disconnect.
        result = channel.queue_declare(
            queue="", exclusive=True
        )  # server-generated name
        tmp_queue = result.method.queue
        exch = cfg["subscriber"]["exchange"]
        rkey = cfg["subscriber"]["routing_key"]
        channel.queue_bind(queue=tmp_queue, exchange=exch, routing_key=rkey)
        logging.info(
            f"Subscribed to exchange '{exch}' with routing_key '{rkey}' "
            f"using temporary queue '{tmp_queue}'"
        )
        # Start consuming; `on_message` acks each delivery.
        channel.basic_consume(queue=tmp_queue, on_message_callback=on_message)
        try:
            logging.info("Waiting for messages. Press Ctrl+C to exit.")
            channel.start_consuming()
        except KeyboardInterrupt:
            logging.info("Interrupted closing connection.")
            channel.stop_consuming()


if __name__ == "__main__":
    # main() returns None, so this exits with status 0.
    sys.exit(main())

View File

@@ -0,0 +1,46 @@
import json
from pathlib import Path
import pika
import yaml
# Default config written on first run; shared format with rabbit_demo.py.
DEFAULT_CFG = """
rabbitmq:
  host: "localhost"
  port: 5672
  virtual_host: "/"
  username: "guest"
  password: "guest"
# Which *exchange* (topic) you actually want to listen to.
# The program will create a temporary queue, bind it to this exchange
# with the routing key supplied in `routing_key`.
subscriber:
  exchange: "nic" # ← change to “reese” or any other exchange
  routing_key: "add" # ← could be “add”, “delete”, or any pattern
"""
CONFIG_PATH = Path("active/device_unifi/config.yaml")
# Seed the config file with defaults on first run.
if not CONFIG_PATH.exists():
    CONFIG_PATH.write_text(DEFAULT_CFG)
with CONFIG_PATH.open() as f:
    cfg = yaml.safe_load(f)
cred = pika.PlainCredentials(cfg["rabbitmq"]["username"], cfg["rabbitmq"]["password"])
params = pika.ConnectionParameters(
    host=cfg["rabbitmq"]["host"],
    port=cfg["rabbitmq"]["port"],
    virtual_host=cfg["rabbitmq"]["virtual_host"],
    credentials=cred,
)
# Publish one persistent test message to the "reese" topic exchange.
# NOTE(review): the payload text says "nic" but this publishes to the
# "reese" exchange — confirm which was intended.
with pika.BlockingConnection(params) as c:
    ch = c.channel()
    ch.basic_publish(
        exchange="reese",
        routing_key="add",
        body=json.dumps({"msg": "hello nic add"}),
        properties=pika.BasicProperties(delivery_mode=2),  # make it persistent
    )

View File

@@ -0,0 +1,15 @@
from update_dns import ApiHelperMethods, ApiPaths, ApiWrapper
def test_api_get():
    # Smoke test: requires a reachable UniFi controller and a valid API key.
    assert ApiWrapper.api_get(ApiPaths.list_sites()) is not None
def test_site_name_to_id():
    # Resolves the default site name; exits the process if it is missing.
    assert ApiHelperMethods.site_name_to_id("Default") is not None
def test_dns_record_exists():
    # Assumes "test.reeselink.com" exists on the controller and the other
    # name does not — TODO confirm these fixtures stay in place.
    assert ApiHelperMethods.dns_record_exists("test.reeselink.com")
    assert not ApiHelperMethods.dns_record_exists("idontexist.reeselink.com")

View File

@@ -0,0 +1,58 @@
# Unifi
## Update DNS Records via API
```bash
export API_KEY=$(cat active/device_unifi/secrets/api-key)
# List site IDs
curl -L -g -k -s "https://192.168.1.1/proxy/network/integration/v1/sites" \
-H "Accept: application/json" \
-H "X-API-Key: ${API_KEY}" | jq -rc '.data[0].id'
# List domains
curl -L -g -k -s "https://192.168.1.1/proxy/network/integration/v1/sites/88f7af54-98f8-306a-a1c7-c9349722b1f6/dns/policies" \
-H "Accept: application/json" \
-H "X-API-Key: ${API_KEY}" | jq -r '.data[] | {domain, id}'
# List device domains
curl -L -g -k -s "https://10.1.0.1/proxy/network/v2/api/site/default/static-dns/devices" \
-H "Accept: application/json" \
-H "X-API-Key: ${API_KEY}" | jq -r
# List clients
curl -L -g -k -s "https://192.168.1.1/proxy/network/integration/v1/sites/88f7af54-98f8-306a-a1c7-c9349722b1f6/clients" \
-H "Accept: application/json" \
-H "X-API-Key: ${API_KEY}"
# List firewall policies
curl -L -g -k -s "https://192.168.1.1/proxy/network/integration/v1/sites/88f7af54-98f8-306a-a1c7-c9349722b1f6/firewall/policies" \
-H "Accept: application/json" \
-H "X-API-Key: ${API_KEY}"
# Create a record
curl -L -g -k -s "https://192.168.1.1/proxy/network/integration/v1/sites/88f7af54-98f8-306a-a1c7-c9349722b1f6/dns/policies" \
-H "Accept: application/json" \
-H "X-API-Key: ${API_KEY}" \
-H "Content-Type: application/json" \
-d "{
\"type\": \"A_RECORD\",
\"enabled\": true,
\"domain\": \"test.reeselink.com\",
\"ipv4Address\": \"10.1.0.100\",
\"ttlSeconds\": 300
}"
# Update a record
curl -L -g -k -s -X PUT "https://192.168.1.1/proxy/network/integration/v1/sites/88f7af54-98f8-306a-a1c7-c9349722b1f6/dns/policies/a5689d61-811a-48b0-a47c-2ece038e4356" \
-H "Accept: application/json" \
-H "X-API-Key: ${API_KEY}" \
-H "Content-Type: application/json" \
-d "{
\"type\": \"A_RECORD\",
\"enabled\": true,
\"domain\": \"test.reeselink.com\",
\"ipv4Address\": \"10.1.0.100\",
\"ttlSeconds\": 300
}"
```

View File

@@ -0,0 +1,103 @@
import json
import os
from typing import TypedDict
import requests
# Controller endpoint / site / API key, overridable via the environment.
UNIFI_API_ENDPOINT = os.getenv("UNIFI_API_ENDPOINT", "https://192.168.1.1")
UNIFI_SITE_NAME = os.getenv("UNIFI_SITE_NAME", "Default")
UNIFI_API_KEY = os.getenv("UNIFI_API_KEY")
if not UNIFI_API_KEY:
    try:
        with open("active/device_unifi/secrets/api-key", "r") as f:
            # Fix: strip the trailing newline most editors leave in the
            # secrets file — an embedded newline corrupts the X-API-Key header.
            UNIFI_API_KEY = f.read().strip()
    except (FileNotFoundError, PermissionError) as e:
        # Best-effort: warn and continue; requests made without a key will fail.
        print(e)
        print("UNIFI_API_KEY required.")
type uuid_type = str
class UnifiSite(TypedDict):
    """Shape of one site entry returned by the sites listing endpoint."""

    id: uuid_type  # server-generated UUID for the site
    internalReference: str  # controller-internal reference string
    name: str  # human-readable site name (matched by site_name_to_id)
class ApiPaths:
    """URL path builders for the UniFi Network integration API."""

    # Common prefix shared by every endpoint below.
    _SITES = "/proxy/network/integration/v1/sites"

    @classmethod
    def list_sites(cls) -> str:
        """Path listing all sites on the controller."""
        return cls._SITES

    @classmethod
    def list_records(cls, site_id: uuid_type) -> str:
        """Path listing the DNS policies (records) of one site."""
        return f"{cls._SITES}/{site_id}/dns/policies"

    @classmethod
    def create_record(cls, site_id: uuid_type) -> str:
        """Path for creating a new DNS policy in one site."""
        return f"{cls._SITES}/{site_id}/dns/policies"

    @classmethod
    def update_record(cls, site_id: uuid_type, record_id: uuid_type) -> str:
        """Path for updating one existing DNS policy."""
        return f"{cls._SITES}/{site_id}/dns/policies/{record_id}"
class ApiWrapper:
    """Thin `requests` wrapper adding the X-API-Key header to every call.

    `verify=False` is used because the controller serves a self-signed
    certificate on the LAN.
    """

    @classmethod
    def api_get(cls, path: str):
        """GET *path* from the controller and return the decoded JSON body."""
        return requests.get(
            f"{UNIFI_API_ENDPOINT}{path}",
            headers={"X-API-Key": UNIFI_API_KEY},
            verify=False,
        ).json()

    @classmethod
    def api_put(cls, path: str, body: dict):
        """PUT *body* as JSON to *path* and return the decoded JSON response.

        Fix: pass the dict directly via ``json=body``. The previous
        ``json=json.dumps(body)`` double-encoded the payload into a JSON
        *string* literal, which the API rejects.
        """
        return requests.put(
            f"{UNIFI_API_ENDPOINT}{path}",
            headers={"X-API-Key": UNIFI_API_KEY},
            verify=False,
            json=body,
        ).json()
class ApiHelperMethods:
    """Higher-level conveniences built on top of ApiWrapper/ApiPaths."""

    @classmethod
    def site_name_to_id(cls, site_name: str) -> uuid_type:
        """Resolve a human-readable site name to its controller UUID.

        Exits the process when the controller reports no sites or when no
        site carries the given name.
        """
        listing = ApiWrapper.api_get(ApiPaths.list_sites())
        sites: list[UnifiSite] = listing.get("data")
        if not sites:
            print("No sites found")
            exit(1)
        match = next((s for s in sites if s["name"] == site_name), None)
        if match is None:
            print("Site with that name not found")
            exit(1)
        return match["id"]

    # @classmethod
    # def upsert_dns_ipv4_record(cls, record_name: str, ipv4_addr: str) -> uuid_type:

    @classmethod
    def dns_record_exists(cls, record_name: str) -> bool:
        """Return True when a DNS policy with *record_name* already exists."""
        site_id = ApiHelperMethods.site_name_to_id(UNIFI_SITE_NAME)
        listing = ApiWrapper.api_get(path=ApiPaths.list_records(site_id))
        return any(rec["domain"] == record_name for rec in listing["data"])
if __name__ == "__main__":
    import sys

    # CLI entry point (work in progress): takes a site name as the first
    # argument. NOTE(review): `site_name` is currently unused — presumably
    # the upsert helper above will consume it once implemented.
    site_name = sys.argv[1]

View File

@@ -0,0 +1,90 @@
<mxfile host="Electron" agent="Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) draw.io/29.6.6 Chrome/144.0.7559.236 Electron/40.8.4 Safari/537.36" version="29.6.6">
<diagram name="Page-1" id="sur_P5ccan6r_R6vxB1T">
<mxGraphModel dx="1243" dy="832" grid="1" gridSize="10" guides="1" tooltips="1" connect="1" arrows="1" fold="1" page="1" pageScale="1" pageWidth="850" pageHeight="1100" math="0" shadow="0">
<root>
<mxCell id="0" />
<mxCell id="1" parent="0" />
<mxCell id="lTIulqBT4iiTOOd0l-js-23" parent="1" style="rounded=0;whiteSpace=wrap;html=1;verticalAlign=top;fillColor=none;" value="Client" vertex="1">
<mxGeometry height="440" width="200" x="840" y="120" as="geometry" />
</mxCell>
<mxCell id="lTIulqBT4iiTOOd0l-js-22" parent="1" style="rounded=0;whiteSpace=wrap;html=1;verticalAlign=top;fillColor=none;" value="Network Admin" vertex="1">
<mxGeometry height="440" width="200" x="560" y="120" as="geometry" />
</mxCell>
<mxCell id="lTIulqBT4iiTOOd0l-js-21" parent="1" style="rounded=0;whiteSpace=wrap;html=1;verticalAlign=top;fillColor=none;" value="Developer" vertex="1">
<mxGeometry height="440" width="200" y="120" as="geometry" />
</mxCell>
<mxCell id="lTIulqBT4iiTOOd0l-js-20" parent="1" style="rounded=0;whiteSpace=wrap;html=1;verticalAlign=top;fillColor=none;" value="Cluster Admin" vertex="1">
<mxGeometry height="440" width="200" x="280" y="120" as="geometry" />
</mxCell>
<mxCell id="lTIulqBT4iiTOOd0l-js-4" edge="1" parent="1" source="lTIulqBT4iiTOOd0l-js-1" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" target="lTIulqBT4iiTOOd0l-js-3" value="Create Record">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="lTIulqBT4iiTOOd0l-js-1" parent="1" style="rounded=0;whiteSpace=wrap;html=1;" value="External DNS" vertex="1">
<mxGeometry height="60" width="120" x="320" y="160" as="geometry" />
</mxCell>
<mxCell id="lTIulqBT4iiTOOd0l-js-13" edge="1" parent="1" source="lTIulqBT4iiTOOd0l-js-2" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" target="lTIulqBT4iiTOOd0l-js-8" value="Connect">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="lTIulqBT4iiTOOd0l-js-19" edge="1" parent="1" source="lTIulqBT4iiTOOd0l-js-2" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" target="lTIulqBT4iiTOOd0l-js-3" value="Get IP">
<mxGeometry relative="1" as="geometry">
<Array as="points">
<mxPoint x="940" y="190" />
</Array>
</mxGeometry>
</mxCell>
<mxCell id="lTIulqBT4iiTOOd0l-js-2" parent="1" style="rounded=0;whiteSpace=wrap;html=1;" value="Browser" vertex="1">
<mxGeometry height="60" width="120" x="880" y="240" as="geometry" />
</mxCell>
<mxCell id="lTIulqBT4iiTOOd0l-js-3" parent="1" style="ellipse;shape=cloud;whiteSpace=wrap;html=1;" value="Route53" vertex="1">
<mxGeometry height="80" width="120" x="600" y="150" as="geometry" />
</mxCell>
<mxCell id="lTIulqBT4iiTOOd0l-js-9" edge="1" parent="1" source="lTIulqBT4iiTOOd0l-js-7" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" target="lTIulqBT4iiTOOd0l-js-8" value="Allocate IP">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="lTIulqBT4iiTOOd0l-js-7" parent="1" style="rounded=0;whiteSpace=wrap;html=1;" value="Metal LB" vertex="1">
<mxGeometry height="60" width="120" x="320" y="240" as="geometry" />
</mxCell>
<mxCell id="lTIulqBT4iiTOOd0l-js-12" edge="1" parent="1" source="lTIulqBT4iiTOOd0l-js-8" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" target="lTIulqBT4iiTOOd0l-js-11" value="Forward to Gateway">
<mxGeometry relative="1" as="geometry">
<Array as="points">
<mxPoint x="660" y="350" />
</Array>
</mxGeometry>
</mxCell>
<mxCell id="lTIulqBT4iiTOOd0l-js-8" parent="1" style="rounded=0;whiteSpace=wrap;html=1;" value="Router/Proxy" vertex="1">
<mxGeometry height="60" width="120" x="600" y="240" as="geometry" />
</mxCell>
<mxCell id="lTIulqBT4iiTOOd0l-js-18" edge="1" parent="1" source="lTIulqBT4iiTOOd0l-js-10" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" target="lTIulqBT4iiTOOd0l-js-15" value="Create Storage">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="lTIulqBT4iiTOOd0l-js-10" parent="1" style="rounded=0;whiteSpace=wrap;html=1;" value="Longhorn" vertex="1">
<mxGeometry height="60" width="120" x="320" y="480" as="geometry" />
</mxCell>
<mxCell id="lTIulqBT4iiTOOd0l-js-17" edge="1" parent="1" source="lTIulqBT4iiTOOd0l-js-11" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" target="lTIulqBT4iiTOOd0l-js-14" value="Forward to Container">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="lTIulqBT4iiTOOd0l-js-11" parent="1" style="rounded=0;whiteSpace=wrap;html=1;" value="Traefik" vertex="1">
<mxGeometry height="60" width="120" x="320" y="320" as="geometry" />
</mxCell>
<mxCell id="lTIulqBT4iiTOOd0l-js-16" edge="1" parent="1" source="lTIulqBT4iiTOOd0l-js-14" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" target="lTIulqBT4iiTOOd0l-js-15">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="lTIulqBT4iiTOOd0l-js-14" parent="1" style="rounded=0;whiteSpace=wrap;html=1;" value="Container" vertex="1">
<mxGeometry height="60" width="120" x="40" y="320" as="geometry" />
</mxCell>
<mxCell id="lTIulqBT4iiTOOd0l-js-15" parent="1" style="rounded=0;whiteSpace=wrap;html=1;" value="Storage" vertex="1">
<mxGeometry height="60" width="120" x="40" y="400" as="geometry" />
</mxCell>
<mxCell id="lTIulqBT4iiTOOd0l-js-26" edge="1" parent="1" source="lTIulqBT4iiTOOd0l-js-24" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" target="lTIulqBT4iiTOOd0l-js-25" value="Request Certificate">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="lTIulqBT4iiTOOd0l-js-24" parent="1" style="rounded=0;whiteSpace=wrap;html=1;" value="Cert Manager" vertex="1">
<mxGeometry height="60" width="120" x="320" y="400" as="geometry" />
</mxCell>
<mxCell id="lTIulqBT4iiTOOd0l-js-25" parent="1" style="rounded=0;whiteSpace=wrap;html=1;" value="CA&lt;div&gt;(Let&#39;s Encrypt)&lt;/div&gt;" vertex="1">
<mxGeometry height="60" width="120" x="600" y="400" as="geometry" />
</mxCell>
</root>
</mxGraphModel>
</diagram>
</mxfile>

View File

@@ -0,0 +1,96 @@
<mxfile host="Electron" agent="Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) draw.io/29.6.6 Chrome/144.0.7559.236 Electron/40.8.4 Safari/537.36" version="29.6.6">
<diagram name="Page-1" id="sur_P5ccan6r_R6vxB1T">
<mxGraphModel dx="1243" dy="832" grid="1" gridSize="10" guides="1" tooltips="1" connect="1" arrows="1" fold="1" page="1" pageScale="1" pageWidth="850" pageHeight="1100" math="0" shadow="0">
<root>
<mxCell id="0" />
<mxCell id="1" parent="0" />
<mxCell id="lTIulqBT4iiTOOd0l-js-23" parent="1" style="rounded=0;whiteSpace=wrap;html=1;verticalAlign=top;fillColor=none;" value="Client" vertex="1">
<mxGeometry height="520" width="200" x="840" y="40" as="geometry" />
</mxCell>
<mxCell id="lTIulqBT4iiTOOd0l-js-22" parent="1" style="rounded=0;whiteSpace=wrap;html=1;verticalAlign=top;fillColor=none;" value="Network Admin" vertex="1">
<mxGeometry height="520" width="200" x="560" y="40" as="geometry" />
</mxCell>
<mxCell id="lTIulqBT4iiTOOd0l-js-21" parent="1" style="rounded=0;whiteSpace=wrap;html=1;verticalAlign=top;fillColor=none;" value="Developer" vertex="1">
<mxGeometry height="520" width="200" y="40" as="geometry" />
</mxCell>
<mxCell id="lTIulqBT4iiTOOd0l-js-20" parent="1" style="rounded=0;whiteSpace=wrap;html=1;verticalAlign=top;fillColor=none;" value="Cluster Admin" vertex="1">
<mxGeometry height="520" width="200" x="280" y="40" as="geometry" />
</mxCell>
<mxCell id="lTIulqBT4iiTOOd0l-js-4" edge="1" parent="1" source="lTIulqBT4iiTOOd0l-js-1" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" target="lTIulqBT4iiTOOd0l-js-3" value="Create Record">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="lTIulqBT4iiTOOd0l-js-1" parent="1" style="rounded=0;whiteSpace=wrap;html=1;" value="External DNS" vertex="1">
<mxGeometry height="60" width="120" x="320" y="160" as="geometry" />
</mxCell>
<mxCell id="lTIulqBT4iiTOOd0l-js-13" edge="1" parent="1" source="lTIulqBT4iiTOOd0l-js-2" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" target="lTIulqBT4iiTOOd0l-js-8" value="Connect">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="lTIulqBT4iiTOOd0l-js-19" edge="1" parent="1" source="lTIulqBT4iiTOOd0l-js-2" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" target="lTIulqBT4iiTOOd0l-js-3" value="Get IP">
<mxGeometry relative="1" as="geometry">
<Array as="points">
<mxPoint x="940" y="190" />
</Array>
</mxGeometry>
</mxCell>
<mxCell id="lTIulqBT4iiTOOd0l-js-2" parent="1" style="rounded=0;whiteSpace=wrap;html=1;" value="Browser" vertex="1">
<mxGeometry height="60" width="120" x="880" y="240" as="geometry" />
</mxCell>
<mxCell id="lTIulqBT4iiTOOd0l-js-3" parent="1" style="ellipse;shape=cloud;whiteSpace=wrap;html=1;" value="Route53" vertex="1">
<mxGeometry height="80" width="120" x="600" y="150" as="geometry" />
</mxCell>
<mxCell id="lTIulqBT4iiTOOd0l-js-9" edge="1" parent="1" source="lTIulqBT4iiTOOd0l-js-7" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" target="lTIulqBT4iiTOOd0l-js-8" value="Allocate IP">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="lTIulqBT4iiTOOd0l-js-7" parent="1" style="rounded=0;whiteSpace=wrap;html=1;" value="Metal LB" vertex="1">
<mxGeometry height="60" width="120" x="320" y="240" as="geometry" />
</mxCell>
<mxCell id="lTIulqBT4iiTOOd0l-js-12" edge="1" parent="1" source="lTIulqBT4iiTOOd0l-js-8" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" target="lTIulqBT4iiTOOd0l-js-11" value="Forward to Gateway">
<mxGeometry relative="1" as="geometry">
<Array as="points">
<mxPoint x="660" y="350" />
</Array>
</mxGeometry>
</mxCell>
<mxCell id="lTIulqBT4iiTOOd0l-js-8" parent="1" style="rounded=0;whiteSpace=wrap;html=1;" value="Router/Proxy" vertex="1">
<mxGeometry height="60" width="120" x="600" y="240" as="geometry" />
</mxCell>
<mxCell id="lTIulqBT4iiTOOd0l-js-18" edge="1" parent="1" source="lTIulqBT4iiTOOd0l-js-10" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" target="lTIulqBT4iiTOOd0l-js-15" value="Create Storage">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="lTIulqBT4iiTOOd0l-js-10" parent="1" style="rounded=0;whiteSpace=wrap;html=1;" value="Longhorn" vertex="1">
<mxGeometry height="60" width="120" x="320" y="480" as="geometry" />
</mxCell>
<mxCell id="lTIulqBT4iiTOOd0l-js-17" edge="1" parent="1" source="lTIulqBT4iiTOOd0l-js-11" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" target="lTIulqBT4iiTOOd0l-js-14" value="Forward to Container">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="lTIulqBT4iiTOOd0l-js-11" parent="1" style="rounded=0;whiteSpace=wrap;html=1;" value="Traefik" vertex="1">
<mxGeometry height="60" width="120" x="320" y="320" as="geometry" />
</mxCell>
<mxCell id="lTIulqBT4iiTOOd0l-js-16" edge="1" parent="1" source="lTIulqBT4iiTOOd0l-js-14" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" target="lTIulqBT4iiTOOd0l-js-15">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="lTIulqBT4iiTOOd0l-js-14" parent="1" style="rounded=0;whiteSpace=wrap;html=1;" value="Container" vertex="1">
<mxGeometry height="60" width="120" x="40" y="320" as="geometry" />
</mxCell>
<mxCell id="lTIulqBT4iiTOOd0l-js-15" parent="1" style="rounded=0;whiteSpace=wrap;html=1;" value="Storage" vertex="1">
<mxGeometry height="60" width="120" x="40" y="400" as="geometry" />
</mxCell>
<mxCell id="lTIulqBT4iiTOOd0l-js-26" edge="1" parent="1" source="lTIulqBT4iiTOOd0l-js-24" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" target="lTIulqBT4iiTOOd0l-js-25" value="Request Certificate">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="lTIulqBT4iiTOOd0l-js-24" parent="1" style="rounded=0;whiteSpace=wrap;html=1;" value="Cert Manager" vertex="1">
<mxGeometry height="60" width="120" x="320" y="400" as="geometry" />
</mxCell>
<mxCell id="lTIulqBT4iiTOOd0l-js-25" parent="1" style="rounded=0;whiteSpace=wrap;html=1;" value="CA&lt;div&gt;(Let&#39;s Encrypt)&lt;/div&gt;" vertex="1">
<mxGeometry height="60" width="120" x="600" y="400" as="geometry" />
</mxCell>
<mxCell id="lTIulqBT4iiTOOd0l-js-29" edge="1" parent="1" source="lTIulqBT4iiTOOd0l-js-28" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" target="lTIulqBT4iiTOOd0l-js-14" value="Create Internal Record">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="lTIulqBT4iiTOOd0l-js-28" parent="1" style="rounded=0;whiteSpace=wrap;html=1;" value="CoreDNS" vertex="1">
<mxGeometry height="60" width="120" x="320" y="80" as="geometry" />
</mxCell>
</root>
</mxGraphModel>
</diagram>
</mxfile>

View File

@@ -0,0 +1,34 @@
# Demo workload: a LoadBalancer Service plus nginx Deployment.  The
# external-dns annotation below requests that the Service's external IP be
# published under external-dns.reeselink.com (presumably consumed by the
# cluster's external-dns controller -- confirm it is deployed).
apiVersion: v1
kind: Service
metadata:
  name: nginx
  annotations:
    # Hostname external-dns should create/maintain for this Service.
    external-dns.alpha.kubernetes.io/hostname: external-dns.reeselink.com
spec:
  type: LoadBalancer
  ports:
    - port: 80
      name: http
      targetPort: 80
  selector:
    app: nginx
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx
spec:
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        # Must match both the Deployment selector and the Service selector.
        app: nginx
    spec:
      containers:
        - image: nginx
          name: nginx
          ports:
            - containerPort: 80
              name: http

View File

@@ -1,80 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: external-dns
labels:
app.kubernetes.io/name: external-dns
rules:
- apiGroups: [""]
resources: ["services","endpoints","pods","nodes"]
verbs: ["get","watch","list"]
- apiGroups: ["extensions","networking.k8s.io"]
resources: ["ingresses"]
verbs: ["get","watch","list"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: external-dns-viewer
labels:
app.kubernetes.io/name: external-dns
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: external-dns
subjects:
- kind: ServiceAccount
name: external-dns
namespace: kube-system # change to desired namespace: externaldns, kube-addons
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: external-dns
namespace: kube-system
labels:
app.kubernetes.io/name: external-dns
spec:
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/name: external-dns
template:
metadata:
labels:
app.kubernetes.io/name: external-dns
spec:
serviceAccountName: external-dns
containers:
- name: external-dns
image: registry.k8s.io/external-dns/external-dns:v0.14.2
args:
- --source=service
- --source=ingress
- --domain-filter=reeseapps.com
- --provider=aws
- --aws-zone-type=public
- --registry=txt
# - --txt-owner-id=external-dns
env:
- name: AWS_DEFAULT_REGION
value: us-east-1 # change to region where EKS is installed
- name: AWS_SHARED_CREDENTIALS_FILE
value: /.aws/externaldns-credentials
volumeMounts:
- name: aws-credentials
mountPath: /.aws
readOnly: true
resources:
requests:
memory: "64Mi"
cpu: "250m"
limits:
memory: "128Mi"
cpu: "500m"
volumes:
- name: aws-credentials
secret:
secretName: external-dns

View File

@@ -1,8 +0,0 @@
# comment out sa if it was previously created
apiVersion: v1
kind: ServiceAccount
metadata:
name: external-dns
namespace: kube-system
labels:
app.kubernetes.io/name: external-dns

View File

@@ -6,7 +6,7 @@ metadata:
namespace: metallb-system
spec:
addresses:
- 10.4.1.1-10.4.3.254
- 10.4.2.32-10.4.2.47
---
apiVersion: metallb.io/v1beta1

View File

@@ -9,4 +9,4 @@ spec:
- ReadWriteOnce
resources:
requests:
storage: 32Gi
storage: 8Gi

View File

@@ -2,12 +2,17 @@ apiVersion: v1
kind: Service
metadata:
name: {{ .Release.Name }}
annotations:
metallb.universe.tf/address-pool: "default-pool"
spec:
externalTrafficPolicy: Cluster
selector:
app: {{ .Release.Name }}
ports:
- port: {{ .Values.port }}
targetPort: 25565
name: {{ .Release.Name }}
ipFamilyPolicy: PreferDualStack
ipFamilies:
- IPv4
type: LoadBalancer
ports:
- name: minecraft
protocol: TCP
port: {{ .Values.port }}
targetPort: 25565
selector:
app.kubernetes.io/name: {{ .Release.Name }}

View File

@@ -4,7 +4,7 @@ get_server:
server_version: "1.21.3"
port: 25565
max_cpu: 4
max_ram: 8
max_ram: 2
server_props: |
enable-jmx-monitoring=false
rcon.port=25575

View File

@@ -42,7 +42,7 @@ spec:
- name: traefik-gateway
namespace: traefik
hostnames:
- "traefik-demo.reeselink.com"
- "traefik-reese.reeselink.com"
rules:
- matches:
- path:

View File

@@ -363,7 +363,8 @@ for folder in $(ls); do du --exclude .snapshots -sh $folder; done
alias {dudir,dud}='du -h --max-depth 1 | sort -h'
# Calculate all file sizes in current dir
alias {dufile,duf}='ls -lhSr'
alias {dufile,duf}='find . -name ".snapshots" -prune -o -type f -exec du -h {} + | sort -hr'
alias {dufiler,dufr}='find . -name ".snapshots" -prune -o -type f -exec du -h {} + | sort -h'
```
### Disk Wear

View File

@@ -1,3 +1,2 @@
[Network]
IPv6=true
Internal=true

View File

@@ -6,6 +6,8 @@ PublishPort=8000:8000/tcp
PublishPort=8001:8001/tcp
# llama.cpp instruct
PublishPort=8002:8002/tcp
# llama.cpp code
PublishPort=8003:8003/tcp
# stable-diffusion.cpp gen
PublishPort=1234:1234/tcp
# stable-diffusion.cpp edit

View File

@@ -34,6 +34,7 @@
- [open-webui](#open-webui)
- [lite-llm](#lite-llm)
- [Install Services with Quadlets](#install-services-with-quadlets)
- [API Keys](#api-keys)
- [Internal and External Pods](#internal-and-external-pods)
- [Llama CPP Server (Port 8000)](#llama-cpp-server-port-8000)
- [Llama CPP Embedding Server (Port 8001)](#llama-cpp-embedding-server-port-8001)
@@ -179,7 +180,11 @@ rsync -av --progress /home/ai/models/ /srv/models/
### Download models
In general I try to run 8 bit quantized minimum.
In my completely subjective opinion: 5 bit quant is usually the sweet spot for
unsloth models. Q5_K_S is usually just fine.
I usually download the F16 mmproj files. This is also completely subjective.
BF16 is fine. F32 is overkill.
#### Text models
@@ -218,8 +223,13 @@ hf download --local-dir . ggml-org/Ministral-3-3B-Instruct-2512-GGUF
##### Qwen
```bash
# qwen3.6-35b-a3b
mkdir qwen3.6-35b-a3b && cd qwen3.6-35b-a3b
hf download --local-dir . unsloth/Qwen3.6-35B-A3B-GGUF Qwen3.6-35B-A3B-UD-Q5_K_M.gguf
hf download --local-dir . unsloth/Qwen3.6-35B-A3B-GGUF mmproj-F16.gguf
# qwen3.5-27b-opus
mkdir qwen3.5-27b-opus && qwen3.5-27b-opus
mkdir qwen3.5-27b-opus && cd qwen3.5-27b-opus
hf download --local-dir . Jackrong/Qwen3.5-27B-Claude-4.6-Opus-Reasoning-Distilled-GGUF Qwen3.5-27B.Q4_K_M.gguf
hf download --local-dir . Jackrong/Qwen3.5-27B-Claude-4.6-Opus-Reasoning-Distilled-GGUF mmproj-BF16.gguf
@@ -555,6 +565,22 @@ podman run \
## Install Services with Quadlets
### API Keys
```bash
mkdir -p /home/ai/.llama-api
touch /home/ai/.llama-api/keys.env
chmod 600 /home/ai/.llama-api/keys.env
vim /home/ai/.llama-api/keys.env
LLAMA_API_KEY=
# Generate keys and append to file, then comma separate the keys
openssl rand -base64 48 >> keys.env
openssl rand -base64 48 >> keys.env
openssl rand -base64 48 >> keys.env
```
### Internal and External Pods
These will be used to restrict internet access to our llama.cpp and
@@ -562,10 +588,10 @@ stable-diffusion.cpp services while allowing the frontend services to
communicate with those containers.
```bash
scp -r active/software_ai_stack/quadlets_pods/* deskwork-ai:.config/containers/systemd/
scp -r active/software_ai_stack/ai-internal.* deskwork-ai:.config/containers/systemd/
ssh deskwork-ai
systemctl --user daemon-reload
systemctl --user start ai-internal-pod.service ai-external-pod.service
systemctl --user start ai-internal-pod.service
```
### Llama CPP Server (Port 8000)
@@ -573,7 +599,7 @@ systemctl --user start ai-internal-pod.service ai-external-pod.service
Installs the llama.cpp server to run our text models.
```bash
scp -r active/software_ai_stack/quadlets_llama_think/* deskwork-ai:.config/containers/systemd/
scp -r active/software_ai_stack/llama-think.container deskwork-ai:.config/containers/systemd/
ssh deskwork-ai
systemctl --user daemon-reload
systemctl --user restart ai-internal-pod.service
@@ -584,7 +610,7 @@ systemctl --user restart ai-internal-pod.service
Installs the llama.cpp server to run our embedding models
```bash
scp -r active/software_ai_stack/quadlets_llama_embed/* deskwork-ai:.config/containers/systemd/
scp -r active/software_ai_stack/llama-embed.container deskwork-ai:.config/containers/systemd/
ssh deskwork-ai
systemctl --user daemon-reload
systemctl --user restart ai-internal-pod.service
@@ -595,7 +621,7 @@ systemctl --user restart ai-internal-pod.service
Installs the llama.cpp server to run a constant instruct (no thinking) model for quick replies
```bash
scp -r active/software_ai_stack/quadlets_llama_instruct/* deskwork-ai:.config/containers/systemd/
scp -r active/software_ai_stack/llama-instruct.container deskwork-ai:.config/containers/systemd/
ssh deskwork-ai
systemctl --user daemon-reload
systemctl --user restart ai-internal-pod.service
@@ -711,11 +737,11 @@ Apple M4 max
export TOKEN=$(cat active/software_ai_stack/secrets/aipi-token)
# List Models
curl https://aipi.reeseapps.com/v1/models \
-H "Authorization: Bearer $TOKEN" | jq
curl https://llama-instruct.reeseapps.com/v1/models \
-H "Authorization: Bearer $TOKEN" | jq '.data'
# Text
curl https://aipi.reeseapps.com/v1/chat/completions \
curl https://llama-instruct.reeseapps.com/v1/chat/completions \
-H "Content-Type: application/json" \
-H "Authorization: Bearer $TOKEN" \
-d '{
@@ -724,26 +750,21 @@ curl https://aipi.reeseapps.com/v1/chat/completions \
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Hello, how are you?"}
],
"temperature": 0.7,
"max_tokens": 500
}' | jq
# Completion
curl https://aipi.reeseapps.com/v1/completions \
curl https://llama-instruct.reeseapps.com/v1/completions \
-H "Content-Type: application/json" \
-H "Authorization: Bearer $TOKEN" \
-d '{
"model": "llama-instruct/instruct",
"prompt": "Write a short poem about the ocean.",
"temperature": 0.7,
"max_tokens": 500,
"top_p": 1,
"frequency_penalty": 0,
"presence_penalty": 0
"max_tokens": 500
}' | jq
# Image Gen
curl https://aipi.reeseapps.com/v1/images/generations \
curl https://image-gen.reeselink.com/v1/images/generations \
-H "Content-Type: application/json" \
-H "Authorization: Bearer $TOKEN" \
-d '{
@@ -766,11 +787,11 @@ curl http://aipi.reeseapps.com/v1/images/edits \
# Embed
curl \
"https://aipi.reeseapps.com/v1/embeddings" \
"https://llama-embed.reeseapps.com/v1/embeddings" \
-H "Authorization: Bearer $TOKEN" \
-H "Content-Type: application/json" \
-d '{
"model": "llama-embed/embed",
"model": "deskwork-embed/embed",
"input":"This is the reason you ended up here:",
"encoding_format": "float"
}'
@@ -789,16 +810,20 @@ podman run --rm \
--env "HF_TOKEN=$HF_TOKEN" \
-p 8010:8000 \
--ipc=host \
-e ROCBLAS_USE_HIPBLASLT=1 \
-e TORCH_ROCM_AOTRITON_ENABLE_EXPERIMENTAL=1 \
-e VLLM_TARGET_DEVICE=rocm \
-e HIP_FORCE_DEV_KERNARG=1 \
-e RAY_EXPERIMENTAL_NOSET_ROCR_VISIBLE_DEVICES=1 \
docker.io/vllm/vllm-openai-rocm:nightly \
--enable-offline-docs \
# Pick your model
Qwen/Qwen3.5-35B-A3B --max-model-len 262144 --reasoning-parser qwen3 --enable-auto-tool-choice --tool-call-parser qwen3_coder
Qwen/Qwen3.5-35B-A3B-FP8 --max-model-len 262144 --reasoning-parser qwen3 --enable-auto-tool-choice --tool-call-parser qwen3_coder
Qwen/Qwen3.5-9B --max-model-len 262144 --reasoning-parser qwen3 --enable-auto-tool-choice --tool-call-parser qwen3_coder
Qwen/Qwen3.5-35B-A3B-FP8
google/gemma-4-26B-A4B-it
openai/gpt-oss-120b
```
## Misc

View File

@@ -1,5 +1,5 @@
- name: Create Deskwork AI Stack
hosts: toybox-ai
hosts: deskwork-ai
tasks:
- name: Create /home/ai/.config/containers/systemd
ansible.builtin.file:

View File

@@ -15,7 +15,7 @@
- ai-internal.pod
- llama-embed.container
- llama-instruct.container
- llama-think.container
- llama-code.container
- name: Reload and start the ai-internal-pod service
ansible.builtin.systemd_service:
state: restarted

View File

@@ -0,0 +1,22 @@
- name: Create Deskwork AI Stack
hosts: driveripper-ai
tasks:
- name: Create /home/ai/.config/containers/systemd
ansible.builtin.file:
path: /home/ai/.config/containers/systemd
state: directory
mode: "0755"
- name: Copy Quadlets
template:
src: "{{ item }}"
dest: "/home/ai/.config/containers/systemd/{{ item }}"
loop:
- ai-internal.network
- ai-internal.pod
- llama-think.container
- name: Reload and start the ai-internal-pod service
ansible.builtin.systemd_service:
state: restarted
name: ai-internal-pod.service
daemon_reload: true
scope: user

View File

@@ -0,0 +1,49 @@
[Unit]
Description=A Llama CPP Server Running a Coding Model
[Container]
# Shared AI internal pod without internet access
Pod=ai-internal.pod
# Image is built locally via podman build
Image=localhost/llama-cpp-vulkan:latest
# Downloaded models volume
Volume=/home/ai/models/text:/models:z
# GPU Device
AddDevice=/dev/kfd
AddDevice=/dev/dri
# Server command
Exec=--port 8003 \
-c 256000 \
-n 65536 \
--temp 0.7 \
--top-p 0.8 \
--top-k 20 \
--repeat-penalty 1.05 \
--perf \
--n-gpu-layers all \
--jinja \
-m /models/qwen3-coder-30b-a3b/Qwen3-Coder-30B-A3B-Instruct-Q5_K_M.gguf \
--alias code
# Health Check
HealthCmd=CMD-SHELL curl --fail http://127.0.0.1:8003/health || exit 1
HealthInterval=10s
HealthRetries=3
HealthStartPeriod=10s
HealthTimeout=30s
HealthOnFailure=kill
EnvironmentFile=/home/ai/.llama-api/keys.env
[Service]
Restart=always
# Extend Timeout to allow time to pull the image
TimeoutStartSec=900
[Install]
# Start by default on boot
WantedBy=multi-user.target default.target

View File

@@ -2,7 +2,7 @@
Description=A Llama CPP Server For Embedding Models
[Container]
# Shared AI internal pod
# Shared AI internal pod without internet access
Pod=ai-internal.pod
# Image is built locally via podman build
@@ -18,21 +18,22 @@ AddDevice=/dev/dri
# Server command
Exec=--port 8001 \
-c 0 \
-b 1024 \
-ub 1024 \
--perf \
--n-gpu-layers all \
--models-max 1 \
--models-dir /models \
--embedding \
-m /models/qwen3-embed-4b/Qwen3-Embedding-4B-Q8_0.gguf \
-m /models/emebeddinggemma-300m/embeddinggemma-300M-BF16.gguf \
--alias embed
# Health Check
HealthCmd=CMD-SHELL curl --fail http://127.0.0.1:8001/props || exit 1
HealthCmd=CMD-SHELL curl --fail http://127.0.0.1:8001/health || exit 1
HealthInterval=10s
HealthRetries=3
HealthStartPeriod=10s
HealthTimeout=30s
HealthOnFailure=kill
EnvironmentFile=/home/ai/.llama-api/keys.env
[Service]
Restart=always
@@ -41,4 +42,4 @@ TimeoutStartSec=900
[Install]
# Start by default on boot
WantedBy=multi-user.target default.target
WantedBy=multi-user.target default.target

View File

@@ -1,8 +1,8 @@
[Unit]
Description=A Llama CPP Server Running GPT OSS 120b
Description=A Llama CPP Server Running a Non-Reasoning Model
[Container]
# Shared AI internal pod
# Shared AI internal pod without internet access
Pod=ai-internal.pod
# Image is built locally via podman build
@@ -17,29 +17,31 @@ AddDevice=/dev/dri
# Server command
Exec=--port 8002 \
-c 64000 \
--perf \
-v \
--top-k 20 \
--top-p 0.8 \
--min-p 0 \
--presence-penalty 1.5 \
--repeat-penalty 1 \
-c 262144 \
-n 32768 \
--temp 0.7 \
--top-p 0.8 \
--min-p 0.0 \
--top-k 20 \
--repeat-penalty 1.0 \
--presence-penalty 1.5 \
--reasoning-budget 0 \
--perf \
--n-gpu-layers all \
--jinja \
-m /models/qwen3.6-35b-a3b/Qwen3.6-35B-A3B-UD-Q5_K_M.gguf \
--mmproj /models/qwen3.6-35b-a3b/mmproj-F16.gguf \
--chat-template-kwargs '{"enable_thinking": false}' \
-m /models/qwen3.5-35b-a3b/Qwen3.5-35B-A3B-Q8_0.gguf \
--mmproj /models/qwen3.5-35b-a3b/mmproj-F16.gguf \
--alias instruct
# Health Check
HealthCmd=CMD-SHELL curl --fail http://127.0.0.1:8000/health || exit 1
HealthCmd=CMD-SHELL curl --fail http://127.0.0.1:8002/health || exit 1
HealthInterval=10s
HealthRetries=3
HealthStartPeriod=10s
HealthTimeout=30s
HealthOnFailure=kill
EnvironmentFile=/home/ai/.llama-api/keys.env
[Service]
Restart=always
@@ -48,4 +50,4 @@ TimeoutStartSec=900
[Install]
# Start by default on boot
WantedBy=multi-user.target default.target
WantedBy=multi-user.target default.target

View File

@@ -1,8 +1,8 @@
[Unit]
Description=A Llama CPP Server Running GPT OSS 120b
Description=A Llama CPP Server Running a Reasoning Model
[Container]
# Shared AI internal pod
# Shared AI internal pod without internet access
Pod=ai-internal.pod
# Image is built locally via podman build
@@ -17,16 +17,21 @@ AddDevice=/dev/dri
# Server command
Exec=--port 8000 \
-c 128000 \
--top-k 64 \
-c 262144 \
-n 32768 \
--temp 0.7 \
--top-p 0.95 \
--temp 1.0 \
--top-k 20 \
--min-p 0.0 \
--presence-penalty 0.0 \
--repeat-penalty 1.0 \
--reasoning-budget 5000 \
-fa on \
--perf \
-v \
--n-gpu-layers all \
--jinja \
-m /models/gemma-4-26b-a4b/gemma-4-26B-A4B-it-UD-Q8_K_XL.gguf \
--mmproj /models/gemma-4-26b-a4b/mmproj-BF16.gguf \
-m /models/qwen3.6-35b-a3b/Qwen3.6-35B-A3B-UD-Q5_K_M.gguf \
--mmproj /models/qwen3.6-35b-a3b/mmproj-F16.gguf \
--alias think
# Health Check
@@ -44,4 +49,4 @@ TimeoutStartSec=900
[Install]
# Start by default on boot
WantedBy=multi-user.target default.target
WantedBy=multi-user.target default.target

View File

@@ -2,7 +2,7 @@
Description=A Stable Diffusion CPP Server for Editing Images
[Container]
# Shared AI Internal pod
# Shared AI internal pod without internet access
Pod=ai-internal.pod
# Vulkan image for AMD GPU
@@ -23,7 +23,7 @@ Exec=-l 0.0.0.0 \
--listen-port 1235 \
--diffusion-model /models/image/flux2-klein/flux-2-klein-9b-Q8_0.gguf \
--vae /models/image/flux2-klein/ae.safetensors \
--llm /models/image/flux2-klein/Qwen3-8B-Q8_0.gguf \
--llm /models/image/flux2-klein/Qwen3-8B-Q4_K_M.gguf \
-v \
--sampling-method euler \
--cfg-scale 1.0 \

View File

@@ -2,7 +2,7 @@
Description=A Stable Diffusion CPP Server for Generating Images
[Container]
# Shared AI internal pod
# Shared AI internal pod without internet access
Pod=ai-internal.pod
# Vulkan image for AMD GPU
@@ -23,7 +23,7 @@ Exec=-l 0.0.0.0 \
--listen-port 1234 \
--diffusion-model /models/image/z-turbo/z_image_turbo-Q8_0.gguf \
--vae /models/image/z-turbo/ae.safetensors \
--llm /models/image/z-turbo/Qwen3-4B-Instruct-2507-Q8_0.gguf \
--llm /models/image/z-turbo/Qwen3-4B-Instruct-2507-Q4_K_M.gguf \
-v \
--cfg-scale 1.0 \
--vae-conv-direct \

View File

@@ -1,6 +1,7 @@
# BTRFS
- [BTRFS](#btrfs)
- [Disk Usage](#disk-usage)
- [Naming Conventions](#naming-conventions)
- [Creating an Array](#creating-an-array)
- [Converting an Array Between RAID Versions](#converting-an-array-between-raid-versions)
@@ -19,6 +20,15 @@ Oracle [has decent docs here](https://docs.oracle.com/en/operating-systems/oracl
You'll also want to [read about btrfs compression](https://thelinuxcode.com/enable-btrfs-filesystem-compression/)
## Disk Usage
With compression, the actual size on disk can be obscured. Use the following
command to check the actual file size of all files in a directory.
```bash
find . -name ".snapshots" -prune -o -type f -exec du -h {} + | sort -hr
```
## Naming Conventions
`poolX` is my naming convention for data pools. `pool0` is the first pool you create.

View File

@@ -0,0 +1,4 @@
# iSCSI
## Server

View File

@@ -365,10 +365,10 @@ We'll use traefik gateway to provide ingress.
helm repo add traefik https://traefik.github.io/charts
helm repo update
# Create the traefik namespace
kubectl create namespace traefik
# Generate a selfsigned certificate valid for *.reeselink.com
mkdir active/kubernetes_traefik/secrets
openssl req -x509 -nodes -days 3650 -newkey rsa:2048 \
-keyout active/kubernetes_traefik/secrets/tls.key -out active/kubernetes_traefik/secrets/tls.crt \
-subj "/CN=*.reeselink.com"
@@ -379,29 +379,33 @@ kubectl create secret tls local-selfsigned-tls \
--namespace traefik
# Install the chart into the 'traefik' namespace
helm install traefik traefik/traefik \
helm upgrade --install traefik traefik/traefik \
--namespace traefik \
--values active/kubernetes_traefik/values.yaml
# Deploy a demo
kubectl apply -f active/kubernetes_traefik/demo-app.yaml
kubectl apply -f active/kubernetes_traefik/demo-route.yaml
```
## Longhorn Storage
Longhorn provides replicated block storage via raw files on the nodes.
On the host you need to install iscsiadm
```bash
dnf install iscsi-initiator-utils
systemctl enable --now iscsid
```
```bash
helm repo add longhorn https://charts.longhorn.io
helm repo update
helm upgrade --install longhorn longhorn/longhorn \
--namespace longhorn-system \
--create-namespace \
--set "httproute.enabled=true" \
--set "httproute.parentRefs[0].name=traefik-gateway" \
--set "httproute.parentRefs[0].namespace=traefik" \
--set "httproute.hostnames[0]=longhorn.reeselink.com" \
--set "persistence.defaultClassReplicaCount=1"
# Check that the route was created
kubectl get httproute longhorn-httproute -n longhorn-system -o jsonpath='{.status.parents[*].conditions}'

View File

@@ -0,0 +1,46 @@
# Opencode
## install
```bash
curl -fsSL https://opencode.ai/install | bash
```
## configure custom llama.cpp server
Opencode supports any OpenAI-compatible API. Set the following environment variables to point it at your llama.cpp server:
```bash
export OPENAI_API_KEY=""
export OPENAI_BASE_URL="http://driveripper.reeselink.com:8000/v1"
```
### persist across sessions
Add the exports to your shell profile (`~/.bashrc`, `~/.zshrc`, etc.):
```bash
echo 'export OPENAI_API_KEY=""' >> ~/.bashrc
echo 'export OPENAI_BASE_URL="http://driveripper.reeselink.com:8000/v1"' >> ~/.bashrc
source ~/.bashrc
```
### pick a model
After configuring the environment, launch opencode and select the model available from your llama.cpp instance:
```bash
opencode
```
Inside opencode, use `/model` to list available models and switch between them.
### verify the connection
Run this one-liner to confirm opencode can reach the server:
```bash
OPENAI_API_KEY="" OPENAI_BASE_URL="http://driveripper.reeselink.com:8000/v1" opencode --help
```
If no auth-related errors appear, the endpoint is reachable.

View File

@@ -86,14 +86,14 @@ dnf install openscap-scanner scap-security-guide
# Test with qemu
virt-install \
--name "fedora43-base" \
--boot uefi,firmware.feature0.name=secure-boot,firmware.feature0.enabled=no \
--cpu host-passthrough --vcpus sockets=1,cores=8,threads=2 \
--ram=8192 \
--os-variant=fedora41 \
--os-variant=fedora43 \
--network bridge:virbr0 \
--graphics none \
--console pty,target.type=virtio \
--name "fedora43-base" \
--import --disk "path=active/software_osbuild/secrets/fedora43base.qcow2,bus=virtio"
```

View File

@@ -273,7 +273,7 @@ virt-install \
--name "${VM_NAME}" \
--boot uefi,firmware.feature0.name=secure-boot,firmware.feature0.enabled=no \
--cpu host-passthrough --vcpus sockets=1,cores=8,threads=2 \
--ram=8192 \
--ram=4096 \
--os-variant=fedora41 \
--network bridge:virbr0 \
--graphics none \

View File

@@ -1,6 +1,83 @@
# Wireguard
## Install
## Manual Install
### 1. Install WireGuard
```bash
sudo dnf install -y wireguard-tools qrencode
```
### 2. Generate server keys
```bash
sudo mkdir -p /etc/wireguard
cd /etc/wireguard
umask 077
sudo wg genkey | sudo tee privatekey | sudo wg pubkey | sudo tee publickey
```
### 3. Create the WireGuard config
```bash
sudo tee /etc/wireguard/wg0.conf > /dev/null <<'EOF'
[Interface]
Address = 10.10.0.1/24
ListenPort = 51820
PrivateKey = INSERT_SERVER_PRIVATE_KEY_HERE
PostUp = firewall-cmd --add-port=51820/udp
PostDown = firewall-cmd --remove-port=51820/udp
[Peer]
# Clients will be added here
EOF
```
Replace `INSERT_SERVER_PRIVATE_KEY_HERE` with the content of `/etc/wireguard/privatekey`.
### 4. Enable IP forwarding
```bash
sudo tee /etc/sysctl.d/99-wireguard.conf > /dev/null <<'EOF'
net.ipv4.ip_forward = 1
net.ipv6.conf.all.forwarding = 1
EOF
sudo sysctl -p /etc/sysctl.d/99-wireguard.conf
```
### 5. Start and enable WireGuard
```bash
sudo systemctl enable --now wg-quick@wg0
```
### 6. Configure firewalld
```bash
# Allow WireGuard through the firewall
sudo firewall-cmd --permanent --add-port=51820/udp
# Enable masquerading (NAT) so clients can reach the internet
sudo firewall-cmd --permanent --add-masquerade
# Allow forwarded traffic from the WireGuard subnet
sudo firewall-cmd --permanent --add-rich-rule='rule family="ipv4" source address="10.10.0.0/24" accept'
# Reload and verify
sudo firewall-cmd --reload
sudo firewall-cmd --list-all
```
### 7. Verify it's working
```bash
sudo wg
ip addr show wg0
systemctl status wg-quick@wg0
```
## Ansible Install
```bash
ansible-playbook \
@@ -37,3 +114,4 @@ read
wg set wg0 peer $PUBKEY allowed-ips 10.10.0.$WG_IP_SUFFIX/32
wg-quick down wg0 && wg-quick up wg0
```

178
active/vibe_agent/main.py Normal file
View File

@@ -0,0 +1,178 @@
import json
import subprocess
from typing import Iterable
from openai import OpenAI
from openai.types.chat import ChatCompletionMessageParam, ChatCompletionToolUnionParam
client = OpenAI(base_url="https://llama-cpp.reeselink.com", api_key="")
class ToolCallController:
    """Bounded counter that gates how many tool invocations one run may make."""

    def __init__(self, max_tool_calls=10):
        # Budget of tool invocations allowed before the loop must stop.
        self.max_tool_calls = max_tool_calls
        # Invocations consumed so far.
        self.tool_call_count = 0

    def is_tool_call_allowed(self):
        """Return True while at least one invocation remains in the budget."""
        return self.tool_call_count < self.max_tool_calls

    def increment(self):
        """Record that one tool invocation has been made."""
        self.tool_call_count += 1

    def reset(self):
        """Restart the counter for a fresh conversation."""
        self.tool_call_count = 0
# Register tools


def _simple_tool(name, description, properties, required):
    """Build one OpenAI function-tool spec dict with an object parameter schema."""
    return {
        "type": "function",
        "function": {
            "name": name,
            "description": description,
            "parameters": {
                "type": "object",
                "properties": properties,
                "required": required,
            },
        },
    }


tools: "Iterable[ChatCompletionToolUnionParam]" = [
    _simple_tool(
        "list_servers",
        "Lists the available servers to perform operations on",
        {"server_id": {"type": "string", "enum": ["all"]}},
        ["server_id"],
    ),
    _simple_tool(
        "check_updates",
        "Check if a given server needs updated.",
        {"server_id": {"type": "string"}},
        ["server_id"],
    ),
    _simple_tool(
        "perform_updates",
        "Update a given server to the latest package versions. Does not reboot automatically.",
        {"server_id": {"type": "string"}},
        ["server_id"],
    ),
    _simple_tool(
        "reboot",
        "Reboot a given server. Waits for server to be responsive again.",
        {"server_id": {"type": "string"}},
        ["server_id"],
    ),
]
def list_servers() -> str:
    """Return the managed server ids as a comma-separated string."""
    servers = ["ignite"]
    return ",".join(servers)
def check_updates(server_id: str) -> str:
    """Run ``dnf check-update`` on *server_id* over SSH and return its output.

    Returns stdout when available, and falls back to stderr so that SSH or
    dnf failures are visible to the caller instead of being silently dropped
    (the original discarded stderr, making "no updates" and "ssh failed"
    indistinguishable).
    """
    command_result = subprocess.run(
        ["ssh", server_id, "dnf", "check-update"],
        capture_output=True,
        text=True,
        timeout=300,  # don't hang forever on an unreachable host
    )
    # dnf exits 100 when updates are available and 0 when none; any error
    # text from ssh/dnf arrives on stderr.
    return command_result.stdout or command_result.stderr
def perform_updates(server_id: str) -> str:
    """Stub: pretend to update *server_id* and report that a reboot is needed.

    NOTE(review): placeholder — no command is actually executed yet.
    Fixes the message grammar ("Successfully updates" -> "Successfully updated").
    """
    return f"Successfully updated {server_id}. Reboot required."
def reboot(server_id: str):
    """Stub: report that *server_id* was rebooted (no command is actually run)."""
    return "Rebooted {} successfully.".format(server_id)
def execute_tool(tool_name, arguments):
    """Dispatch *tool_name* to its implementation, unpacking *arguments*.

    Raises ValueError for a tool name the router does not know.
    """
    if tool_name == "list_servers":
        # list_servers takes no parameters; the schema's server_id is ignored.
        return list_servers()
    if tool_name == "check_updates":
        return check_updates(**arguments)
    if tool_name == "perform_updates":
        return perform_updates(**arguments)
    if tool_name == "reboot":
        return reboot(**arguments)
    raise ValueError(f"Unknown tool: {tool_name}")
def run_conversation(user_message: str, max_tool_calls=10):
    """Drive a tool-calling chat loop until the model stops requesting tools.

    Parameters:
        user_message: the initial user request.
        max_tool_calls: budget of tool invocations before the loop is cut off.

    Returns the content of the final message in the transcript.

    Fixes: the original returned ``messages[-1].content``, which raises
    AttributeError when the budget is exhausted (the last entry is then a
    plain dict we appended, not an SDK message object). The transcript is
    also annotated as ``list`` since it is mutated with ``append``.
    """
    print("Processing initial message")
    controller = ToolCallController(max_tool_calls=max_tool_calls)
    messages: list[ChatCompletionMessageParam] = [
        {
            "role": "system",
            "content": "You are a system administrator with access to a variety of administrator tools.",
        },
        {"role": "user", "content": user_message},
    ]
    while True:
        if not controller.is_tool_call_allowed():
            messages.append(
                {
                    "role": "user",
                    "content": "You've reached the maximum number of tool calls. Please summarize based on available information.",
                }
            )
            break
        response = client.chat.completions.create(
            model="qwen3.5-35b-a3b", messages=messages, tools=tools, tool_choice="auto"
        )
        message = response.choices[0].message
        messages.append(message)
        if not message.tool_calls:
            # Plain assistant reply: the conversation is finished.
            break
        for tool_call in message.tool_calls:
            controller.increment()
            tool_name = tool_call.function.name
            arguments = json.loads(tool_call.function.arguments)
            print(f"Attempting to call {tool_name} with arguments {arguments}...")
            result = execute_tool(tool_name, arguments)
            messages.append(
                {
                    "role": "tool",
                    "tool_call_id": tool_call.id,
                    "content": str(result),
                }
            )
    final = messages[-1]
    # The last entry is either a dict we appended or an SDK message object.
    return final["content"] if isinstance(final, dict) else final.content
# Example usage — guarded so importing this module does not trigger a run.
if __name__ == "__main__":
    print(
        run_conversation(
            "Can you check if any of my servers need updates and update them? If they need rebooted, reboot them."
        )
    )

300
active/vibe_agent/main1.py Normal file
View File

@@ -0,0 +1,300 @@
import json
import subprocess
from typing import Iterable
from openai import BadRequestError, OpenAI
from openai.types.chat import ChatCompletionMessageParam, ChatCompletionToolUnionParam
client = OpenAI(base_url="https://llama-cpp.reeselink.com", api_key="")
"""
Tool Ideas:
- TTS
- Self DDOS
- Make a funny little guy
- Make slopbox a vtuber
- Install arbitrary package
- Open random ports in the firewall
- Change system language
- Solve a tool issue
- Read man page
- Read woman page
- Read nonbinary page
- Download a youtube video
- ffmpeg video to gif
- inflict damage on computer
- Make fanart and fanfiction of our dnd characters
- Identify potential issues in log files
- become depressed
- Function that switches system prompts
- Pick patrick's optimal food choices for a day
- Play the fish and a rice cake video (https://www.youtube.com/watch?v=uYHAR8Xzsyo)
- Take a screenshot of the desktop, set mouse coordinates, take an action (mouse/keyboard)
- Order radioactive material online and deliver it to botswana
- How do we feel about Polaris Mall?
- rm -rf Polaris Mall management's brain stem
- Make the speakers output the "fish.mp3" at max volume
"""
class ToolCallController:
    """Bounded counter that gates how many tool invocations one run may make."""

    def __init__(self, max_tool_calls=10):
        # Budget of tool invocations allowed before the loop must stop.
        self.max_tool_calls = max_tool_calls
        # Invocations consumed so far.
        self.tool_call_count = 0

    def is_tool_call_allowed(self):
        """Return True while at least one invocation remains in the budget."""
        return self.tool_call_count < self.max_tool_calls

    def increment(self):
        """Record that one tool invocation has been made."""
        self.tool_call_count += 1

    def reset(self):
        """Restart the counter for a fresh conversation."""
        self.tool_call_count = 0
# Register tools


def _tool_spec(name, description, properties, required):
    """Build one OpenAI function-tool spec dict with an object parameter schema."""
    return {
        "type": "function",
        "function": {
            "name": name,
            "description": description,
            "parameters": {
                "type": "object",
                "properties": properties,
                "required": required,
            },
        },
    }


tools: "Iterable[ChatCompletionToolUnionParam]" = [
    _tool_spec(
        "list_servers",
        "Lists the available servers to perform operations on",
        {"server_id": {"type": "string", "enum": ["all"]}},
        ["server_id"],
    ),
    _tool_spec(
        "check_updates",
        "Check if a given server needs updated.",
        {"server_id": {"type": "string"}},
        ["server_id"],
    ),
    _tool_spec(
        "perform_updates",
        "Update a given server to the latest package versions. Does not reboot automatically.",
        {"server_id": {"type": "string"}},
        ["server_id"],
    ),
    _tool_spec(
        "reboot",
        "Reboot a given server. Waits for server to be responsive again.",
        {"server_id": {"type": "string"}},
        ["server_id"],
    ),
    _tool_spec(
        "install_package",
        "Install a given package using `dnf` on a Fedora server.",
        {"package_name": {"type": "string"}, "server_id": {"type": "string"}},
        ["package_name", "server_id"],
    ),
    _tool_spec(
        "arbitrary_shell",
        "Run any shell command in a bash shell as root.",
        {"command_string": {"type": "string"}, "server_id": {"type": "string"}},
        ["command_string", "server_id"],
    ),
]
##### FUNCTION DEFS #####
def list_servers() -> str:
    """Return the managed server ids as a comma-separated string."""
    servers = ["ignite"]
    return ",".join(servers)
def check_updates(server_id: str) -> str:
    """Run ``dnf check-update`` on *server_id* over SSH and return its output.

    Returns stdout when available, and falls back to stderr so that SSH or
    dnf failures are visible to the caller instead of being silently dropped
    (the original discarded stderr, making "no updates" and "ssh failed"
    indistinguishable).
    """
    command_result = subprocess.run(
        ["ssh", server_id, "dnf", "check-update"],
        capture_output=True,
        text=True,
        timeout=300,  # don't hang forever on an unreachable host
    )
    # dnf exits 100 when updates are available and 0 when none; any error
    # text from ssh/dnf arrives on stderr.
    return command_result.stdout or command_result.stderr
def perform_updates(server_id: str) -> str:
    """Stub: pretend to update *server_id* and report that a reboot is needed.

    NOTE(review): placeholder — no command is actually executed yet.
    Fixes the message grammar ("Successfully updates" -> "Successfully updated").
    """
    return f"Successfully updated {server_id}. Reboot required."
def reboot(server_id: str):
    """Stub: report that *server_id* was rebooted (no command is actually run)."""
    return "Rebooted {} successfully.".format(server_id)
def install_package(package_name: str, server_id: str) -> str:
    """Install *package_name* on *server_id* via SSH + dnf; return both streams."""
    proc = subprocess.run(
        ["ssh", server_id, "dnf", "install", "-y", package_name],
        capture_output=True,
    )
    stdout_text = proc.stdout.decode()
    stderr_text = proc.stderr.decode()
    return f"STDOUT:\n{stdout_text}\n\nSTDERR:\n{stderr_text}"
def arbitrary_shell(command_string: str, server_id: str) -> str:
    """Run *command_string* in bash on *server_id* via SSH and return stdout.

    The command is quoted with shlex.quote because ssh joins its remote-command
    arguments with spaces: unquoted, a multi-word command reaches the remote
    ``bash -c`` as only its first word (the rest become positional parameters).
    Returns a fixed message if the command exceeds the 30 s timeout.
    """
    import shlex  # local import: only needed by this helper

    try:
        command_result = subprocess.run(
            ["ssh", server_id, "bash", "-c", shlex.quote(command_string)],
            capture_output=True,
            timeout=30,
        )
        output = command_result.stdout.decode()
    except subprocess.TimeoutExpired:
        output = "Command took too long and timed out."
    return output
##### TOOL ROUTER #####
def execute_tool(tool_name, arguments):
    """Dispatch *tool_name* to its implementation, unpacking *arguments*.

    Raises ValueError for a tool name the router does not know.
    """
    if tool_name == "list_servers":
        # list_servers takes no parameters; the schema's server_id is ignored.
        return list_servers()
    if tool_name == "check_updates":
        return check_updates(**arguments)
    if tool_name == "perform_updates":
        return perform_updates(**arguments)
    if tool_name == "reboot":
        return reboot(**arguments)
    if tool_name == "install_package":
        return install_package(**arguments)
    if tool_name == "arbitrary_shell":
        return arbitrary_shell(**arguments)
    raise ValueError(f"Unknown tool: {tool_name}")
##### CONVERSATION #####
def run_conversation(user_message: str, max_tool_calls=100):
    """Drive a tool-calling chat loop until the model stops requesting tools.

    Parameters:
        user_message: the initial user request.
        max_tool_calls: budget of tool invocations before the loop is cut off.

    Returns the content of the final message in the transcript.

    Fixes: the original ``except BadRequestError`` handler referenced
    ``tool_call``, which is unbound on the first iteration (NameError) and
    stale on later ones; it now inspects the message it actually popped.
    The transcript is annotated as ``list`` since it is mutated.
    """
    print("Processing initial message")
    controller = ToolCallController(max_tool_calls=max_tool_calls)
    messages: list[ChatCompletionMessageParam] = [
        {
            "role": "system",
            "content": "You are a system administrator with access to a variety of administrator tools.",
        },
        {"role": "user", "content": user_message},
    ]
    while True:
        if not controller.is_tool_call_allowed():
            messages.append(
                {
                    "role": "user",
                    "content": "You've reached the maximum number of tool calls. Please summarize based on available information.",
                }
            )
            break
        try:
            response = client.chat.completions.create(
                model="qwen3.5-35b-a3b",
                messages=messages,
                tools=tools,
                tool_choice="auto",
            )
        except BadRequestError:
            # The transcript no longer fits in the context window: drop the
            # most recent message and stand in a short placeholder for it.
            print("Request over context limit, removing last message...")
            dropped = messages.pop()
            role = dropped.get("role") if isinstance(dropped, dict) else getattr(dropped, "role", None)
            if role == "tool":
                messages.append(
                    {
                        "role": "tool",
                        "tool_call_id": dropped.get("tool_call_id", ""),
                        "content": "This tool call resulted in data that exceeded the context length limit.",
                    }
                )
            else:
                messages.append(
                    {
                        "role": "user",
                        "content": "The previous message was removed because it exceeded the context length limit.",
                    }
                )
            continue
        print()
        print(response.choices[0].message)
        message = response.choices[0].message
        messages.append(message)
        if not message.tool_calls:
            # Plain assistant reply: the conversation is finished.
            break
        for tool_call in message.tool_calls:
            controller.increment()
            tool_name = tool_call.function.name
            arguments = json.loads(tool_call.function.arguments)
            print(f"Attempting to call {tool_name} with arguments {arguments}...")
            result = execute_tool(tool_name, arguments)
            messages.append(
                {
                    "role": "tool",
                    "tool_call_id": tool_call.id,
                    "content": str(result),
                }
            )
    final = messages[-1]
    # The last entry is either a dict we appended or an SDK message object.
    return final["content"] if isinstance(final, dict) else final.content
# Example usage — guarded so importing this module does not trigger a run.
# NOTE(review): this demo deliberately asks for unsafe actions (default user
# with a disclosed password); it is a red-team playground prompt, not prod use.
if __name__ == "__main__":
    print(
        run_conversation(
            "Install and set up a postgres server on all available servers. Open the firewall ports necessary. Add a default user with a simple password and tell me what the password is."
        )
    )

View File

@@ -0,0 +1,81 @@
import asyncio
from fastapi import FastAPI, Request
from fastapi.responses import Response
from mcp.server import Server
from mcp.server.sse import SseServerTransport
from starlette.responses import StreamingResponse
# 1. Initialize the MCP Server logic
# This is where you define your tools, resources, and prompts.
# The string names this server as seen by connecting MCP clients.
mcp_server = Server("my-remote-server")
@mcp_server.list_tools()
async def handle_list_tools():
    """Advertise the tools this server exposes to MCP clients."""
    weather_tool = {
        "name": "get_weather",
        "description": "Get the current weather for a location",
        "inputSchema": {
            "type": "object",
            "properties": {"location": {"type": "string"}},
            "required": ["location"],
        },
    }
    return [weather_tool]
@mcp_server.call_tool()
async def handle_call_tool(name: str, arguments: dict):
    """Execute the named tool and return its MCP content blocks."""
    if name != "get_weather":
        raise ValueError(f"Tool not found: {name}")
    location = arguments.get("location", "Unknown")
    # In a real app, call an actual Weather API here
    report = f"The weather in {location} is sunny and 25°C."
    return [{"type": "text", "text": report}]
# 2. Initialize FastAPI
app = FastAPI(title="Remote MCP Server")
# 3. Create the SSE Transport layer
# This object manages the connection between the web and the MCP protocol.
# "/messages" is the POST path clients use for the request half of the channel.
sse = SseServerTransport("/messages")
@app.get("/sse")
async def sse_endpoint(request: Request):
    """
    The client connects here to start the SSE stream.
    The server will push messages to the client through this connection.
    """
    # NOTE(review): request._send is a private Starlette attribute that the
    # SSE transport needs — confirm it survives Starlette upgrades.
    async with sse.connect_sse(request.scope, request.receive, request._send) as (
        read_stream,
        write_stream,
    ):
        # We run the MCP server using the streams provided by the SSE transport
        await mcp_server.run(
            read_stream, write_stream, mcp_server.create_initialization_options()
        )
@app.post("/messages")
async def messages_endpoint(request: Request):
    """
    The client sends JSON-RPC messages (tool calls, etc.)
    via POST requests to this endpoint.
    """
    await sse.handle_post_message(request.scope, request.receive, request._send)
    # 202 Accepted: any responses flow back over the SSE stream, not this request.
    return Response(status_code=202)
if __name__ == "__main__":
    # Serve on all interfaces; uvicorn is imported lazily for script-only use.
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)

View File

@@ -7,7 +7,7 @@ from email.message import EmailMessage
from pathlib import Path
from typing import Iterable, TypedDict, cast
from dotenv import dotenv_values
from dotenv import dotenv_values, load_dotenv
from openai import OpenAI
from openai.types.chat import ChatCompletionMessageParam, ChatCompletionToolUnionParam
@@ -320,7 +320,9 @@ def run_conversation(user_message: str, max_tool_calls=10):
if __name__ == "__main__":
client = OpenAI(base_url="https://llama-cpp.reeselink.com", api_key="")
load_dotenv()
api_key = os.getenv("OPENAI_API_KEY", "")
client = OpenAI(base_url="https://llama-think.reeselink.com", api_key=api_key)
# Example usage
print(
run_conversation(