Add model selection to OpenAI payloads

This commit is contained in:
Jordan Wages 2026-01-30 02:11:08 -06:00
commit 35aadfac5a
8 changed files with 134 additions and 9 deletions

View file

@ -34,6 +34,7 @@ There are currently no automated tests for this project. If you add tests in the
## Endpoint Notes
Sortana targets the `/v1/completions` API. The endpoint value stored in settings is a base URL; the full request URL is constructed by appending `/v1/completions` (adding a slash when needed) and defaulting to `https://` if no scheme is provided.
The options page can query `/v1/models` from the same base URL to populate the Model dropdown; selecting **None** omits the `model` field from the request payload.
Responses are expected to include a JSON object with `match` (or `matched`) plus a short `reason` string; the parser extracts the last JSON object in the response text and ignores any surrounding commentary.
## Documentation

View file

@ -14,6 +14,7 @@ expecting a `match` (or `matched`) boolean plus a `reason` string.
## Features
- **Configurable endpoint** set the classification service base URL on the options page.
- **Model selection** load available models from the endpoint and choose one (or omit the model field).
- **Prompt templates** choose between OpenAI/ChatML, Qwen, Mistral, Harmony (gpt-oss), or provide your own custom template.
- **Custom system prompts** tailor the instructions sent to the model for more precise results.
- **Persistent result caching** classification results and reasoning are saved to disk so messages aren't re-evaluated across restarts.
@ -79,7 +80,8 @@ Sortana is implemented entirely with standard WebExtension scripts—no custom e
## Usage
1. Open the add-on's options and set the base URL of your classification service
(Sortana will append `/v1/completions`).
(Sortana will append `/v1/completions`). Use the Model dropdown to load
`/v1/models` and select a model or choose **None** to omit the `model` field.
2. Use the **Classification Rules** section to add a criterion and optional
actions such as tagging, moving, copying, forwarding, replying,
deleting or archiving a message when it matches. Drag rules to

View file

@ -484,7 +484,7 @@ async function clearCacheForMessages(idsInput) {
}
try {
const store = await storage.local.get(["endpoint", "templateName", "customTemplate", "customSystemPrompt", "aiParams", "debugLogging", "htmlToMarkdown", "stripUrlParams", "altTextImages", "collapseWhitespace", "tokenReduction", "aiRules", "theme", "showDebugTab"]);
const store = await storage.local.get(["endpoint", "model", "templateName", "customTemplate", "customSystemPrompt", "aiParams", "debugLogging", "htmlToMarkdown", "stripUrlParams", "altTextImages", "collapseWhitespace", "tokenReduction", "aiRules", "theme", "showDebugTab"]);
logger.setDebug(store.debugLogging);
await AiClassifier.setConfig(store);
userTheme = store.theme || 'auto';
@ -514,9 +514,10 @@ async function clearCacheForMessages(idsInput) {
aiRules = normalizeRules(newRules);
logger.aiLog("aiRules updated from storage change", { debug: true }, aiRules);
}
if (changes.endpoint || changes.templateName || changes.customTemplate || changes.customSystemPrompt || changes.aiParams || changes.debugLogging) {
if (changes.endpoint || changes.model || changes.templateName || changes.customTemplate || changes.customSystemPrompt || changes.aiParams || changes.debugLogging) {
const config = {};
if (changes.endpoint) config.endpoint = changes.endpoint.newValue;
if (changes.model) config.model = changes.model.newValue;
if (changes.templateName) config.templateName = changes.templateName.newValue;
if (changes.customTemplate) config.customTemplate = changes.customTemplate.newValue;
if (changes.customSystemPrompt) config.customSystemPrompt = changes.customSystemPrompt.newValue;

View file

@ -1,7 +1,7 @@
{
"manifest_version": 2,
"name": "Sortana",
"version": "2.2.0",
"version": "2.4.0",
"default_locale": "en-US",
"applications": {
"gecko": {

View file

@ -16,6 +16,7 @@ try {
}
const COMPLETIONS_PATH = "/v1/completions";
const MODELS_PATH = "/v1/models";
const SYSTEM_PREFIX = `You are an email-classification assistant.
Read the email below and the classification criterion provided by the user.
@ -38,6 +39,7 @@ let gCustomSystemPrompt = DEFAULT_CUSTOM_SYSTEM_PROMPT;
let gTemplateText = "";
let gAiParams = Object.assign({}, DEFAULT_AI_PARAMS);
let gModel = "";
let gCache = new Map();
let gCacheLoaded = false;
@ -50,7 +52,7 @@ function normalizeEndpointBase(endpoint) {
if (!base) {
return "";
}
base = base.replace(/\/v1\/completions\/?$/i, "");
base = base.replace(/\/v1\/(completions|models)\/?$/i, "");
return base;
}
@ -61,7 +63,19 @@ function buildEndpointUrl(endpointBase) {
}
const withScheme = /^https?:\/\//i.test(base) ? base : `https://${base}`;
const needsSlash = withScheme.endsWith("/");
return `${withScheme}${needsSlash ? "" : "/"}v1/completions`;
const path = COMPLETIONS_PATH.replace(/^\//, "");
return `${withScheme}${needsSlash ? "" : "/"}${path}`;
}
function buildModelsUrl(endpointBase) {
    // Resolve the model-listing URL for a configured endpoint base.
    // Mirrors buildEndpointUrl, but appends MODELS_PATH instead of the
    // completions path; returns "" when no usable base is configured.
    const base = normalizeEndpointBase(endpointBase);
    if (!base) {
        return "";
    }
    // Default to https:// when the stored value carries no scheme.
    const root = /^https?:\/\//i.test(base) ? base : `https://${base}`;
    const separator = root.endsWith("/") ? "" : "/";
    const relativePath = MODELS_PATH.replace(/^\//, "");
    return `${root}${separator}${relativePath}`;
}
function sha256HexSync(str) {
@ -206,6 +220,9 @@ async function setConfig(config = {}) {
}
}
}
if (typeof config.model === "string") {
gModel = config.model.trim();
}
if (typeof config.debugLogging === "boolean") {
setDebug(config.debugLogging);
}
@ -263,6 +280,9 @@ function buildPayload(text, criterion) {
let payloadObj = Object.assign({
prompt: buildPrompt(text, criterion)
}, gAiParams);
if (gModel) {
payloadObj.model = gModel;
}
return JSON.stringify(payloadObj);
}
@ -457,4 +477,4 @@ async function init() {
await loadCache();
}
export { buildEndpointUrl, normalizeEndpointBase, classifyText, setConfig, removeCacheEntries, clearCache, getReason, getCachedResult, buildCacheKey, getCacheSize, init };
export { buildEndpointUrl, buildModelsUrl, normalizeEndpointBase, classifyText, setConfig, removeCacheEntries, clearCache, getReason, getCachedResult, buildCacheKey, getCacheSize, init };

View file

@ -3,6 +3,7 @@ const storage = (globalThis.messenger ?? browser).storage;
const KEY_GROUPS = {
settings: [
'endpoint',
'model',
'templateName',
'customTemplate',
'customSystemPrompt',

View file

@ -77,6 +77,21 @@
<p class="help" id="endpoint-preview"></p>
</div>
<div class="field">
<label class="label" for="model-select">Model</label>
<div class="field has-addons">
<div class="control is-expanded">
<div class="select is-fullwidth">
<select id="model-select"></select>
</div>
</div>
<div class="control">
<button class="button" id="refresh-models" type="button">Refresh</button>
</div>
</div>
<p class="help" id="model-help"></p>
</div>
<div class="field">
<label class="label" for="template">Prompt template</label>
<div class="control">

View file

@ -10,6 +10,7 @@ document.addEventListener('DOMContentLoaded', async () => {
'templateName',
'customTemplate',
'customSystemPrompt',
'model',
'aiParams',
'debugLogging',
'htmlToMarkdown',
@ -100,6 +101,88 @@ document.addEventListener('DOMContentLoaded', async () => {
endpointInput.addEventListener('input', updateEndpointPreview);
updateEndpointPreview();
const modelSelect = document.getElementById('model-select');
const refreshModelsBtn = document.getElementById('refresh-models');
const modelHelp = document.getElementById('model-help');
const storedModel = typeof defaults.model === 'string' ? defaults.model : '';
function setModelHelp(message = '', isError = false) {
    // Update the status line under the model dropdown; isError switches
    // Bulma's "is-danger" styling on so failures render in red.
    if (!modelHelp) {
        return;
    }
    modelHelp.textContent = message;
    if (isError) {
        modelHelp.classList.add('is-danger');
    } else {
        modelHelp.classList.remove('is-danger');
    }
}
function populateModelOptions(models = [], selectedModel = '') {
    // Rebuild the model <select>: a "None" entry first, then (when needed)
    // the previously stored model, then every fetched model id.
    if (!modelSelect) return;
    const addOption = (value, label) => {
        const opt = document.createElement('option');
        opt.value = value;
        opt.textContent = label;
        modelSelect.appendChild(opt);
    };
    const modelIds = Array.isArray(models) ? models.filter(Boolean) : [];
    modelSelect.innerHTML = '';
    addOption('', 'None (omit model)');
    // Keep a saved model selectable even if the endpoint no longer lists it.
    if (selectedModel && !modelIds.includes(selectedModel)) {
        addOption(selectedModel, `Stored: ${selectedModel}`);
    }
    modelIds.forEach(id => addOption(id, id));
    // Fall back to the "None" entry when the requested value is absent.
    const canSelect = Array.from(modelSelect.options).some(opt => opt.value === selectedModel);
    modelSelect.value = canSelect ? selectedModel : '';
}
async function fetchModels(preferredModel = '') {
    // Query the endpoint's /v1/models listing and repopulate the dropdown,
    // preferring `preferredModel` (or the current selection) when present.
    if (!modelSelect || !refreshModelsBtn) return;
    const modelsUrl = AiClassifier.buildModelsUrl(endpointInput.value);
    if (!modelsUrl) {
        setModelHelp('Set a valid endpoint to load models.', true);
        populateModelOptions([], preferredModel || modelSelect.value);
        return;
    }
    refreshModelsBtn.disabled = true;
    setModelHelp('Loading models...');
    // Pull an id out of one entry; `fallback` lets bare-array responses of
    // plain strings pass through unchanged.
    const toId = (entry, fallback = '') => entry?.id ?? entry?.name ?? entry?.model ?? fallback;
    try {
        const resp = await fetch(modelsUrl, { method: 'GET' });
        if (!resp.ok) {
            throw new Error(`HTTP ${resp.status}`);
        }
        const payload = await resp.json();
        // Accept OpenAI-style { data: [...] }, { models: [...] }, or a bare array.
        let ids = [];
        if (Array.isArray(payload?.data)) {
            ids = payload.data.map(entry => toId(entry)).filter(Boolean);
        } else if (Array.isArray(payload?.models)) {
            ids = payload.models.map(entry => toId(entry)).filter(Boolean);
        } else if (Array.isArray(payload)) {
            ids = payload.map(entry => toId(entry, entry)).filter(Boolean);
        }
        ids = [...new Set(ids)];
        populateModelOptions(ids, preferredModel || modelSelect.value);
        setModelHelp(ids.length ? `Loaded ${ids.length} model${ids.length === 1 ? '' : 's'}.` : 'No models returned.');
    } catch (e) {
        logger.aiLog('[options] failed to load models', { level: 'warn' }, e);
        setModelHelp('Failed to load models. Check the endpoint and network.', true);
        populateModelOptions([], preferredModel || modelSelect.value);
    } finally {
        refreshModelsBtn.disabled = false;
    }
}
populateModelOptions([], storedModel);
refreshModelsBtn?.addEventListener('click', () => {
fetchModels(modelSelect.value);
});
const templates = {
openai: browser.i18n.getMessage('template.openai'),
qwen: browser.i18n.getMessage('template.qwen'),
@ -276,6 +359,7 @@ document.addEventListener('DOMContentLoaded', async () => {
await loadErrors();
updateDiffDisplay();
await fetchModels(storedModel);
[htmlToggle, stripUrlToggle, altTextToggle, collapseWhitespaceToggle, tokenReductionToggle].forEach(toggle => {
toggle.addEventListener('change', () => {
@ -914,6 +998,7 @@ document.addEventListener('DOMContentLoaded', async () => {
document.getElementById('save').addEventListener('click', async () => {
const endpoint = endpointInput.value.trim();
const model = modelSelect?.value || '';
const templateName = templateSelect.value;
const customTemplateText = customTemplate.value;
const customSystemPrompt = systemBox.value;
@ -979,10 +1064,10 @@ document.addEventListener('DOMContentLoaded', async () => {
const tokenReduction = tokenReductionToggle.checked;
const showDebugTab = debugTabToggle.checked;
const theme = themeSelect.value;
await storage.local.set({ endpoint, templateName, customTemplate: customTemplateText, customSystemPrompt, aiParams: aiParamsSave, debugLogging, htmlToMarkdown, stripUrlParams, altTextImages, collapseWhitespace, tokenReduction, aiRules: rules, theme, showDebugTab });
await storage.local.set({ endpoint, model, templateName, customTemplate: customTemplateText, customSystemPrompt, aiParams: aiParamsSave, debugLogging, htmlToMarkdown, stripUrlParams, altTextImages, collapseWhitespace, tokenReduction, aiRules: rules, theme, showDebugTab });
await applyTheme(theme);
try {
await AiClassifier.setConfig({ endpoint, templateName, customTemplate: customTemplateText, customSystemPrompt, aiParams: aiParamsSave, debugLogging });
await AiClassifier.setConfig({ endpoint, model, templateName, customTemplate: customTemplateText, customSystemPrompt, aiParams: aiParamsSave, debugLogging });
logger.setDebug(debugLogging);
} catch (e) {
logger.aiLog('[options] failed to apply config', {level: 'error'}, e);