Note: the SillyTavern staging branch now has official support for this.

How to use

  • Apply the patch on top of commit 68234cdcefad5ff70c229d578bce64fa810acb21
  • In config.yaml, set thinkingBudget: 16000 under the claude: section (see the snippet after this list)
  • In Reasoning formatting, set <think> as the prefix and </think> as the suffix
  • Raise your output tokens to thinkingBudget plus your actual desired response length (e.g. 16000 + 400 = 16400)
  • Turn off streaming
  • Make sure your prefill is turned off (prefill doesn't work with thinking)
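
Here's roughly what the relevant part of config.yaml would look like (a minimal sketch; 16000 is just a sample budget, and any other keys you already have under claude: stay as they are):

claude:
  thinkingBudget: 16000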

New version (supports streaming, doesn't require Reasoning setting tinkering, and adds the model to your ST dropdown)

Make sure you revert the previous patch before applying this one (one way to do it with git is shown below)
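
If you applied the old diff with git apply, reverting and applying could look like this (a sketch; the .diff filenames are just placeholders for wherever you saved them):

git apply -R paprika-old.diff
git apply paprika-new.diff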

diff --git a/public/index.html b/public/index.html
index 7a49adc3c..9860cedd1 100644
--- a/public/index.html
+++ b/public/index.html
@@ -2000,7 +2000,7 @@
                                             </span>
                                         </div>
                                     </div>
-                                    <div class="range-block" data-source="deepseek,openrouter,custom">
+                                    <div class="range-block" data-source="deepseek,openrouter,claude,custom">
                                         <label for="openai_show_thoughts" class="checkbox_label widthFreeExpand">
                                             <input id="openai_show_thoughts" type="checkbox" />
                                             <span>
@@ -2915,6 +2915,7 @@
                                 <h4 data-i18n="Claude Model">Claude Model</h4>
                                 <select id="model_claude_select">
                                     <optgroup label="Versions">
+                                        <option value="claude-3-7-sonnet-20250219">claude-3-7-sonnet-20250219</option>
                                         <option value="claude-3-5-sonnet-latest">claude-3-5-sonnet-latest</option>
                                         <option value="claude-3-5-sonnet-20241022">claude-3-5-sonnet-20241022</option>
                                         <option value="claude-3-5-sonnet-20240620">claude-3-5-sonnet-20240620</option>
diff --git a/public/scripts/openai.js b/public/scripts/openai.js
index 2c62458e9..371292e06 100644
--- a/public/scripts/openai.js
+++ b/public/scripts/openai.js
@@ -2149,6 +2149,9 @@ async function sendOpenAIRequest(type, messages, signal) {
  */
 function getStreamingReply(data, state) {
     if (oai_settings.chat_completion_source === chat_completion_sources.CLAUDE) {
+        if (oai_settings.show_thoughts) {
+            state.reasoning += (data?.delta?.thinking || '');
+        }
         return data?.delta?.text || '';
     } else if (oai_settings.chat_completion_source === chat_completion_sources.MAKERSUITE) {
         if (oai_settings.show_thoughts) {
diff --git a/public/scripts/reasoning.js b/public/scripts/reasoning.js
index cf9a76352..797b53e0d 100644
--- a/public/scripts/reasoning.js
+++ b/public/scripts/reasoning.js
@@ -70,6 +70,8 @@ export function extractReasoningFromData(data) {
             if (!oai_settings.show_thoughts) break;

             switch (oai_settings.chat_completion_source) {
+                case chat_completion_sources.CLAUDE:
+                    return data?.content?.[0]?.thinking ?? '';
                 case chat_completion_sources.DEEPSEEK:
                     return data?.choices?.[0]?.message?.reasoning_content ?? '';
                 case chat_completion_sources.OPENROUTER:
diff --git a/public/scripts/sse-stream.js b/public/scripts/sse-stream.js
index 17a31b567..cd127104c 100644
--- a/public/scripts/sse-stream.js
+++ b/public/scripts/sse-stream.js
@@ -121,14 +121,17 @@ async function* parseStreamData(json) {
         return;
     }
     // Claude
-    else if (typeof json.delta === 'object' && typeof json.delta.text === 'string') {
-        if (json.delta.text.length > 0) {
-            for (let i = 0; i < json.delta.text.length; i++) {
-                const str = json.delta.text[i];
-                yield {
-                    data: { ...json, delta: { text: str } },
+    else if (typeof json.delta === 'object' && (typeof json.delta.text === 'string' || typeof json.delta.thinking === 'string')) {
+        const deltaType = json.delta.type.split('_')[0]; // 'thinking' or 'text' (from thinking_delta / text_delta)
+        if (json.delta[deltaType].length > 0) {
+            for (let i = 0; i < json.delta[deltaType].length; i++) {
+                const str = json.delta[deltaType][i];
+                const chunky = {
+                    data: { ...json, delta: {} },
                     chunk: str,
                 };
+                chunky.data.delta[deltaType] = str;
+                yield chunky;
             }
         }
         return;
diff --git a/src/endpoints/backends/chat-completions.js b/src/endpoints/backends/chat-completions.js
index 018a25828..ed89f96d8 100644
--- a/src/endpoints/backends/chat-completions.js
+++ b/src/endpoints/backends/chat-completions.js
@@ -107,6 +107,7 @@ async function sendClaudeRequest(request, response) {
     const apiKey = request.body.reverse_proxy ? request.body.proxy_password : readSecret(request.user.directories, SECRET_KEYS.CLAUDE);
     const divider = '-'.repeat(process.stdout.columns);
     const enableSystemPromptCache = getConfigValue('claude.enableSystemPromptCache', false) && request.body.model.startsWith('claude-3');
+    const thinkingBudget = getConfigValue('claude.thinkingBudget', 0);
     let cachingAtDepth = getConfigValue('claude.cachingAtDepth', -1);
     // Disabled if not an integer or negative, or if the model doesn't support it
     if (!Number.isInteger(cachingAtDepth) || cachingAtDepth < 0 || !request.body.model.startsWith('claude-3')) {
@@ -125,6 +126,7 @@ async function sendClaudeRequest(request, response) {
             controller.abort();
         });
         const additionalHeaders = {};
+        const useThinking = thinkingBudget > 0 && request.body.model.startsWith('claude-3-7');
         const useTools = request.body.model.startsWith('claude-3') && Array.isArray(request.body.tools) && request.body.tools.length > 0;
         const useSystemPrompt = (request.body.model.startsWith('claude-2') || request.body.model.startsWith('claude-3')) && request.body.claude_use_sysprompt;
         const convertedPrompt = convertClaudeMessages(request.body.messages, request.body.assistant_prefill, useSystemPrompt, useTools, getPromptNames(request));
@@ -145,6 +147,13 @@ async function sendClaudeRequest(request, response) {
             top_k: request.body.top_k,
             stream: request.body.stream,
         };
+        if (useThinking) {
+            requestBody.thinking = { type: 'enabled', budget_tokens: thinkingBudget };
+            requestBody.temperature = 1; // extended thinking requires temperature = 1
+            requestBody.max_tokens += thinkingBudget; // thinking tokens count against max_tokens
+            delete requestBody.top_p; // top_p/top_k are not compatible with thinking
+            delete requestBody.top_k;
+        }
         if (useSystemPrompt) {
             if (enableSystemPromptCache && Array.isArray(convertedPrompt.systemPrompt) && convertedPrompt.systemPrompt.length) {
                 convertedPrompt.systemPrompt[convertedPrompt.systemPrompt.length - 1]['cache_control'] = { type: 'ephemeral' };
@@ -205,7 +214,7 @@ async function sendClaudeRequest(request, response) {

             /** @type {any} */
             const generateResponseJson = await generateResponse.json();
-            const responseText = generateResponseJson?.content?.[0]?.text || '';
+            const responseText = generateResponseJson?.content?.[useThinking ? 1 : 0]?.text || ''; // with thinking, content[0] is the thinking block and content[1] the text
             console.debug('Claude response:', generateResponseJson);

             // Wrap it back to OAI format + save the original content
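
For reference, here's roughly what the Messages API returns with thinking enabled, which is why the patch reads content[1] for the final text and splits streaming deltas on delta.type (shapes as handled by the patch above; the values are illustrative):

// Non-streaming: the thinking block comes first, then the text block,
// hence content[useThinking ? 1 : 0] above.
const exampleResponse = {
    content: [
        { type: 'thinking', thinking: 'Let me work through this...', signature: '...' },
        { type: 'text', text: 'Here is the answer.' },
    ],
};

// Streaming: each content_block_delta event carries either a thinking_delta
// or a text_delta, which is what the sse-stream.js change splits on.
const thinkingEvent = { type: 'content_block_delta', index: 0, delta: { type: 'thinking_delta', thinking: 'Let me' } };
const textEvent = { type: 'content_block_delta', index: 1, delta: { type: 'text_delta', text: 'Here' } };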

Old version (less intrusive, but requires the extra setup from the list above)

diff --git a/src/endpoints/backends/chat-completions.js b/src/endpoints/backends/chat-completions.js
index 018a25828..5fd818688 100644
--- a/src/endpoints/backends/chat-completions.js
+++ b/src/endpoints/backends/chat-completions.js
@@ -107,6 +107,7 @@ async function sendClaudeRequest(request, response) {
     const apiKey = request.body.reverse_proxy ? request.body.proxy_password : readSecret(request.user.directories, SECRET_KEYS.CLAUDE);
     const divider = '-'.repeat(process.stdout.columns);
     const enableSystemPromptCache = getConfigValue('claude.enableSystemPromptCache', false) && request.body.model.startsWith('claude-3');
+    const thinkingBudget = getConfigValue('claude.thinkingBudget', 0);
     let cachingAtDepth = getConfigValue('claude.cachingAtDepth', -1);
     // Disabled if not an integer or negative, or if the model doesn't support it
     if (!Number.isInteger(cachingAtDepth) || cachingAtDepth < 0 || !request.body.model.startsWith('claude-3')) {
@@ -125,6 +126,7 @@ async function sendClaudeRequest(request, response) {
             controller.abort();
         });
         const additionalHeaders = {};
+        const useThinking = thinkingBudget > 0 && request.body.model.startsWith('claude-3-7');
         const useTools = request.body.model.startsWith('claude-3') && Array.isArray(request.body.tools) && request.body.tools.length > 0;
         const useSystemPrompt = (request.body.model.startsWith('claude-2') || request.body.model.startsWith('claude-3')) && request.body.claude_use_sysprompt;
         const convertedPrompt = convertClaudeMessages(request.body.messages, request.body.assistant_prefill, useSystemPrompt, useTools, getPromptNames(request));
@@ -145,6 +147,12 @@ async function sendClaudeRequest(request, response) {
             top_k: request.body.top_k,
             stream: request.body.stream,
         };
+        if (useThinking) {
+            requestBody.thinking = { type: 'enabled', budget_tokens: thinkingBudget };
+            requestBody.temperature = 1; // extended thinking requires temperature = 1
+            delete requestBody.top_p; // top_p/top_k are not compatible with thinking
+            delete requestBody.top_k;
+        }
         if (useSystemPrompt) {
             if (enableSystemPromptCache && Array.isArray(convertedPrompt.systemPrompt) && convertedPrompt.systemPrompt.length) {
                 convertedPrompt.systemPrompt[convertedPrompt.systemPrompt.length - 1]['cache_control'] = { type: 'ephemeral' };
@@ -205,7 +213,12 @@ async function sendClaudeRequest(request, response) {

             /** @type {any} */
             const generateResponseJson = await generateResponse.json();
-            const responseText = generateResponseJson?.content?.[0]?.text || '';
+            let responseText = '';
+            if (!useThinking) {
+                responseText = generateResponseJson?.content?.[0]?.text || '';
+            } else {
+                responseText = '<think>' + (generateResponseJson?.content?.[0]?.thinking || '') + '</think>' + (generateResponseJson?.content?.[1]?.text || '');
+            }
             console.debug('Claude response:', generateResponseJson);

             // Wrap it back to OAI format + save the original content
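
With this old version the thinking arrives inline in the response text, which is why the Reasoning formatting prefix/suffix from the list above are needed. Conceptually, ST then splits it back apart like this (a simplified sketch, not ST's actual parser):

const raw = '<think>reasoning goes here</think>final reply';
const match = raw.match(/^<think>([\s\S]*?)<\/think>([\s\S]*)$/);
const reasoning = match ? match[1] : '';
const reply = match ? match[2] : raw;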

(it's called paprika btw)
