SmolLLM

LLM utility library

当前为 2025-03-04 提交的版本,查看 最新版本

此脚本不应直接安装。它是供其他脚本使用的外部库,要使用该库请加入元指令 // @require https://update.cn-greasyfork.org/scripts/528704/1546692/SmolLLM.js

您需要先安装一个扩展,例如 篡改猴、Greasemonkey 或 暴力猴,之后才能安装此脚本。

You will need to install an extension such as Tampermonkey to install this script.

您需要先安装一个扩展,例如 篡改猴 或 暴力猴,之后才能安装此脚本。

您需要先安装一个扩展,例如 篡改猴 或 Userscripts,之后才能安装此脚本。

您需要先安装一款用户脚本管理器扩展,例如 Tampermonkey,才能安装此脚本。

您需要先安装用户脚本管理器扩展后才能安装此脚本。

(我已经安装了用户脚本管理器,让我安装!)

您需要先安装一款用户样式管理器扩展,比如 Stylus,才能安装此样式。

您需要先安装一款用户样式管理器扩展,比如 Stylus,才能安装此样式。

您需要先安装一款用户样式管理器扩展,比如 Stylus,才能安装此样式。

您需要先安装一款用户样式管理器扩展后才能安装此样式。

您需要先安装一款用户样式管理器扩展后才能安装此样式。

您需要先安装一款用户样式管理器扩展后才能安装此样式。

(我已经安装了用户样式管理器,让我安装!)

// ==UserScript==
// @name         SmolLLM
// @namespace    http://tampermonkey.net/
// @version      0.1.5
// @description  LLM utility library
// @author       RoCry
// @grant        GM_xmlhttpRequest
// @require https://update.greasyfork.org/scripts/528703/1546610/SimpleBalancer.js
// @license MIT
// ==/UserScript==

class SmolLLM {
  /**
   * @throws {Error} if SimpleBalancer or GM_xmlhttpRequest are not in scope —
   *   both are hard requirements (see the @require / @grant metadata).
   */
  constructor() {
    // Ensure SimpleBalancer is available
    if (typeof SimpleBalancer === 'undefined') {
      throw new Error('SimpleBalancer is required for SmolLLM to work');
    }

    // Verify GM_xmlhttpRequest is available
    if (typeof GM_xmlhttpRequest === 'undefined') {
      throw new Error('GM_xmlhttpRequest is required for SmolLLM to work');
    }

    this.balancer = new SimpleBalancer();
    this.logger = console;
  }

  /**
   * Prepares request URL and JSON body based on the provider.
   *
   * URL suffix conventions for OpenAI-compatible providers:
   *   - base ends with '#'  → use the base verbatim (minus the '#')
   *   - base ends with '/'  → append 'chat/completions' (no '/v1' prefix)
   *   - otherwise           → append '/v1/chat/completions'
   *
   * @param {string} prompt - User prompt
   * @param {string} systemPrompt - System prompt (may be empty)
   * @param {string} modelName - Model name
   * @param {string} providerName - Provider name (anthropic, openai, gemini)
   * @param {string} baseUrl - API base URL
   * @returns {{url: string, data: Object}} - Request target and body
   */
  prepareRequestData(prompt, systemPrompt, modelName, providerName, baseUrl) {
    // Inspect the suffix BEFORE stripping trailing slashes: stripping first
    // would make the "ends with '/'" branch below unreachable.
    const trimmed = baseUrl.trim();
    const root = trimmed.replace(/\/+$/, '');

    let url, data;

    if (providerName === 'anthropic') {
      url = `${root}/v1/messages`;
      data = {
        model: modelName,
        max_tokens: 4096,
        messages: [{ role: 'user', content: prompt }],
        stream: true
      };
      if (systemPrompt) {
        data.system = systemPrompt;
      }
    } else if (providerName === 'gemini') {
      // alt=sse makes the endpoint emit standard SSE frames.
      url = `${root}/v1beta/models/${modelName}:streamGenerateContent?alt=sse`;
      data = {
        contents: [{ parts: [{ text: prompt }] }]
      };
      if (systemPrompt) {
        data.system_instruction = { parts: [{ text: systemPrompt }] };
      }
    } else {
      // OpenAI compatible APIs
      const messages = [];
      if (systemPrompt) {
        messages.push({ role: 'system', content: systemPrompt });
      }
      messages.push({ role: 'user', content: prompt });

      data = {
        messages: messages,
        model: modelName,
        stream: true
      };

      // Handle URL based on suffix (see doc comment above).
      if (trimmed.endsWith('#')) {
        url = trimmed.slice(0, -1); // Remove the # and use exact URL
      } else if (trimmed.endsWith('/')) {
        url = `${root}/chat/completions`; // Skip v1 prefix
      } else {
        url = `${root}/v1/chat/completions`; // Default pattern
      }
    }

    return { url, data };
  }

  /**
   * Prepares authentication headers for the given provider.
   *
   * @param {string} providerName - Provider name (anthropic, gemini, or OpenAI-compatible)
   * @param {string} apiKey - API key
   * @returns {Object} - Request headers
   */
  prepareHeaders(providerName, apiKey) {
    const headers = {
      'Content-Type': 'application/json'
    };

    if (providerName === 'anthropic') {
      headers['X-API-Key'] = apiKey;
      headers['Anthropic-Version'] = '2023-06-01';
    } else if (providerName === 'gemini') {
      headers['X-Goog-Api-Key'] = apiKey;
    } else {
      headers['Authorization'] = `Bearer ${apiKey}`;
    }

    return headers;
  }

  /**
   * Extracts the text delta from one SSE `data:` payload.
   *
   * @param {string} chunk - JSON payload of a single SSE event (no 'data: ' prefix)
   * @param {string} providerName - Provider name
   * @returns {string|null} - Extracted text content, or null if the event
   *   carries no text (e.g. '[DONE]', metadata events, parse failures)
   */
  processStreamChunk(chunk, providerName) {
    if (!chunk || chunk === '[DONE]') return null;

    try {
      let data;
      try {
        data = JSON.parse(chunk);
      } catch (e) {
        // Partial or malformed JSON — drop silently; the stream re-buffers
        // incomplete events upstream, so this should be rare.
        this.logger.error(`Error parsing chunk: ${e.message}, chunk: ${chunk}`);
        return null;
      }

      if (providerName === 'anthropic') {
        // Text arrives in content_block_delta events; other event types
        // (message_start, ping, ...) carry no text.
        if (data.type === 'content_block_delta' && data.delta && data.delta.text) {
          return data.delta.text;
        }
      } else if (providerName === 'gemini') {
        const parts = data.candidates
          && data.candidates[0]
          && data.candidates[0].content
          && data.candidates[0].content.parts;
        if (parts && parts.length > 0) {
          return parts[0].text || '';
        }
      } else {
        // OpenAI compatible: text arrives as choices[0].delta.content.
        if (data.choices
          && data.choices[0]
          && data.choices[0].delta
          && data.choices[0].delta.content) {
          return data.choices[0].delta.content;
        }
      }
    } catch (e) {
      this.logger.error(`Error processing chunk: ${e.message}`);
    }

    return null;
  }

  /**
   * Makes a streaming request to the LLM API and resolves with the full text.
   *
   * @param {Object} params - Request parameters
   * @param {string} params.prompt - User prompt (required)
   * @param {string} params.providerName - anthropic | gemini | OpenAI-compatible (required)
   * @param {string} [params.systemPrompt=''] - Optional system prompt
   * @param {string} params.model - Model name (required)
   * @param {string|string[]} params.apiKey - API key(s), balanced via SimpleBalancer (required)
   * @param {string|string[]} params.baseUrl - Base URL(s), balanced via SimpleBalancer (required)
   * @param {?Function} [params.handler=null] - Called with each text delta as it streams
   * @param {number} [params.timeout=60000] - Abort + reject after this many ms
   * @returns {Promise<string>} - Full concatenated response text
   * @throws {Error} if a required parameter is missing
   */
  async askLLM({
    prompt,
    providerName,
    systemPrompt = '',
    model,
    apiKey,
    baseUrl,
    handler = null,
    timeout = 60000
  }) {
    if (!prompt || !providerName || !model || !apiKey || !baseUrl) {
      throw new Error('Required parameters missing');
    }

    // Use balancer to choose an (api key, base URL) pair.
    [apiKey, baseUrl] = this.balancer.choosePair(apiKey, baseUrl);

    const { url, data } = this.prepareRequestData(
      prompt, systemPrompt, model, providerName, baseUrl
    );

    const headers = this.prepareHeaders(providerName, apiKey);

    // Log request info with a masked API key.
    const apiKeyPreview = `${apiKey.slice(0, 5)}...${apiKey.slice(-4)}`;
    this.logger.info(
      `Sending ${url} model=${model} api_key=${apiKeyPreview}, len=${prompt.length}`
    );

    return new Promise((resolve, reject) => {
      let responseText = ''; // accumulated model output
      let seen = '';         // portion of response body already consumed
      let pending = '';      // trailing, possibly-incomplete SSE event
      let settled = false;
      let timeoutId = null;
      let request = null;

      // Settle exactly once: later timeout/onload/onreadystatechange
      // callbacks become no-ops after the first resolve/reject.
      const settle = (fn, value) => {
        if (settled) return;
        settled = true;
        if (timeoutId) clearTimeout(timeoutId);
        fn(value);
      };

      // Process one complete SSE event ("data: {...}" lines).
      const emit = (event) => {
        for (const line of event.split('\n')) {
          if (!line.startsWith('data: ')) continue;
          const text = this.processStreamChunk(line.slice(6), providerName);
          if (text) {
            responseText += text;
            if (typeof handler === 'function') {
              handler(text);
            }
          }
        }
      };

      if (timeout) {
        timeoutId = setTimeout(() => {
          // Abort the in-flight request so it stops consuming resources.
          if (request && typeof request.abort === 'function') {
            request.abort();
          }
          settle(reject, new Error(`Request timed out after ${timeout}ms`));
        }, timeout);
      }

      request = GM_xmlhttpRequest({
        method: 'POST',
        url: url,
        headers: headers,
        data: JSON.stringify(data),
        responseType: 'stream',
        onprogress: (response) => {
          // responseText grows monotonically; take only the new suffix.
          const body = response.responseText || '';
          const chunk = body.substring(seen.length);
          seen = body;
          if (!chunk) return;

          // Buffer across calls: the last piece after splitting on '\n\n'
          // may be an incomplete event — keep it for the next chunk.
          pending += chunk;
          const events = pending.split('\n\n');
          pending = events.pop();

          for (const event of events) {
            if (event.trim()) emit(event);
          }
        },
        onload: (response) => {
          // For streaming transports this may not fire; handle errors here
          // and let onreadystatechange perform the final resolve.
          if (response.status !== 200) {
            settle(reject, new Error(
              `API request failed: ${response.status} - ${response.responseText}`
            ));
          }
        },
        onreadystatechange: (state) => {
          if (state.readyState !== 4) return;

          if (state.status && state.status !== 200) {
            settle(reject, new Error(
              `API request failed: ${state.status} - ${state.responseText}`
            ));
            return;
          }

          // Flush any buffered final event (streams may omit the trailing
          // blank line after the last event).
          if (pending.trim()) {
            emit(pending);
            pending = '';
          }

          // Fallback: some Gemini transports deliver the whole body at once
          // with no progress events. Only parse it if nothing was streamed,
          // otherwise we would duplicate text.
          if (responseText === '' && providerName === 'gemini' && state.responseText) {
            try {
              this.parseGeminiFullResponse(state.responseText, handler, (text) => {
                responseText += text;
              });
            } catch (e) {
              this.logger.error('Error parsing full Gemini response:', e);
            }
          }

          settle(resolve, responseText);
        },
        onerror: (error) => {
          this.logger.error('Request error:', error);
          settle(reject, new Error(`Request failed: ${error.error || JSON.stringify(error)}`));
        },
        ontimeout: () => {
          settle(reject, new Error(`Request timed out after ${timeout}ms`));
        }
      });
    });
  }

  /**
   * Handle Gemini's SSE format and return the extracted text.
   *
   * NOTE: previously this method tried to append to the `responseText`
   * parameter, but strings are passed by value in JavaScript, so the
   * caller never saw the accumulated text. The collected text is now
   * RETURNED; callers must use the return value. The parameter is kept
   * (unused) for signature compatibility.
   *
   * @param {string} chunk - Raw chunk data (one or more SSE events)
   * @param {string} _responseText - Unused; kept for backward compatibility
   * @param {?Function} handler - Callback invoked with each text delta
   * @returns {string} - All text extracted from this chunk (may be '')
   */
  handleGeminiSSE(chunk, _responseText, handler) {
    let collected = '';

    // Split by double newlines to get individual events.
    const events = chunk.split('\n\n');

    for (const event of events) {
      if (!event.trim()) continue;

      // Find the first 'data: ' line of this event.
      let dataContent = '';
      for (const line of event.split('\n')) {
        if (line.startsWith('data: ')) {
          dataContent = line.substring(6);
          break;
        }
      }

      if (!dataContent || dataContent === '[DONE]') continue;

      try {
        const data = JSON.parse(dataContent);

        const parts = data.candidates
          && data.candidates[0]
          && data.candidates[0].content
          && data.candidates[0].content.parts;

        if (parts && parts.length > 0) {
          const text = parts[0].text || '';
          if (text) {
            collected += text;
            if (typeof handler === 'function') {
              handler(text);
            }
          }
        }
      } catch (e) {
        this.logger.error('Error parsing Gemini SSE data:', e, dataContent);
      }
    }

    return collected;
  }

  /**
   * Parse a full Gemini SSE response body received in one piece.
   *
   * @param {string} responseText - Full response body (SSE framed)
   * @param {?Function} handler - Callback invoked with each text delta
   * @param {Function} accumulator - Called with each text delta to accumulate it
   */
  parseGeminiFullResponse(responseText, handler, accumulator) {
    const events = responseText.split('\n\n');

    for (const event of events) {
      if (!event.trim()) continue;

      for (const line of event.split('\n')) {
        if (!line.startsWith('data: ')) continue;

        const dataContent = line.substring(6);
        if (dataContent === '[DONE]') continue;

        try {
          const data = JSON.parse(dataContent);
          const parts = data.candidates
            && data.candidates[0]
            && data.candidates[0].content
            && data.candidates[0].content.parts;

          if (parts && parts.length > 0) {
            const text = parts[0].text || '';
            if (text) {
              accumulator(text);
              if (typeof handler === 'function') {
                handler(text);
              }
            }
          }
        } catch (e) {
          this.logger.error('Error parsing Gemini data in full response:', e);
        }
      }
    }
  }
}

// Make the class available globally in browser/userscript contexts.
// Guarded so the file can also be loaded where `window` does not exist
// (e.g. Node, workers) without throwing a ReferenceError.
if (typeof window !== 'undefined') {
  window.SmolLLM = SmolLLM;
}

// Export for CommonJS module systems if present.
if (typeof module !== 'undefined' && module.exports) {
  module.exports = SmolLLM;
}