SmolLLM

LLM utility library


This script should not be installed directly. It is an external library for other scripts to use. To use this library, include the meta directive: // @require https://update.cn-greasyfork.org/scripts/528704/1546875/SmolLLM.js
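
For example, a consuming userscript's metadata block might look like this (the script name and match pattern are placeholders):

// ==UserScript==
// @name         My Script Using SmolLLM
// @match        https://example.com/*
// @require      https://update.cn-greasyfork.org/scripts/528704/1546875/SmolLLM.js
// ==/UserScript==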


// ==UserScript==
// @name         SmolLLM
// @namespace    http://tampermonkey.net/
// @version      0.1.13
// @description  LLM utility library
// @author       RoCry
// @require      https://update.greasyfork.org/scripts/528703/1546610/SimpleBalancer.js
// @license      MIT
// ==/UserScript==

class SmolLLM {
  constructor() {
    if (typeof SimpleBalancer === 'undefined') {
      throw new Error('SimpleBalancer is required for SmolLLM to work');
    }

    this.balancer = new SimpleBalancer();
    this.logger = console;
  }

  /**
   * Prepares request data based on the provider
   * 
   * @param {string} prompt - User prompt
   * @param {string} systemPrompt - System prompt 
   * @param {string} modelName - Model name
   * @param {string} providerName - Provider name (anthropic, openai, gemini)
   * @param {string} baseUrl - API base URL
   * @returns {Object} - {url, data} for the request
   */
  prepareRequestData(prompt, systemPrompt, modelName, providerName, baseUrl) {
    let url, data;

    if (providerName === 'anthropic') {
      url = `${baseUrl}/v1/messages`;
      data = {
        model: modelName,
        max_tokens: 4096,
        messages: [{ role: 'user', content: prompt }],
        stream: true
      };
      if (systemPrompt) {
        data.system = systemPrompt;
      }
    } else if (providerName === 'gemini') {
      url = `${baseUrl}/v1beta/models/${modelName}:streamGenerateContent?alt=sse`;
      data = {
        contents: [{ parts: [{ text: prompt }] }]
      };
      if (systemPrompt) {
        data.system_instruction = { parts: [{ text: systemPrompt }] };
      }
    } else {
      // OpenAI compatible APIs
      const messages = [];
      if (systemPrompt) {
        messages.push({ role: 'system', content: systemPrompt });
      }
      messages.push({ role: 'user', content: prompt });

      data = {
        messages: messages,
        model: modelName,
        stream: true
      };

      // Handle URL based on suffix
      if (baseUrl.endsWith('#')) {
        url = baseUrl.slice(0, -1); // Remove the # and use exact URL
      } else if (baseUrl.endsWith('/')) {
        url = `${baseUrl}chat/completions`; // Skip v1 prefix
      } else {
        url = `${baseUrl}/v1/chat/completions`; // Default pattern
      }
    }

    return { url, data };
  }
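
  // Illustrative mapping (hypothetical host) of how the baseUrl suffix is
  // resolved for OpenAI-compatible providers:
  //   'https://api.example.com#'  ->  'https://api.example.com'  (exact URL, '#' stripped)
  //   'https://api.example.com/'  ->  'https://api.example.com/chat/completions'
  //   'https://api.example.com'   ->  'https://api.example.com/v1/chat/completions'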

  prepareHeaders(providerName, apiKey) {
    const headers = {
      'Content-Type': 'application/json'
    };

    if (providerName === 'anthropic') {
      headers['X-API-Key'] = apiKey;
      headers['Anthropic-Version'] = '2023-06-01';
    } else if (providerName === 'gemini') {
      headers['X-Goog-Api-Key'] = apiKey;
    } else {
      headers['Authorization'] = `Bearer ${apiKey}`;
    }

    return headers;
  }

  /**
   * Process SSE stream data for different providers
   * 
   * @param {string} chunk - Data chunk from SSE
   * @param {string} providerName - Provider name
   * @returns {string|null} - Extracted text content or null
   */
  processStreamChunk(chunk, providerName) {
    if (!chunk || chunk === '[DONE]') return null;

    try {
      this.logger.log(`Processing chunk for ${providerName}:`, chunk);
      
      const data = JSON.parse(chunk);

      if (providerName === 'gemini') {
        const candidates = data.candidates || [];
        if (candidates.length > 0 && candidates[0].content) {
          const parts = candidates[0].content.parts;
          if (parts && parts.length > 0) {
            return parts[0].text || '';
          }
        }
        // Reached for chunks without text (e.g. finish metadata); the
        // original code returned undefined when `parts` was empty.
        this.logger.log(`No content found in chunk for ${providerName}: ${chunk}`);
        return null;
      } else if (providerName === 'anthropic') {
        // Handle content_block_delta which contains the actual text
        if (data.type === 'content_block_delta') {
          const delta = data.delta || {};
          if (delta.type === 'text_delta' || delta.text) {
            return delta.text || '';
          }
        }
        // Anthropic sends various event types - only some contain text
        return null;
      } else {
        // OpenAI compatible format
        const choice = (data.choices || [{}])[0];
        if (choice.finish_reason !== null && choice.finish_reason !== undefined) {
          return null; // End of generation
        }
        return choice.delta && choice.delta.content ? choice.delta.content : null;
      }
    } catch (e) {
      this.logger.error(`Error parsing chunk: ${e.message}, chunk: ${chunk}`);
      return null;
    }
  }
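
  // Illustrative (abridged) payload shapes this method expects per provider;
  // exact fields follow each provider's public streaming API:
  //   gemini:    {"candidates":[{"content":{"parts":[{"text":"Hi"}]}}]}
  //   anthropic: {"type":"content_block_delta","delta":{"type":"text_delta","text":"Hi"}}
  //   openai:    {"choices":[{"delta":{"content":"Hi"},"finish_reason":null}]}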

  /**
   * Sends a prompt to the given provider and streams the response text.
   *
   * @param {Object} options
   * @param {string} options.prompt - User prompt (required)
   * @param {string} options.providerName - 'anthropic', 'gemini', or an OpenAI-compatible provider (required)
   * @param {string} options.systemPrompt - Optional system prompt
   * @param {string} options.model - Model name (required)
   * @param {string} options.apiKey - API key (required); paired with baseUrl via SimpleBalancer
   * @param {string} options.baseUrl - API base URL (required)
   * @param {Function} options.handler - Optional streaming callback, invoked as handler(delta, fullText)
   * @param {number} options.timeout - Request timeout in milliseconds (default 60000)
   * @returns {Promise<string>} - Full final response text
   */
  async askLLM({
    prompt,
    providerName,
    systemPrompt = '',
    model,
    apiKey,
    baseUrl,
    handler = null,  // handler(delta, fullText)
    timeout = 60000
  }) {
    if (!prompt || !providerName || !model || !apiKey || !baseUrl) {
      throw new Error('Missing required parameters: prompt, providerName, model, apiKey, and baseUrl must all be provided');
    }

    // Use balancer to choose API key and base URL pair
    [apiKey, baseUrl] = this.balancer.choosePair(apiKey, baseUrl);

    const { url, data } = this.prepareRequestData(
      prompt, systemPrompt, model, providerName, baseUrl
    );

    const headers = this.prepareHeaders(providerName, apiKey);

    // Log request info (with masked API key)
    const apiKeyPreview = `${apiKey.slice(0, 5)}...${apiKey.slice(-4)}`;
    this.logger.info(
      `[SmolLLM] Request: ${url} | model=${model} | provider=${providerName} | api_key=${apiKeyPreview} | prompt=${prompt.length}`
    );

    // Create an AbortController for timeout handling
    const controller = new AbortController();
    const timeoutId = setTimeout(() => {
      controller.abort();
    }, timeout);

    try {
      const response = await fetch(url, {
        method: 'POST',
        headers: headers,
        body: JSON.stringify(data),
        signal: controller.signal
      });

      if (!response.ok) {
        throw new Error(`HTTP error ${response.status}: ${await response.text() || 'Unknown error'}`);
      }

      // Handle streaming response
      const reader = response.body.getReader();
      const decoder = new TextDecoder();
      let fullText = '';
      let buffer = '';

      const onDelta = (delta) => {
        if (delta) {
          fullText += delta;
          if (handler) handler(delta, fullText);
        }
      };

      while (true) {
        const { done, value } = await reader.read();
        if (done) break;

        buffer += decoder.decode(value, { stream: true });

        // SSE events may be split across network chunks, so only process
        // complete lines and keep any partial trailing line buffered
        // (the original code accumulated `buffer` but never used it).
        const lastNewline = buffer.lastIndexOf('\n');
        if (lastNewline === -1) continue;
        const completeLines = buffer.slice(0, lastNewline + 1);
        buffer = buffer.slice(lastNewline + 1);
        this.processSSEChunks(completeLines, providerName, onDelta);
      }

      // Flush the decoder and process any trailing line left in the buffer
      buffer += decoder.decode();
      if (buffer.trim()) {
        this.processSSEChunks(buffer, providerName, onDelta);
      }

      clearTimeout(timeoutId);
      return fullText;
    } catch (error) {
      clearTimeout(timeoutId);
      if (error.name === 'AbortError') {
        throw new Error(`Request timed out after ${timeout}ms`);
      }
      throw error;
    }
  }
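
  // Minimal usage sketch (all values below are placeholders, not library
  // defaults):
  //
  //   const llm = new SmolLLM();
  //   const text = await llm.askLLM({
  //     prompt: 'Hello!',
  //     providerName: 'openai',
  //     model: 'gpt-4o-mini',
  //     apiKey: 'sk-...',
  //     baseUrl: 'https://api.openai.com',
  //     handler: (delta, fullText) => console.log(delta),
  //   });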

  /**
   * Process SSE chunks for different providers
   * 
   * @param {string} text - The SSE text chunk
   * @param {string} providerName - Provider name
   * @param {Function} callback - Callback function for each delta
   */
  processSSEChunks(text, providerName, callback) {
    // Split the input by newlines
    const lines = text.split('\n');
    
    for (let line of lines) {
      line = line.trim();
      if (!line) continue;
      
      // Check for data prefix
      if (line.startsWith('data: ')) {
        const data = line.slice(6).trim();
        
        // Skip [DONE] marker
        if (data === '[DONE]') continue;
        
        // Process the chunk based on provider
        const delta = this.processStreamChunk(data, providerName);
        callback(delta);
      }
    }
  }
}

// Make it available globally (browser / userscript context)
if (typeof window !== 'undefined') {
  window.SmolLLM = SmolLLM;
}

// Export for module systems if needed
if (typeof module !== 'undefined' && module.exports) {
  module.exports = SmolLLM;
}