Building Enterprise Applications with Ruby and LocalAI: A Complete Hands-On Guide



Introduction

With AI technology developing rapidly, enterprises increasingly need to integrate AI capabilities into their own applications. LocalAI is an open-source project that lets you run an OpenAI-compatible API locally, protecting data privacy while keeping costs down. This article walks you through building an enterprise-grade application from scratch with Ruby and LocalAI.

Prerequisites

Environment Requirements

  1. Ruby 3.0+ (3.2.0 recommended)
  2. Bundler gem
  3. Docker (used to run LocalAI)
  4. A machine with at least 8 GB of RAM (needed to run the LLM model)
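
If you want to sanity-check these prerequisites from Ruby itself before going further, here is a minimal sketch (the file name `check_env.rb` is just illustrative):

```ruby
# check_env.rb -- quick prerequisite check (illustrative sketch)
required = Gem::Version.new("3.0.0")
current  = Gem::Version.new(RUBY_VERSION)

puts "Ruby #{RUBY_VERSION}: #{current >= required ? 'OK' : 'please upgrade to 3.0+'}"

# system(...) returns true on success, false on a non-zero exit, nil if the command is missing
{ "Bundler" => "bundle --version", "Docker" => "docker --version" }.each do |name, cmd|
  found = system(cmd, out: File::NULL, err: File::NULL)
  puts "#{name}: #{found ? 'installed' : 'not found'}"
end
```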

Installing the Required Components

```bash
# Install Ruby (via rbenv)
brew install rbenv ruby-build
rbenv install 3.2.0
rbenv global 3.2.0

# Install Bundler
gem install bundler

# Install Docker (macOS, via Homebrew)
brew install --cask docker
```

Step 1: Set Up the LocalAI Environment

Start the LocalAI Service

```bash
# Pull the LocalAI image
docker pull quay.io/go-skynet/local-ai:latest

# Run the LocalAI container (CPU mode)
docker run -p 8080:8080 \
  -e DEBUG=true \
  -e MODELS_PATH=/models \
  -e THREADS=4 \
  -v $PWD/models:/models \
  quay.io/go-skynet/local-ai:latest \
  --models-uri "https://raw.githubusercontent.com/go-skynet/model-gallery/main/index.yaml"
```

Parameter notes:

- `-p 8080:8080`: maps the container's port 8080 to port 8080 on the host
- `-e DEBUG=true`: enables debug logging
- `-e MODELS_PATH=/models`: tells LocalAI where to look for model files inside the container
- `-e THREADS=4`: sets the number of CPU threads to use
- `-v $PWD/models:/models`: mounts the local `models` directory into the container
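
Once the container is running, it is worth confirming the API actually answers on the mapped port before writing any application code. A minimal Ruby check, assuming the default mapping above (LocalAI exposes an OpenAI-compatible `/v1/models` endpoint):

```ruby
# check_localai.rb -- verify the LocalAI container answers on the mapped port
require "net/http"
require "json"

uri = URI("http://localhost:8080/v1/models")
response = Net::HTTP.get_response(uri)

if response.is_a?(Net::HTTPSuccess)
  models = JSON.parse(response.body)
  puts "LocalAI is up. Models reported: #{models.inspect}"
else
  puts "LocalAI responded with HTTP #{response.code} -- check `docker logs` for details."
end
```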

Download the Model File

Create a `models` folder in the project directory:

```bash
mkdir models && cd models
wget https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGUF/resolve/main/llama-2-7b-chat.Q4_0.gguf -O llama-2.q4_0.gguf
```

Notes:
1. The Llama-2 model needs more than 7 GB of memory
2. Q4_0 is a quantized build, suitable for consumer-grade hardware
3. The first download can take a while (roughly 5 GB)
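
Before pointing the container at this model, you can quickly confirm the download completed; a truncated file is a common cause of silent model-load failures. A small sketch (the 3 GB threshold is only a rough assumption for this quantization):

```ruby
# check_model.rb -- sanity-check the downloaded model file
model_path = File.join("models", "llama-2.q4_0.gguf")

if File.exist?(model_path)
  size_gb = (File.size(model_path) / 1024.0**3).round(2)
  puts "Found #{model_path} (#{size_gb} GB)"
  puts "Warning: file looks too small, the download may be incomplete." if size_gb < 3.0
else
  puts "Model file not found at #{model_path} -- re-run the wget command above."
end
```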

Step 2: Create the Ruby Project

Initialize the Project

```bash
mkdir ruby-localai-demo && cd ruby-localai-demo
bundle init
```

Edit the generated Gemfile:

```ruby
# Gemfile
source "https://rubygems.org"

gem "httparty" # HTTP client library
gem "dotenv"   # environment variable management
gem "sinatra"  # web framework (optional)
```

Install the dependencies:

```bash
bundle install
```
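
The client class built in the next step reads its settings through `dotenv`. A sample `.env` you could drop into the project root (every variable is optional; the names match the `ENV` lookups used in `localai_client.rb` below):

```bash
# .env -- optional configuration read by localai_client.rb (example values)
LOCAL_AI_URL=http://localhost:8080
REQUEST_TIMEOUT=300
PREWARM=false
LOG_REQUESTS=true
VERBOSE_LOG=false
LOG_MEMORY_STATS=false
MAX_THREADS=5
```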

Step 3: A Base Class for Talking to LocalAI

Create a file named `localai_client.rb`:

```ruby
# localai_client.rb
require 'httparty'
require 'json'
require 'dotenv'

Dotenv.load

class LocalAIClient
  LOCAL_AI_URL = ENV.fetch('LOCAL_AI_URL', 'http://localhost:8080')

  def initialize(model: 'llama-2.q4_0')
    @model = model
    @headers = {
      'Content-Type' => 'application/json'
    }
    @options = {
      model: @model,
      temperature: 0.7,
      max_tokens: 500,
      top_p: 1,
      frequency_penalty: 0,
      presence_penalty: 0,
      stop: ["\n", "User:"]
    }

    puts "LocalAIClient initialized with model #{@model}"
    puts "API endpoint: #{LOCAL_AI_URL}/v1/completions"

    # Simple health check to verify the connection to the LocalAI server works.
    begin
      response = HTTParty.get("#{LOCAL_AI_URL}/readyz")
      unless response.success?
        raise "Failed to connect to LocalAI server at #{LOCAL_AI_URL}. Please ensure the server is running."
      end

      puts "Successfully connected to LocalAI server."
    rescue => e
      puts "Warning: Could not connect to LocalAI server at #{LOCAL_AI_URL}. Error: #{e.message}"
    end

    @initialized_at = Time.now

    # Track the number of requests made by this client instance.
    @request_count = Hash.new(0)

    # Cache frequently used responses (simple implementation; consider a proper
    # cache such as Redis for production).
    @response_cache = {}

    # Thread pool for handling concurrent requests (optional, used only if
    # concurrent-ruby has been required elsewhere).
    @thread_pool = Concurrent::FixedThreadPool.new(ENV.fetch('MAX_THREADS', '5').to_i) if defined?(Concurrent::FixedThreadPool)

    # Log memory usage statistics (helpful for debugging performance issues).
    log_memory_stats

    # Register a shutdown hook to clean up resources when the program exits.
    at_exit { shutdown }

    # Warm up the model on initialization (optional). Done last so that the
    # request counters above are already in place.
    complete("Hello") if ENV['PREWARM'] == 'true'

    puts "LocalAIClient setup complete."
  rescue => e
    puts "Error initializing LocalAIClient: #{e.message}"
    raise e
  end

  def log_memory_stats
    if RUBY_PLATFORM.downcase.include?('linux') && File.exist?('/proc/meminfo')
      meminfo = File.read('/proc/meminfo')
      total_mem = (meminfo.match(/MemTotal:\s+(\d+)/)[1].to_i / 1024 rescue nil)
      free_mem  = (meminfo.match(/MemFree:\s+(\d+)/)[1].to_i / 1024 rescue nil)

      puts "[Memory] Total: #{total_mem}MB, Free: #{free_mem}MB" if total_mem && free_mem
    else
      puts "[Memory] Memory stats not available on this platform."
    end

    if defined?(GC) && GC.respond_to?(:stat)
      gc_stats = GC.stat
      puts "[GC] Heap slots allocated: #{gc_stats[:heap_live_slots]}, Major GC count: #{gc_stats[:major_gc_count]}"
    end
  end

  def shutdown
    puts "\nShutting down LocalAIClient..."

    if defined?(@thread_pool) && @thread_pool
      puts "Shutting down thread pool..."
      @thread_pool.shutdown
      unless @thread_pool.wait_for_termination(30)
        @thread_pool.kill
        puts "Forcefully killed thread pool after timeout"
      end
    end

    uptime_minutes = ((Time.now - @initialized_at) / 60).round(2)

    puts "\n=== Client Statistics ==="
    puts "Uptime: #{uptime_minutes} minutes"
    puts "Total requests made: #{@request_count.values.sum}"

    @request_count.each do |endpoint, count|
      puts "- #{endpoint}: #{count}"
    end

    puts "\nGoodbye!"
  end

  def complete(prompt, options = {})
    endpoint = "/v1/completions"

    full_options = @options.merge(options).merge(prompt: prompt)

    start_time = Time.now

    response = HTTParty.post(
      "#{LOCAL_AI_URL}#{endpoint}",
      body: full_options.to_json,
      headers: @headers,
      timeout: ENV.fetch('REQUEST_TIMEOUT', '300').to_i
    )

    elapsed_ms = ((Time.now - start_time) * 1000).round(2)

    unless response.success?
      error_message = JSON.parse(response.body)['error']['message'] rescue response.body

      raise <<~ERROR
        Request failed with status #{response.code}: #{error_message}

        Request details:
        - Endpoint: POST #{endpoint}
        - Model: #{@model}
        - Prompt length: #{prompt.length} chars (#{prompt.bytesize} bytes)
        - Elapsed time before failure: #{elapsed_ms}ms

        Response headers:
        #{response.headers.to_h.to_json}

        Full response:
        #{response.body[0..1000]}#{response.body.length > 1001 ? '... (truncated)' : ''}
      ERROR
    end

    result = JSON.parse(response.body)

    log_request(endpoint, elapsed_ms)

    result['choices'][0]['text']
  rescue HTTParty::Error => e
    raise "HTTP error occurred: #{e.message}"
  rescue JSON::ParserError => e
    raise "Failed to parse response: #{e.message}\nResponse body: #{response.body[0..500]}"
  rescue => e
    raise "Unexpected error: #{e.class}: #{e.message}\n#{e.backtrace.join("\n")[0..500]}"
  end

  def chat(messages, options = {})
    endpoint = "/v1/chat/completions"

    full_options = @options.merge(options).merge(messages: messages)

    start_time = Time.now

    response = HTTParty.post(
      "#{LOCAL_AI_URL}#{endpoint}",
      body: full_options.to_json,
      headers: @headers,
      timeout: ENV.fetch('REQUEST_TIMEOUT', '300').to_i
    )

    elapsed_ms = ((Time.now - start_time) * 1000).round(2)

    unless response.success?
      error_message = JSON.parse(response.body)['error']['message'] rescue response.body

      raise <<~ERROR
        Chat request failed with status #{response.code}: #{error_message}

        Request details:
        - Endpoint: POST #{endpoint}
        - Messages count: #{messages.size}
        - Elapsed time before failure: #{elapsed_ms}ms

        First message preview:
        #{messages.first.to_json[0..200]}

        Response headers:
        #{response.headers.to_h.to_json}

        Full response:
        #{response.body[0..1000]}#{response.body.length > 1001 ? '... (truncated)' : ''}
      ERROR
    end

    result = JSON.parse(response.body)

    log_request(endpoint, elapsed_ms)

    result.dig('choices', 0, 'message', 'content') || result.dig('choices', 0, 'text')
  rescue HTTParty::Error => e
    raise "HTTP error occurred during chat request: #{e.message}"
  rescue JSON::ParserError => e
    raise "Failed to parse chat response: #{e.message}\nResponse body: #{response.body[0..500]}"
  rescue => e
    raise "Unexpected chat error: #{e.class}: #{e.message}\n#{e.backtrace.join("\n")[0..500]}"
  end

  private

  def log_request(endpoint, elapsed_ms)
    return unless ENV['LOG_REQUESTS'] == 'true'

    @request_count[endpoint] += 1

    # Log every request when verbose logging is enabled, otherwise only slow ones.
    if ENV['VERBOSE_LOG'] == 'true' || elapsed_ms > 20_000
      STDOUT.puts "[#{Time.now.strftime('%Y-%m-%d %H:%M:%S')}] Processed request to #{endpoint} in #{elapsed_ms}ms"
    end

    # Encourage a GC pass after unusually slow requests.
    GC.start if elapsed_ms > 20_000 && defined?(GC)

    # Periodically log memory statistics.
    log_memory_stats if ENV['LOG_MEMORY_STATS'] == 'true' && @request_count.values.sum % 50 == 0
  end
end
```

Below the class, a few test helpers and an entry point let you run the file directly:

```ruby
def test_completion(client)
  prompt = "Write a Ruby function that calculates factorial recursively."

  puts "\n=== Testing Completion ==="
  puts "Prompt:\n#{prompt}\n\nResponse:"

  begin
    puts client.complete(prompt)
  rescue => e
    puts "Error during completion test: #{e.message}"
  end
end

def test_chat(client)
  messages = [
    { role: "system", content: "You are a helpful Ruby programming assistant." },
    { role: "user",   content: "Explain how modules work in Ruby with an example." }
  ]

  puts "\n=== Testing Chat ==="
  puts "Messages:\n#{JSON.pretty_generate(messages)}\n\nResponse:"

  begin
    puts client.chat(messages)
  rescue => e
    puts "Error during chat test: #{e.message}"
  end
end

def run_tests(client)
  test_completion(client)
  test_chat(client)

  puts "\nTests completed!"
end

if __FILE__ == $PROGRAM_NAME
  client = nil

  begin
    client = LocalAIClient.new(model: 'llama-2.q4_0')
    run_tests(client)
  rescue SystemExit, Interrupt
    puts "\nInterrupted by user."
  ensure
    client.shutdown if client
    # exit! skips at_exit hooks, so the registered shutdown hook does not run a second time.
    exit!(true)
  end
end
```
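
The `sinatra` gem added to the Gemfile is still unused; if you want to expose the client over HTTP inside your own service, a minimal sketch along these lines could work (the `/ask` route and JSON shape are assumptions, not part of LocalAI):

```ruby
# app.rb -- minimal Sinatra wrapper around LocalAIClient (illustrative sketch)
require "sinatra"
require "json"
require_relative "localai_client"

# One shared client; for production consider a queueing or pooling strategy.
client = LocalAIClient.new(model: "llama-2.q4_0")

post "/ask" do
  content_type :json
  payload = JSON.parse(request.body.read)
  answer = client.chat([{ role: "user", content: payload.fetch("question") }])
  { answer: answer }.to_json
rescue JSON::ParserError, KeyError => e
  status 400
  { error: e.message }.to_json
end
```

Run it with `bundle exec ruby app.rb` and POST a JSON body such as `{"question": "..."}` to `http://localhost:4567/ask` (4567 is Sinatra's default port). Because `localai_client.rb` guards its test code with `__FILE__ == $PROGRAM_NAME`, requiring it here does not trigger the tests.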
