Python 实现 Ollama 提示词生成与优化系统

1. 基础环境配置

import functools
import json
from dataclasses import dataclass
from typing import Dict, List, Optional

import requests

@dataclass
class PromptContext:
    """Describes the prompt to be generated: what kind of task, in which
    domain, and which concrete requirements the prompt must cover."""
    task: str  # task category, e.g. 'code_generation' or 'documentation'
    domain: str  # subject area, e.g. 'web_development'
    requirements: List[str]  # free-form requirement bullet points

class OllamaService:
    """Holds the Ollama endpoint URL and the aliases of known local models."""

    def __init__(self, base_url: str = "http://localhost:11434"):
        """Remember the server base URL and register the supported models.

        Args:
            base_url: Root URL of the local Ollama HTTP server.
        """
        self.base_url = base_url
        # Alias -> model-name map; identical on both sides for now, kept as a
        # dict so aliases can later point at different underlying models.
        self.models = {name: name for name in ('mistral', 'llama2', 'neural-chat')}

2. 核心功能实现

2.1 提示词生成服务

class PromptGenerationService:
    """Generates and optimizes prompts via Ollama's /api/generate endpoint.

    NOTE(review): the public methods are declared async but use the blocking
    `requests` library, which stalls the event loop while the model responds.
    Kept for interface compatibility; consider an async HTTP client.
    """

    def __init__(self, model_name: str = 'mistral'):
        """Bind the service to one Ollama model.

        Args:
            model_name: Name of the local Ollama model to query.
        """
        self.model_name = model_name
        self.api_url = "http://localhost:11434/api/generate"

    def _complete(self, prompt: str, timeout: float = 120.0) -> str:
        """Send one non-streaming generate request and return the model text.

        Args:
            prompt: Full prompt text sent to the model.
            timeout: Per-request timeout in seconds (prevents hanging forever).

        Raises:
            requests.HTTPError: if the server answers with an error status.
        """
        response = requests.post(
            self.api_url,
            json={
                "model": self.model_name,
                "prompt": prompt,
                "stream": False
            },
            timeout=timeout,
        )
        # Surface HTTP errors instead of failing later on a missing JSON key.
        response.raise_for_status()
        return response.json()["response"]

    async def generate_prompt(self, context: PromptContext) -> str:
        """Ask the model to write a structured prompt for *context*."""
        prompt = f"""
        Task: Create a detailed prompt for the following context:
        - Task Type: {context.task}
        - Domain: {context.domain}
        - Requirements: {', '.join(context.requirements)}
        
        Generate a structured prompt that includes:
        1. Context setting
        2. Specific requirements
        3. Output format
        4. Constraints
        5. Examples (if applicable)
        """
        return self._complete(prompt)

    async def optimize_prompt(self, original_prompt: str) -> str:
        """Ask the model to analyze and improve *original_prompt*.

        Returns the model's textual analysis (was mis-annotated as Dict:
        the endpoint's "response" field is a string).
        """
        prompt = f"""
        Analyze and optimize the following prompt:
        "{original_prompt}"
        
        Provide:
        1. Improved version
        2. Explanation of changes
        3. Potential variations
        """
        return self._complete(prompt)

2.2 提示词模板管理

class PromptTemplates:
    """Static factory for common prompt templates.

    The original class-indented triple-quoted strings leaked eight spaces of
    source indentation into every line of the generated prompt; templates are
    now written flush-left so the output carries no stray whitespace.
    """

    @staticmethod
    def get_code_review_template(code: str) -> str:
        """Build a code-review prompt around *code*."""
        return f"""Analyze the following code:
{code}

Provide:
1. Code quality assessment
2. Potential improvements
3. Security concerns
4. Performance optimization
"""

    @staticmethod
    def get_documentation_template(component: str) -> str:
        """Build a documentation-writing prompt for *component*."""
        return f"""Generate documentation for:
{component}

Include:
1. Overview
2. API reference
3. Usage examples
4. Best practices
"""

    @staticmethod
    def get_refactoring_template(code: str) -> str:
        """Build a refactoring-suggestion prompt around *code*."""
        return f"""Suggest refactoring for:
{code}

Consider:
1. Design patterns
2. Clean code principles
3. Performance impact
4. Maintainability
"""

3. 使用示例

async def main():
    """Demo driver: generate prompts for two contexts, then optimize one."""
    service = PromptGenerationService(model_name='mistral')

    # Example 1: prompt for generating a React form component.
    generation_ctx = PromptContext(
        task='code_generation',
        domain='web_development',
        requirements=[
            'React component',
            'TypeScript',
            'Material UI',
            'Form handling',
        ],
    )
    print("代码生成提示词:", await service.generate_prompt(generation_ctx))

    # Example 2: prompt for producing API reference documentation.
    documentation_ctx = PromptContext(
        task='documentation',
        domain='API_reference',
        requirements=[
            'OpenAPI format',
            'Examples included',
            'Error handling',
            'Authentication details',
        ],
    )
    print("文档生成提示词:", await service.generate_prompt(documentation_ctx))

    # Example 3: have the model improve an under-specified prompt.
    print("优化后的提示词:", await service.optimize_prompt("写一个React组件"))

if __name__ == "__main__":
    # Entry point: run the async demo. asyncio is imported here so the
    # guard stays self-contained when the module is imported elsewhere.
    import asyncio
    asyncio.run(main())

4. 工具类实现

class PromptUtils:
    """Helper routines for formatting, validating, and enhancing prompts."""

    @staticmethod
    def format_requirements(requirements: List[str]) -> str:
        """Render *requirements* as a newline-separated dash bullet list."""
        return "\n".join(f"- {req}" for req in requirements)

    @staticmethod
    def validate_prompt(prompt: str) -> bool:
        """Return True if the prompt contains any non-whitespace text."""
        # bool(...) of the stripped string is the idiomatic emptiness check.
        return bool(prompt.strip())

    @staticmethod
    def enhance_prompt(prompt: str) -> str:
        """Append generic quality requirements to *prompt*.

        Written flush-left: the original class-indented triple-quoted string
        leaked eight spaces of source indentation into the enhanced prompt.
        """
        return f"""{prompt}

Additional requirements:
- Provide clear and detailed explanations
- Include practical examples
- Consider edge cases
- Follow best practices
"""

5. 错误处理

class PromptGenerationError(Exception):
    """Raised when prompt generation fails for a non-connection reason."""

class ModelConnectionError(Exception):
    """Raised when the Ollama server cannot be reached."""

def handle_api_errors(func):
    """Decorator for async API calls: translate low-level failures into the
    module's domain exceptions.

    Raises (from the wrapper):
        ModelConnectionError: when the Ollama server is unreachable.
        PromptGenerationError: for any other failure during generation.
    """
    @functools.wraps(func)  # preserve the wrapped coroutine's name/docstring
    async def wrapper(*args, **kwargs):
        try:
            return await func(*args, **kwargs)
        except requests.exceptions.ConnectionError as e:
            # Chain the cause so the original traceback is preserved.
            raise ModelConnectionError("无法连接到Ollama服务") from e
        except Exception as e:
            raise PromptGenerationError(f"提示词生成错误: {str(e)}") from e
    return wrapper

6. 配置管理

class Config:
    """Registry of the supported Ollama models and their generation settings."""

    # Per-model spec: (description, temperature, max_tokens).
    _SPECS = {
        'mistral': ('快速、轻量级提示词生成', 0.7, 2000),
        'llama2': ('复杂、详细的提示词需求', 0.8, 4000),
        'neural-chat': ('交互式提示词优化', 0.9, 3000),
    }

    # Expanded into the nested-dict shape callers expect.
    MODELS = {
        key: {
            'name': key,
            'description': description,
            'parameters': {
                'temperature': temperature,
                'max_tokens': max_tokens,
            },
        }
        for key, (description, temperature, max_tokens) in _SPECS.items()
    }

使用这个Python实现,你可以:

  1. 生成结构化的提示词
  2. 优化现有提示词
  3. 使用预定义模板
  4. 处理各种场景的提示词需求

主要优点:

  1. 面向对象的设计
  2. 异步支持
  3. 错误处理
  4. 类型提示
  5. 配置管理
  6. 模块化结构

这个实现可以作为一个基础框架,根据具体需求进行扩展和定制。

作者:老大白菜

物联沃分享整理
物联沃-IOTWORD物联网 » Python 实现 Ollama 提示词生成与优化系统

发表回复