The Python library for research and development in NLP, multimodal LLMs, Agents, ML, Knowledge Graphs, and more.
1.3k
Stars
86
Forks
14
Watchers
8
Issues
npcpy 是一个灵活的 Agent 框架,用于构建 AI 应用及开展大语言模型(LLM)相关研究。它支持本地及云端模型提供商、多 Agent 协作、工具调用(Tool calling)、图像/音频/视频生成、知识图谱、微调等多种功能。
pip install npcpy
from npcpy import NPC
simon = NPC(
name='Simon Bolivar',
primary_directive='Liberate South America from the Spanish Royalists.',
model='gemma3:4b',
provider='ollama'
)
response = simon.get_llm_response("What is the most important territory to retain in the Andes?")
print(response['response'])
from npcpy import get_llm_response
response = get_llm_response("Who was the celtic messenger god?", model='qwen3:4b', provider='ollama')
print(response['response'])
# 或者使用 ollama 的云端模型
test = get_llm_response('who is john wick', model='minimax-m2.7:cloud', provider='ollama',)
print(test['response'])
from npcpy import Agent, ToolAgent, CodingAgent
# Agent — 自带默认工具 (sh, python, edit_file, web_search 等)
agent = Agent(name='ops', model='qwen3.5:2b', provider='ollama')
print(agent.run("Find all Python files over 500 lines in this repo and list them"))
# ToolAgent — 在默认工具之外添加自定义工具
import subprocess
def run_tests(test_path: str = "tests/") -> str:
"""Run pytest on the given path and return results."""
result = subprocess.run(["python3", "-m", "pytest", test_path, "-v", "--tb=short"],
capture_output=True, text=True, timeout=120)
return result.stdout + result.stderr
def git_diff(branch: str = "main") -> str:
"""Show the git diff against a branch."""
result = subprocess.run(["git", "diff", branch, "--stat"], capture_output=True, text=True)
return result.stdout
reviewer = ToolAgent(
name='code_reviewer',
primary_directive='You review code changes, run tests, and report issues.',
tools=[run_tests, git_diff],
model='qwen3.5:2b', provider='ollama'
)
print(reviewer.run("Run the tests and summarize any failures"))
# CodingAgent — 自动执行 LLM 回复中的代码块
coder = CodingAgent(name='coder', language='python', model='qwen3.5:2b', provider='ollama')
print(coder.run("Write a script that finds duplicate files by hash in the current directory"))
from npcpy import get_llm_response
response = get_llm_response("Explain quantum entanglement.", model='qwen3.5:2b', provider='ollama', stream=True)
for chunk in response['response']:
print(chunk.get('message', {}).get('content', ''), end='', flush=True)
在 Prompt 中包含预期的 JSON 结构。设置 format='json' 后,响应会自动解析,response['response'] 将直接成为字典或列表。
from npcpy import get_llm_response
response = get_llm_response(
'''List 3 planets from the sun.
Return JSON: {"planets": [{"name": "planet name", "distance_au": 0.0, "num_moons": 0}]}''',
model='qwen3.5:2b', provider='ollama',
format='json'
)
for planet in response['response']['planets']:
print(f"{planet['name']}: {planet['distance_au']} AU, {planet['num_moons']} moons")
response = get_llm_response(
'''Analyze this review: 'The battery life is amazing but the screen is too dim.'
Return JSON: {"tone": "positive/negative/mixed", "key_phrases": ["phrase1", "phrase2"], "confidence": 0.0}''',
model='qwen3.5:2b', provider='ollama',
format='json'
)
result = response['response']
print(result['tone'], result['key_phrases'])
传入一个 Pydantic 模型,系统会将 JSON Schema 直接发送给 LLM。
from npcpy import get_llm_response
from pydantic import BaseModel
from typing import List
class Planet(BaseModel):
name: str
distance_au: float
num_moons: int
class SolarSystem(BaseModel):
planets: List[Planet]
response = get_llm_response(
"List the first 4 planets from the sun.",
model='qwen3.5:2b', provider='ollama',
format=SolarSystem
)
for p in response['response']['planets']:
print(f"{p['name']}: {p['distance_au']} AU, {p['num_moons']} moons")
from npcpy.llm_funcs import gen_image, gen_video
from npcpy.gen.audio_gen import text_to_speech
# 图像 — 支持 OpenAI, Gemini, Ollama 或 diffusers
images = gen_image("A sunset over the mountains", model='gpt-image-1', provider='openai')
images[0].save("sunset.png")
# 音频 — 支持 OpenAI, Gemini, ElevenLabs, Kokoro, gTTS
audio_bytes = text_to_speech("Hello from npcpy!", engine="openai", voice="alloy")
with open("hello.wav", "wb") as f:
f.write(audio_bytes)
# 视频 — 支持 Gemini Veo
result = gen_video("A cat riding a skateboard", model='veo-3.1-fast-generate-preview', provider='gemini')
print(result['output'])
from npcpy import Team, Agent
# 定义多个 Agent 组成团队协同工作
researcher = Agent(name='researcher', model='qwen3.5:2b', provider='ollama')
writer = Agent(name='writer', model='qwen3.5:2b', provider='ollama')
team = Team(agents=[researcher, writer])
print(team.run("Research the history of coffee and write a brief summary"))
npcpy 是一个高度灵活的 Python AI 开发框架,专为自然语言处理、多模态大模型应用及智能体研究设计。该项目简化了从基础 LLM 调用到复杂多智能体协作的构建过程,并原生集成了 MCP 协议以扩展工具调用能力。
支持创建具有角色设定、指令集和自定义工具的智能体,并内置了多种智能体子类以满足代码执行等特殊需求。支持多智能体团队协作,通过协调器实现复杂任务编排与团队流程管理。提供基于 Jinja 模板的 Jinx 工作流,实现多步骤提示词管道的标准化与模块化。集成了 MCP(Model Context Protocol)标准,允许智能体无缝接入外部工具与数据服务。支持 Pydantic 结构化输出、多媒体内容生成以及基于 Markdown 文件的快速智能体定义与配置。
该项目适用于希望快速构建复杂智能体应用的研究人员与开发者,特别适合需要实现多智能体协作、外部工具集成以及标准化流程编排的开发场景。