⚠️ DEPRECATED GITLAB INSTANCE ⚠️ This GitLab instance is now read-only and kept for reference. Please use https://gitlab.iauro.co for all new work.

Migration completed on September 17, 2025

Commit f6431cf8 authored by MotiramShinde

langchain assignment

parent 5da8e051
from typing import Optional, TypedDict

from langgraph.graph import StateGraph, END


# Shared state carried between nodes
class SymptomState(TypedDict):
    symptoms: Optional[str]
    api_result: Optional[str]


# Node 1: Get user symptoms
def get_symptoms(state: SymptomState):
    symptoms = input("What symptoms are you experiencing? ")
    return {"symptoms": symptoms}


# Router: decide whether the symptoms are detailed enough to call the API
def validate_symptoms(state: SymptomState):
    symptoms = state["symptoms"] or ""
    if len(symptoms.strip().split()) < 3:
        print("Please provide more details.")
        return "get_symptoms"
    return "call_api"


# Node 2: Simulate API call
def call_medical_api(state: SymptomState):
    symptoms = state["symptoms"]
    result = f"Simulated advice for symptoms: {symptoms}"
    return {"api_result": result}


# Node 3: Return advice
def return_advice(state: SymptomState):
    print("\n=== Diagnosis Summary ===")
    print(f"Symptoms: {state['symptoms']}")
    print(f"Advice: {state['api_result']}")
    return {}


# Build graph
builder = StateGraph(SymptomState)
builder.add_node("get_symptoms", get_symptoms)
builder.add_node("call_api", call_medical_api)
builder.add_node("return_advice", return_advice)
builder.set_entry_point("get_symptoms")
# Route from get_symptoms: loop back for more detail or continue to the API call
builder.add_conditional_edges(
    "get_symptoms",
    validate_symptoms,
    {"get_symptoms": "get_symptoms", "call_api": "call_api"},
)
builder.add_edge("call_api", "return_advice")
builder.add_edge("return_advice", END)

app = builder.compile()
app.invoke({"symptoms": None, "api_result": None})
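
The script above blocks until invoke returns the final state. As an optional sketch (not part of the original commit), the same compiled app can also stream each node's state update as it runs, using LangGraph's stream method:

# Optional sketch: stream per-node state updates instead of waiting for the final state
initial_state = {"symptoms": None, "api_result": None}
for update in app.stream(initial_state, stream_mode="updates"):
    print(update)  # e.g. {"get_symptoms": {"symptoms": "..."}}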
from langchain.chains import LLMChain, SequentialChain
from langchain_community.document_loaders import PyPDFLoader, Docx2txtLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_core.prompts import PromptTemplate
from langchain_openai import ChatOpenAI  # Works with DeepSeek's OpenAI-compatible API
from langchain.agents import Tool, AgentExecutor
from langchain.agents import create_react_agent
from langchain import hub
from dotenv import load_dotenv
import os

# Load environment variables
load_dotenv()

# Initialize DeepSeek LLM
llm = ChatOpenAI(
    model="deepseek-chat",  # Verify correct model name with DeepSeek's documentation
    openai_api_key=os.getenv("DEEPSEEK_API_KEY"),
    base_url="https://api.deepseek.com/v1",  # Confirm this endpoint with DeepSeek
    temperature=0,
)

# Document loader function
def load_document(file_path):
    if file_path.endswith('.pdf'):
        loader = PyPDFLoader(file_path)
    elif file_path.endswith('.docx'):
        loader = Docx2txtLoader(file_path)
    else:
        raise ValueError("Unsupported file format")
    return loader.load()

# Text splitter configuration
text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=1000,
    chunk_overlap=200,
)
# Prompt templates
obligations_prompt = PromptTemplate(
    input_variables=["text"],
    template="""
Analyze the following contract text and extract all obligations for each party.
Return the results in a structured format with party names and their obligations.
Contract text: {text}
""",
)

dates_prompt = PromptTemplate(
    input_variables=["text"],
    template="""
Extract all important dates from the following contract text including:
- Effective dates
- Termination dates
- Delivery deadlines
- Payment due dates
- Other significant timelines
Contract text: {text}
""",
)

risks_prompt = PromptTemplate(
    input_variables=["text"],
    template="""
Identify potential risks in the following contract text including:
- Unfavorable terms
- Ambiguous language
- Unbalanced obligations
- Unreasonable liabilities
- Other potential risk factors
Contract text: {text}
""",
)
# Create analysis chains
obligations_chain = LLMChain(llm=llm, prompt=obligations_prompt, output_key="obligations")
dates_chain = LLMChain(llm=llm, prompt=dates_prompt, output_key="dates")
risks_chain = LLMChain(llm=llm, prompt=risks_prompt, output_key="risks")

# Sequential chain to run all analyses
contract_chain = SequentialChain(
    chains=[obligations_chain, dates_chain, risks_chain],
    input_variables=["text"],
    output_variables=["obligations", "dates", "risks"],
    verbose=True,
)

# Tools for the agent
tools = [
    Tool(
        name="Contract Analyzer",
        func=lambda text: contract_chain.invoke({"text": text}),
        description="Useful for analyzing contracts and extracting obligations, dates, and risks",
    )
]

# Create agent
agent_prompt = hub.pull("hwchase17/react")
agent = create_react_agent(llm, tools, agent_prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
# Main analysis function
def analyze_contract(file_path):
    # Load and split document
    docs = load_document(file_path)
    texts = text_splitter.split_documents(docs)

    # Process each chunk (for large documents) with the sequential chain, whose output
    # dict contains the "obligations", "dates", and "risks" keys aggregated below.
    # (The ReAct agent above only returns a single "output" string, so it is kept for
    # ad-hoc queries rather than used here.)
    full_results = {"obligations": [], "dates": [], "risks": []}
    for text in texts:
        result = contract_chain.invoke({"text": text.page_content})
        # Aggregate results, skipping duplicates
        for key in full_results:
            if key in result and result[key] not in full_results[key]:
                full_results[key].append(result[key])
    return full_results
if __name__ == "__main__":
    file_path = input("Enter path to contract file (PDF or DOCX): ").strip()
    try:
        results = analyze_contract(file_path)
        print("\n=== Contract Analysis Results ===")
        print("\nObligations:")
        for obligation in results["obligations"]:
            print(f"- {obligation}")
        print("\nImportant Dates:")
        for date in results["dates"]:
            print(f"- {date}")
        print("\nPotential Risks:")
        for risk in results["risks"]:
            print(f"- {risk}")
    except Exception as e:
        print(f"\nError: {str(e)}")
        print("Please check:")
        print("- File path is correct")
        print("- File is PDF or DOCX format")
        print("- DeepSeek API key is valid")
from langchain_community.document_loaders import PyPDFLoader
# from langchain_community.llms import OpenAI
from langchain_openai import OpenAI
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate

# Load and split PDF
loader = PyPDFLoader("contract.pdf")
pages = loader.load_and_split()
splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
docs = splitter.split_documents(pages)

# Prompt Template
with open("prompts/extract_prompt.txt", "r") as f:
    prompt_template = f.read()
prompt = PromptTemplate(input_variables=["document"], template=prompt_template)

llm = OpenAI(temperature=0)
chain = LLMChain(llm=llm, prompt=prompt)

# Process top 3 chunks
for i, chunk in enumerate(docs[:3]):
    print(f"\n--- Chunk {i+1} ---")
    result = chain.run(document=chunk.page_content)
    print(result)
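
Because the prompt file that follows asks the model to reply in JSON, the raw string returned by chain.run can be parsed before printing. A minimal sketch; the helper name parse_extraction is hypothetical and not part of the commit:

import json

# Hypothetical helper, not in the original script: parse the model's JSON reply and
# fall back to an empty result if the output is not valid JSON.
def parse_extraction(raw: str) -> dict:
    try:
        return json.loads(raw)
    except json.JSONDecodeError:
        return {"obligations": [], "dates": [], "risks": []}

# e.g. inside the loop above: parsed = parse_extraction(result)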
The prompt file read by the script above (prompts/extract_prompt.txt):

You are a legal contract analyzer. Extract the following from the given contract text:
1. Obligations of the parties
2. Important dates (deadlines, effective dates, etc.)
3. Any potential risks or liabilities
Document:
{document}
Return the output in this JSON format:
{{
"obligations": [...],
"dates": [...],
"risks": [...]
}}
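
The doubled braces in the JSON skeleton above are how literal { and } characters are escaped in a LangChain PromptTemplate, so only {document} is treated as an input variable. A minimal check, assuming the prompt text is saved at prompts/extract_prompt.txt as the script expects:

from langchain.prompts import PromptTemplate

# Assumes the prompt text above is stored at prompts/extract_prompt.txt
with open("prompts/extract_prompt.txt") as f:
    tmpl = PromptTemplate(input_variables=["document"], template=f.read())

# The doubled braces render as literal { and } in the formatted prompt
print(tmpl.format(document="Sample clause: payment is due within 30 days."))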
langchain
openai
pypdf
chromadb
tiktoken
langchain-community
langchain-openai
langgraph
python-dotenv
docx2txt
What appear to be virtual-environment files committed along with the scripts (symlink targets and a pyvenv.cfg):

python3
/usr/bin/python3
lib
home = /usr/bin
include-system-site-packages = false
version = 3.10.17