Try this code. Try utilizing semantic coding (semantic compression and decompression): it is more concise and less redundant than natural language, and the LLM should be able to interpret it.
import re
# Define AI capabilities
AI_CAPABILITIES = {
    "create_logo": False,      # AI cannot do this
    "create_post": True,       # AI can do this
    "generate_report": True,
    "design_graphics": False
}
def extract_task(user_input):
    """
    Extracts the core task from user input using pattern matching.
    Example: "Can you create a logo?" → "create_logo"
    """
    keywords = {
        "logo": "create_logo",
        "post": "create_post",
        "report": "generate_report",
        "design": "design_graphics"
    }
    for keyword, task in keywords.items():
        if re.search(rf"\b{keyword}\b", user_input, re.IGNORECASE):
            return task
    return None  # If no task is identified, return None
def validate_capability(task):
    """
    Checks if AI can perform the requested task.
    Returns True if capable, False otherwise.
    """
    return AI_CAPABILITIES.get(task, False)
def handle_query(user_input):
    """
    Processes the user input and generates an AI response.
    """
    task = extract_task(user_input)
    if not task:
        return "I'm not sure what you're asking. Can you clarify?"
    if validate_capability(task):
        return f"Yes, I can {task.replace('_', ' ')}!"
    # Context-aware fallback suggestion instead of a hard refusal
    alternative_suggestions = {
        "create_logo": "I can't design a logo, but I can suggest ideas!",
        "design_graphics": "I don't have graphic tools, but I can help with layouts!",
    }
    return alternative_suggestions.get(task, f"No, I cannot {task.replace('_', ' ')}.")
# Example conversations
user_queries = [
    "Can you create a logo?",
    "Can you create a post?",
    "Can you generate a report?",
    "Can you design graphics?",
    "Can you write a blog article?"
]
# Simulate AI responses
for query in user_queries:
    print(f"User: {query}")
    print(f"AI: {handle_query(query)}\n")