Common Issues & Solutions
1. Installation Issues
Module Not Found
Error: ModuleNotFoundError: No module named 'pynions'
Solution:
# Verify virtual environment is activated
which python
# Should show: ~/Documents/pynions/venv/bin/python
# If not, activate venv:
source venv/bin/activate
# Reinstall requirements
pip install -r requirements.txt
Playwright Issues
Error: Browser executable not found
Solution:
# Install browsers
playwright install
# If that fails because system dependencies are missing, install them (requires sudo)
sudo playwright install-deps
# Verify installation
playwright --version
2. API Issues
Serper API Issues
API Key Not Found
Error: SERPER_API_KEY not found in environment variables
Solution:
# Check if environment variables are loaded
python -c "import os; print(os.getenv('SERPER_API_KEY'))"
# If None, verify .env file contains:
SERPER_API_KEY=your_serper_key_here
# Reload environment:
source venv/bin/activate
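If the key still comes back as None, confirm that the .env file is actually being loaded before the key is read. A quick check, assuming the project uses python-dotenv (pip install python-dotenv):
# check_env.py - sanity check that the Serper key is loaded from .env (assumes python-dotenv)
import os
from dotenv import load_dotenv

load_dotenv()  # reads .env from the current working directory

key = os.getenv("SERPER_API_KEY")
print("SERPER_API_KEY loaded:", bool(key))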
Invalid Response Format
Error: Serper API error: 401 (Invalid API key)
Solution:
Verify the API key is valid in the Serper dashboard (see the key check sketch below)
Check the API service status
Monitor credit usage
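A quick way to confirm the key itself is a one-off request to the Serper search endpoint: a 200 status means the key works, a 401 means it is invalid. A minimal sketch using requests (the endpoint and header below match Serper's documented API, but double-check against your dashboard):
# check_serper_key.py - verify the API key with a single test request
import os
import requests

response = requests.post(
    "https://google.serper.dev/search",
    headers={"X-API-KEY": os.getenv("SERPER_API_KEY", "")},
    json={"q": "test"},
    timeout=10,
)
print(response.status_code)  # 200 = valid key, 401 = invalid key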
Rate Limits
Error: 429 Too Many Requests
Solution:
# Add retry logic to config.json
{
  "plugins": {
    "serper": {
      "max_results": 10,
      "retry_attempts": 3,
      "retry_delay": 5
    }
  }
}
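The config above only sets the retry parameters. If your version of the plugin does not apply them automatically, a minimal retry wrapper around the search call looks like this (an illustrative sketch, not the plugin's built-in behaviour; retry_attempts and retry_delay mirror the settings above):
# retry_search.py - minimal retry wrapper around SerperWebSearch (illustrative sketch)
import asyncio
from pynions.plugins.serper import SerperWebSearch

async def search_with_retry(query, retry_attempts=3, retry_delay=5):
    searcher = SerperWebSearch({"max_results": 10})
    last_error = None
    for attempt in range(retry_attempts):
        try:
            return await searcher.execute({"query": query})
        except Exception as e:  # e.g. 429 Too Many Requests
            last_error = e
            await asyncio.sleep(retry_delay)  # back off before retrying
    raise last_error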
3. Workflow Issues
Step Execution Failure
Error: Step 'fetch_serp' failed: Connection timeout
Debug Steps:
Test SerperWebSearch independently:
# Test the search plugin on its own
import asyncio
from pynions.plugins.serper import SerperWebSearch

async def test_search():
    searcher = SerperWebSearch({"max_results": 10})
    return await searcher.execute({"query": "test query"})

# Run the test
result = asyncio.run(test_search())
print(result)
Enable debug logging:
import logging
logging.basicConfig(level=logging.DEBUG)
4. Data Storage Issues
Permission Errors
Error: Permission denied: './data/results.json'
Solution:
# Fix permissions
chmod 755 data
chmod 644 data/*.json
# Verify
ls -la data/
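To catch permission problems before a workflow runs, you can also verify from Python that the data directory is writable (a small sketch; the ./data path matches the error above):
# check_data_dir.py - fail fast if the data directory is not writable
import os

data_dir = "./data"
os.makedirs(data_dir, exist_ok=True)
if not os.access(data_dir, os.W_OK):
    raise PermissionError(f"Cannot write to {data_dir}; fix permissions with chmod 755 {data_dir}")
print(f"{data_dir} is writable")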
Debugging
1. Logging
Enable detailed logging:
# In your script
import logging
logging.basicConfig(
    level=logging.DEBUG,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('debug.log'),
        logging.StreamHandler()
    ]
)
2. Interactive Debugging
Using IPython:
# Install IPython
pip install ipython
# Start interactive session
ipython
# Import and test components
from pynions.core import *
3. Visual Studio Code Debugging
Create .vscode/launch.json:
{
  "version": "0.2.0",
  "configurations": [
    {
      "name": "Python: Current File",
      "type": "python",
      "request": "launch",
      "program": "${file}",
      "console": "integratedTerminal",
      "justMyCode": true
    }
  ]
}
Set breakpoints and run the debugger
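Outside the IDE, you can get a similar effect with Python's built-in breakpoint(), which drops into pdb at the point you want to inspect (a small illustrative example):
# debug_example.py - pause execution with the built-in debugger
def process(data):
    breakpoint()  # drops into pdb here; inspect `data`, then type `c` to continue
    return data

process({"query": "test query"})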
Performance Profiling
1. Time Profiling
import time
from pynions.core import WorkflowStep

class TimingWorkflowStep(WorkflowStep):
    async def execute(self, input_data):
        start_time = time.time()
        result = await super().execute(input_data)
        duration = time.time() - start_time
        print(f"Step {self.name} took {duration:.2f} seconds")
        return result
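The timing subclass is a drop-in replacement wherever a step is added (a usage sketch, assuming WorkflowStep accepts the plugin and name arguments used in the tests below):
# usage sketch - add the timing subclass in place of a plain WorkflowStep
from pynions.core import Workflow
from pynions.plugins.serper import SerperWebSearch

workflow = Workflow("timed_serp")
workflow.add_step(TimingWorkflowStep(
    plugin=SerperWebSearch({"max_results": 10}),
    name="fetch_serp"
))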
2. Memory Profiling
# Install memory profiler
pip install memory_profiler
# Run with profiling
python -m memory_profiler your_script.py
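memory_profiler reports line-by-line memory use for functions decorated with @profile; for example, annotate the function you want to measure in your_script.py before running the command above:
# your_script.py - decorate the function you want memory_profiler to measure
from memory_profiler import profile

@profile
def build_results():
    # memory use of each line is reported when run via `python -m memory_profiler`
    data = [{"query": f"test {i}"} for i in range(100_000)]
    return data

if __name__ == "__main__":
    build_results()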
Testing
1. Unit Tests
# tests/test_plugins/test_serper.py
import pytest
from pynions.plugins.serper import SerperWebSearch

@pytest.mark.asyncio
async def test_serper_search():
    searcher = SerperWebSearch({"max_results": 10})
    result = await searcher.execute({"query": "test query"})
    assert result is not None
    assert "organic" in result
    assert "peopleAlsoAsk" in result
    assert "relatedSearches" in result
2. Integration Tests
# tests/test_workflows/test_serp_workflow.py
import pytest
from pynions.core import Workflow, WorkflowStep
from pynions.plugins.serper import SerperWebSearch

@pytest.mark.asyncio
async def test_serp_workflow():
    workflow = Workflow("serp_test")
    workflow.add_step(WorkflowStep(
        plugin=SerperWebSearch({"max_results": 10}),
        name="fetch_serp"
    ))
    result = await workflow.execute({"query": "test query"})
    assert result is not None
Monitoring
1. Basic Monitoring
import time
from pynions.core import Workflow

class MonitoredWorkflow(Workflow):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.start_time = None
        self.metrics = {}

    async def execute(self, input_data):
        self.start_time = time.time()
        try:
            result = await super().execute(input_data)
            self.metrics['duration'] = time.time() - self.start_time
            self.metrics['success'] = True
            return result
        except Exception as e:
            self.metrics['success'] = False
            self.metrics['error'] = str(e)
            raise
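After a run, the collected metrics can be inspected or logged (a usage sketch, assuming the same constructor and step arguments as the tests above):
# usage sketch for MonitoredWorkflow
import asyncio
from pynions.core import WorkflowStep
from pynions.plugins.serper import SerperWebSearch

async def run_monitored():
    workflow = MonitoredWorkflow("monitored_serp")
    workflow.add_step(WorkflowStep(
        plugin=SerperWebSearch({"max_results": 10}),
        name="fetch_serp"
    ))
    await workflow.execute({"query": "test query"})
    print(workflow.metrics)  # e.g. {'duration': 1.23, 'success': True}

asyncio.run(run_monitored())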
2. Resource Monitoring
import logging
import psutil

def log_system_metrics():
    metrics = {
        'cpu_percent': psutil.cpu_percent(),
        'memory_percent': psutil.virtual_memory().percent,
        'disk_usage': psutil.disk_usage('/').percent
    }
    logging.info(f"System metrics: {metrics}")
Best Practices
Always use virtual environment
Keep logs for debugging
Test components individually
Monitor resource usage
Use version control
Document errors and solutions
Getting Help
Check logs first
Review documentation
Test in isolation
Use debugging tools
Ask specific questions
Common Serper Response Issues
1. Missing Data Fields
If certain fields are missing from the response:
# Check that fields exist before accessing them
if "peopleAlsoAsk" in result and result["peopleAlsoAsk"]:
    # Process "people also ask" data
    pass

if "relatedSearches" in result and result["relatedSearches"]:
    # Process related searches
    pass
2. Rate Limit Monitoring
# Monitor credit usage (set `threshold` to your own credit limit)
if "credits" in result:
    logging.info(f"Credits used: {result['credits']}")
    if result["credits"] > threshold:
        logging.warning("High credit usage detected")
3. Response Validation
def validate_serper_response(result):
    """Validate a Serper API response."""
    required_fields = ["searchParameters", "organic"]
    for field in required_fields:
        if field not in result:
            logging.error(f"Missing required field: {field}")
            return False
    return True
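Run the check right after each search so malformed responses are caught before later steps depend on them (a usage sketch):
# usage sketch - validate each response before later steps consume it
import asyncio
from pynions.plugins.serper import SerperWebSearch

async def fetch_and_validate(query):
    result = await SerperWebSearch({"max_results": 10}).execute({"query": query})
    if not validate_serper_response(result):
        raise ValueError("Serper response is missing required fields")
    return result

asyncio.run(fetch_and_validate("test query"))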