write — Grammar and style checking skill
Grammar and style checking using local LanguageTool server (localhost:8081). Detects grammar errors, style issues, and provides suggestions for improving text quality.
npx claudepluginhub pknull/asha-marketplace --plugin write

This skill uses the workspace's default tool permissions.
This skill provides access to your local LanguageTool server running on `localhost:8081`. LanguageTool checks grammar, style, punctuation, and offers suggestions for improving text quality.
Creates isolated Git worktrees for feature branches with prioritized directory selection, gitignore safety checks, auto project setup for Node/Python/Rust/Go, and baseline verification.
Executes implementation plans in current session by dispatching fresh subagents per independent task, with two-stage reviews: spec compliance then code quality.
Dispatches parallel agents to independently tackle 2+ tasks like separate test failures or subsystems without shared state or dependencies.
This skill provides access to your local LanguageTool server running on localhost:8081. LanguageTool checks grammar, style, punctuation, and offers suggestions for improving text quality.
Server Status: Running at http://localhost:8081 Process: java -jar languagetool-server.jar (PID: 8834)
import requests
def check_text(text, language='en-US', timeout=30):
    """Send *text* to the local LanguageTool server and return the parsed JSON.

    Parameters:
        text: The text to check.
        language: LanguageTool language code (e.g. 'en-US', 'de-DE').
        timeout: Seconds to wait for the server before giving up.

    Returns:
        dict with a 'matches' list (see the /v2/check response structure).

    Raises:
        requests.RequestException: on connection errors or timeout.
        requests.HTTPError: if the server answers with an error status.
    """
    url = 'http://localhost:8081/v2/check'
    data = {
        'text': text,
        'language': language
    }
    # Without a timeout this call hangs forever when the server is down;
    # fail fast instead. raise_for_status() avoids silently .json()-parsing
    # an HTTP error response.
    response = requests.post(url, data=data, timeout=timeout)
    response.raise_for_status()
    return response.json()
# Example usage: print every issue found in a sample sentence.
text = "This is a example sentence with error."
report = check_text(text)
for issue in report['matches']:
    print(f"Issue: {issue['message']}")
    print(f"Context: {issue['context']['text']}")
    print(f"Suggestions: {', '.join(r['value'] for r in issue['replacements'][:3])}")
    print()
# Check a sample sentence against the local LanguageTool /v2/check endpoint.
curl -X POST 'http://localhost:8081/v2/check' \
  --data 'text=This is a example sentence with error.' \
  --data 'language=en-US'
import requests
def check_file(filepath, language='en-US', timeout=30):
    """Check the contents of a text file with the local LanguageTool server.

    Parameters:
        filepath: Path of the file to read and check.
        language: LanguageTool language code (e.g. 'en-US').
        timeout: Seconds to wait for the server before giving up.

    Returns:
        dict with a 'matches' list (see the /v2/check response structure).

    Raises:
        OSError: if the file cannot be read.
        requests.RequestException: on connection errors or timeout.
        requests.HTTPError: if the server answers with an error status.
    """
    # Read as UTF-8 explicitly; the platform default encoding can mangle text.
    with open(filepath, 'r', encoding='utf-8') as f:
        text = f.read()
    url = 'http://localhost:8081/v2/check'
    data = {'text': text, 'language': language}
    # Timeout + raise_for_status: fail fast on a down or erroring server.
    response = requests.post(url, data=data, timeout=timeout)
    response.raise_for_status()
    return response.json()
# Usage: count how many issues a document contains.
report = check_file('document.txt')
print(f"Found {len(report['matches'])} issues")
def batch_check(texts, language='en-US'):
    """Check several strings and return one summary dict per input.

    Each summary carries the original text, the number of issues found,
    and the raw match objects from the server.
    """
    summaries = []
    for candidate in texts:
        outcome = check_text(candidate, language)
        hits = outcome['matches']
        summaries.append({
            'text': candidate,
            'issues': len(hits),
            'matches': hits,
        })
    return summaries
# Usage: batch_check returns one summary dict per input string.
texts = [
    "First sentence to check.",
    "Second sentence with a error.",
    "Third sentence is correct."
]
results = batch_check(texts)
def apply_first_suggestion(text, match):
    """Return *text* with the issue's first suggested replacement spliced in.

    If the match carries no replacements, the flagged span is removed
    (replaced with the empty string).
    """
    start = match['offset']
    end = start + match['length']
    suggestions = match['replacements']
    fix = suggestions[0]['value'] if suggestions else ''
    return f"{text[:start]}{fix}{text[end:]}"
# Usage: fix the first reported issue, if any.
text = "This is a example."
outcome = check_text(text)
hits = outcome['matches']
if hits:
    fixed_text = apply_first_suggestion(text, hits[0])
    print(f"Original: {text}")
    print(f"Fixed: {fixed_text}")
def get_issues_by_type(result, issue_type='grammar'):
    """Filter matches by category: grammar, style, punctuation, typo, misc.

    Unknown *issue_type* values fall back to the GRAMMAR category.
    """
    categories = {
        'grammar': 'GRAMMAR',
        'style': 'STYLE',
        'punctuation': 'PUNCTUATION',
        'typo': 'TYPOS',
        'misc': 'MISC',
    }
    wanted = categories.get(issue_type, 'GRAMMAR')
    selected = []
    for candidate in result['matches']:
        if candidate['rule']['category']['id'] == wanted:
            selected.append(candidate)
    return selected
# Usage: keep only the matches in the GRAMMAR category.
result = check_text("This is a example with bad grammar, and style issues")
grammar_issues = get_issues_by_type(result, 'grammar')
print(f"Grammar issues: {len(grammar_issues)}")
Check available languages:
curl http://localhost:8081/v2/languages
Common language codes:
en-US - English (American)
en-GB - English (British)
de-DE - German
fr - French
es - Spanish
pt-BR - Portuguese (Brazilian)

def check_with_rules(text, enabled_rules=None, disabled_rules=None):
    """Check text with specific LanguageTool rules enabled or disabled.

    Parameters:
        text: The text to check.
        enabled_rules: Optional list of rule IDs to enable.
        disabled_rules: Optional list of rule IDs to disable.

    Returns:
        dict with a 'matches' list (see the /v2/check response structure).
    """
    url = 'http://localhost:8081/v2/check'
    data = {
        'text': text,
        'language': 'en-US'
    }
    # The API expects comma-separated rule IDs.
    if enabled_rules:
        data['enabledRules'] = ','.join(enabled_rules)
    if disabled_rules:
        data['disabledRules'] = ','.join(disabled_rules)
    response = requests.post(url, data=data, timeout=30)
    response.raise_for_status()
    return response.json()
# Disable passive voice checks (rule IDs come from the 'rule' field of matches
# or from GET /v2/rule-examples).
result = check_with_rules(
    "The report was written by the team.",
    disabled_rules=['PASSIVE_VOICE']
)
def check_with_words(text, ignored_words=None):
    """Check text while ignoring matches on specific words (e.g., technical terms).

    The /v2/check endpoint has no direct "ignore these words" parameter, so
    the filtering happens client-side: any match whose flagged span is one of
    *ignored_words* (case-insensitive) is dropped from the result.

    Parameters:
        text: The text to check.
        ignored_words: Optional iterable of words to exempt from reporting.

    Returns:
        dict with a 'matches' list, minus matches on ignored words.
    """
    url = 'http://localhost:8081/v2/check'
    data = {
        'text': text,
        'language': 'en-US'
    }
    response = requests.post(url, data=data, timeout=30)
    response.raise_for_status()
    result = response.json()
    # BUG FIX: the original version never used ignored_words (it only cleared
    # 'enabledRules', which had no effect). Filter the matches instead.
    if ignored_words:
        ignore = {w.lower() for w in ignored_words}
        result['matches'] = [
            m for m in result['matches']
            if text[m['offset']:m['offset'] + m['length']].lower() not in ignore
        ]
    return result
Response structure:
{
"matches": [
{
"message": "Error description",
"shortMessage": "Brief description",
"replacements": [
{"value": "suggested replacement"}
],
"offset": 10,
"length": 5,
"context": {
"text": "...surrounding text...",
"offset": 5,
"length": 15
},
"rule": {
"id": "RULE_ID",
"description": "Rule description",
"category": {
"id": "GRAMMAR",
"name": "Grammar"
}
}
}
]
}
Use the included scripts in scripts/ directory:
check_text.py - Check a text stringcheck_file.py - Check a file and show issuesbatch_check.py - Check multiple filesapply_fixes.py - Automatically apply suggestionsdef check_markdown(filepath):
"""Check markdown file, preserving code blocks"""
with open(filepath, 'r') as f:
content = f.read()
# Simple approach: check the whole file
# For advanced: parse markdown and skip code blocks
result = check_text(content)
# Filter out issues in code blocks (basic)
filtered_matches = [m for m in result['matches']
if not is_in_code_block(content, m['offset'])]
return filtered_matches
def is_in_code_block(text, offset):
    """Return True when *offset* falls inside a ``` fenced markdown block.

    Counts the fences preceding the offset: an odd count means the offset
    sits between an opening fence and its closing fence.
    """
    fences_before = text[:offset].count('```')
    return fences_before % 2 == 1
def generate_report(filepath):
    """Print a grammar report for *filepath*, grouped by issue category."""
    outcome = check_file(filepath)
    all_matches = outcome['matches']
    print(f"Grammar Report for: {filepath}")
    print("=" * 60)
    print(f"Total issues found: {len(all_matches)}\n")
    # Bucket the matches by their rule-category name.
    grouped = {}
    for entry in all_matches:
        label = entry['rule']['category']['name']
        if label not in grouped:
            grouped[label] = []
        grouped[label].append(entry)
    for label, bucket in grouped.items():
        print(f"\n{label} ({len(bucket)} issues):")
        print("-" * 40)
        for issue in bucket:
            print(f"  • {issue['message']}")
            if issue['replacements']:
                print(f"    Suggestion: {issue['replacements'][0]['value']}")
# Check if the LanguageTool server process is running
ps aux | grep languagetool
# Check that port 8081 is listening
netstat -tlnp | grep 8081
# Restart server (foreground process; run in a separate terminal or background it)
java -jar languagetool-server.jar --port 8081
# Test connection: hit the cheap /v2/languages endpoint and report the result.
import requests
try:
    reply = requests.get('http://localhost:8081/v2/languages')
except Exception as err:
    print(f"Server error: {err}")
else:
    print(f"Server OK: {reply.status_code}")
Main endpoint: POST http://localhost:8081/v2/check
Parameters:
text (required): Text to check
language (required): Language code (e.g., 'en-US')
enabledRules: Comma-separated rule IDs to enable
disabledRules: Comma-separated rule IDs to disable
enabledCategories: Comma-separated category IDs
disabledCategories: Comma-separated category IDs

Other endpoints:
GET /v2/languages - List supported languages
GET /v2/rule-examples - Get rule examples