Detects typosquatting, homograph phishing, and brand impersonation domains using dnstwist to generate variants, query DNS records, and compare webpage similarities.
Install: `npx claudepluginhub killvxk/cybersecurity-skills-zh`. This skill uses the workspace's default tool permissions.
dnstwist 是一款域名置换引擎,用于生成外观相似的域名,以检测域名抢注、同形字符钓鱼(Phishing)攻击和品牌冒充。它使用字符替换、字符转置、字符插入、字符删除和同形字符替换等技术生成数千个域名变体,然后检查 DNS 记录(A、AAAA、NS、MX),使用模糊哈希(ssdeep)和感知哈希(pHash)计算网页相似度,并识别潜在恶意的已注册域名。
Generates domain permutations with dnstwist, resolves DNS records, and detects registered typosquatting, homograph phishing, and brand impersonation domains.
Uses dnstwist to generate domain permutations, resolve DNS records, and detect registered typosquatting, homograph phishing, and brand impersonation domains via fuzzy hashing similarity.
Monitors Certificate Transparency logs with crt.sh for historical queries and Certstream for real-time to detect phishing domains, impersonating certificates, and unauthorized issuances for your organization.
Share bugs, ideas, or general feedback.
dnstwist 是一款域名置换引擎,用于生成外观相似的域名,以检测域名抢注、同形字符钓鱼(Phishing)攻击和品牌冒充。它使用字符替换、字符转置、字符插入、字符删除和同形字符替换等技术生成数千个域名变体,然后检查 DNS 记录(A、AAAA、NS、MX),使用模糊哈希(ssdeep)和感知哈希(pHash)计算网页相似度,并识别潜在恶意的已注册域名。
安装:`pip install dnstwist[full]`。dnstwist 使用以下方式生成变体:addition(附加字符)、bitsquatting(位翻转错误)、homoglyph(视觉相似的 Unicode 字符,如 rn vs m)、hyphenation(添加连字符)、insertion(插入字符)、omission(删除字符)、repetition(重复字符)、replacement(替换为相邻键盘按键)、subdomain(插入点)、transposition(交换相邻字符)、vowel-swap(交换元音)和基于字典的(附加常用词)。
dnstwist 使用 ssdeep(局部敏感哈希)比较 HTML 内容,使用 pHash(感知哈希)比较网页截图。这有助于识别视觉上模仿合法站点的克隆钓鱼站点。高相似度分数表明可能是钓鱼页面。
典型工作流程为:生成域名变体 → 解析 DNS 记录 → 检查已注册域名 → 比较网页相似度 → 标记可疑域名 → 通知安全团队 → 申请域名撤销。对于典型企业域名,dnstwist 会生成 5,000-10,000 个变体。
import subprocess
import json
import csv
from datetime import datetime
def run_dnstwist_scan(domain, output_file=None):
    """Run a dnstwist permutation scan against *domain*.

    Args:
        domain: Target domain to generate lookalike permutations for.
        output_file: Optional path; registered findings are dumped there as JSON.

    Returns:
        List of dnstwist result dicts for permutations that resolved to an
        A or AAAA record, or an empty list on any scan failure (dnstwist
        missing, timeout, non-zero exit, or unparsable output).
    """
    cmd = [
        "dnstwist",
        "--registered",   # only report domains that actually resolve
        "--format", "json",   # machine-readable output
        "--nameservers", "8.8.8.8,1.1.1.1",
        "--threads", "50",
        "--mxcheck",   # probe MX records (mail capability)
        "--ssdeep",    # fuzzy-hash comparison of web content
        "--geoip",     # GeoIP lookup
        domain,
    ]
    print(f"[*] 正在扫描 {domain} 的置换变体")
    try:
        result = subprocess.run(cmd, capture_output=True, text=True, timeout=600)
    except FileNotFoundError:
        # dnstwist binary is not installed / not on PATH
        print("[-] dnstwist 错误: dnstwist not found on PATH")
        return []
    except subprocess.TimeoutExpired:
        print(f"[-] dnstwist 错误: scan of {domain} timed out")
        return []
    if result.returncode != 0:
        print(f"[-] dnstwist 错误: {result.stderr}")
        return []
    try:
        results = json.loads(result.stdout)
    except json.JSONDecodeError as exc:
        print(f"[-] dnstwist 错误: invalid JSON output ({exc})")
        return []
    # Keep only permutations that resolved to at least one address record.
    registered = [r for r in results if r.get("dns_a") or r.get("dns_aaaa")]
    print(f"[+] 找到 {len(registered)} 个已注册的仿冒域名")
    if output_file:
        with open(output_file, "w") as f:
            json.dump(registered, f, indent=2)
        print(f"[+] 结果已保存到 {output_file}")
    return registered
# Example run: scan example.com and persist registered lookalikes to JSON.
results = run_dnstwist_scan("example.com", "typosquat_results.json")
def analyze_results(results, legitimate_ips=None):
    """Score dnstwist findings and bucket them into high/medium/low risk.

    Each entry in *results* is mutated in place: a numeric ``risk_score``
    and a human-readable ``risk_factors`` list are attached. Scores >= 50
    are high risk, >= 25 medium, everything else low.

    Args:
        results: dnstwist result dicts (as returned by run_dnstwist_scan).
        legitimate_ips: Set of IPs belonging to the real infrastructure;
            domains pointing elsewhere score higher.

    Returns:
        Dict with keys "high", "medium", "low" mapping to sorted entry lists
        (high and medium sorted by descending score).
    """
    known_good = legitimate_ips or set()
    buckets = {"high": [], "medium": [], "low": []}
    for item in results:
        score = 0
        factors = []
        # Strong page similarity suggests a cloned phishing site.
        similarity = item.get("ssdeep_score", 0)
        if similarity and similarity > 50:
            score += 40
            factors.append(f"高度网页相似性 ({similarity}%)")
        # An MX record means the domain can handle mail (phishing capability).
        if item.get("dns_mx", []):
            score += 20
            factors.append("有 MX 记录(具备邮件能力)")
        # Recently registered domains are more suspicious (when WHOIS is known).
        created_raw = item.get("whois_created", "")
        if created_raw:
            try:
                created_at = datetime.fromisoformat(created_raw.replace("Z", "+00:00"))
                age = (datetime.now(created_at.tzinfo) - created_at).days
            except (ValueError, TypeError):
                pass
            else:
                if age < 30:
                    score += 30
                    factors.append(f"近期注册({age} 天前)")
                elif age < 90:
                    score += 15
                    factors.append(f"{age} 天前注册")
        # Homoglyph permutations are the most dangerous (visually identical).
        permutation = item.get("fuzzer", "")
        if permutation == "homoglyph":
            score += 25
            factors.append("同形字符(视觉上完全相同)")
        elif permutation in ("addition", "replacement", "transposition"):
            score += 10
            factors.append(f"置换类型: {permutation}")
        # Resolving to IPs outside the legitimate infrastructure.
        addresses = item.get("dns_a", [])
        if addresses and not set(addresses) & known_good:
            score += 10
            factors.append("IP 与合法服务器不同")
        item["risk_score"] = score
        item["risk_factors"] = factors
        tier = "high" if score >= 50 else "medium" if score >= 25 else "low"
        buckets[tier].append(item)
    for tier in ("high", "medium"):
        buckets[tier].sort(key=lambda e: e["risk_score"], reverse=True)
    print(f"\n=== 域名抢注分析 ===")
    print(f"高风险: {len(buckets['high'])}")
    print(f"中风险: {len(buckets['medium'])}")
    print(f"低风险: {len(buckets['low'])}")
    if buckets["high"]:
        print(f"\n--- 高风险域名 ---")
        for item in buckets["high"][:10]:
            print(f" {item['domain']} (分数: {item['risk_score']})")
            for reason in item['risk_factors']:
                print(f" - {reason}")
    return buckets
# Prioritize the findings; 93.184.216.34 is treated as the known-good IP
# for example.com here — verify against your real infrastructure.
analysis = analyze_results(results, legitimate_ips={"93.184.216.34"})
import time
import hashlib
class TyposquatMonitor:
    """Watch a set of domains for newly registered typosquat lookalikes.

    Findings are persisted to a JSON state file so that repeated scans only
    report domains that have not been seen before.
    """

    def __init__(self, domains, known_domains_file="known_typosquats.json"):
        # domains: list of organization domains to monitor.
        # known_domains_file: JSON state file of previously seen findings.
        self.domains = domains
        self.known_file = known_domains_file
        self.known_domains = self._load_known()

    def _load_known(self):
        """Load persisted findings; return {} on missing or corrupt file."""
        try:
            # utf-8 explicitly: findings may contain non-ASCII (IDN) data.
            with open(self.known_file, "r", encoding="utf-8") as f:
                return json.load(f)
        except FileNotFoundError:
            return {}
        except json.JSONDecodeError:
            # A corrupt state file should not kill every future scan.
            print(f"[-] 无法解析 {self.known_file},将重新开始记录")
            return {}

    def _save_known(self):
        """Persist the known-findings map to the state file."""
        with open(self.known_file, "w", encoding="utf-8") as f:
            json.dump(self.known_domains, f, indent=2)

    def scan_all_domains(self):
        """Scan every monitored domain and return findings not seen before."""
        new_findings = []
        for domain in self.domains:
            results = run_dnstwist_scan(domain)
            for entry in results:
                domain_key = entry.get("domain", "")
                if domain_key not in self.known_domains:
                    entry["first_seen"] = datetime.now().isoformat()
                    entry["monitored_domain"] = domain
                    self.known_domains[domain_key] = entry
                    new_findings.append(entry)
                    print(f" [新发现] {domain_key} ({entry.get('fuzzer', '')})")
        self._save_known()
        print(f"\n[+] 新发现域名抢注: {len(new_findings)} 个")
        return new_findings

    def generate_alert(self, findings):
        """Build HIGH-severity alert dicts for new high-risk typosquats."""
        analysis = analyze_results(findings)
        alerts = []
        for entry in analysis["high"]:
            alerts.append({
                "severity": "HIGH",
                "domain": entry["domain"],
                "target": entry.get("monitored_domain", ""),
                "risk_score": entry["risk_score"],
                "risk_factors": entry["risk_factors"],
                "dns_a": entry.get("dns_a", []),
                "dns_mx": entry.get("dns_mx", []),
                "timestamp": datetime.now().isoformat(),
            })
        return alerts
# Example monitoring run: scan both organization domains and build alerts
# for any newly discovered high-risk lookalikes.
monitor = TyposquatMonitor(["mycompany.com", "mycompany.org"])
new_findings = monitor.scan_all_domains()
alerts = monitor.generate_alert(new_findings)
def export_blocklist(analysis, output_file="blocklist.txt"):
    """Export high- and medium-risk domains as a firewall/proxy blocklist.

    Args:
        analysis: Dict from analyze_results with "high"/"medium"/"low" lists.
        output_file: Destination path for the blocklist text file.

    Returns:
        The collected domain list in input order (may contain duplicates);
        the written file itself is de-duplicated and sorted.
    """
    domains = []
    for entry in analysis["high"] + analysis["medium"]:
        domain = entry.get("domain", "")
        if domain:
            domains.append(domain)
    # utf-8 explicitly: the header comment below contains non-ASCII text and
    # would raise UnicodeEncodeError under a cp1252 default locale.
    with open(output_file, "w", encoding="utf-8") as f:
        f.write(f"# 域名抢注封锁列表,生成时间 {datetime.now().isoformat()}\n")
        for d in sorted(set(domains)):
            f.write(f"{d}\n")
    print(f"[+] 封锁列表已保存: {len(domains)} 个域名 -> {output_file}")
    return domains
def generate_takedown_report(high_risk_domains, output_file="takedown_report.md"):
    """Write a Markdown takedown-request report for high-risk domains.

    Args:
        high_risk_domains: Entries from analyze_results' "high" bucket
            (each with domain, fuzzer, dns_a, dns_mx, risk_score, etc.).
        output_file: Report destination; defaults to the original hard-coded
            path for backward compatibility.
    """
    report = f"""# 域名撤销申请
生成时间: {datetime.now().isoformat()}
## 摘要
识别到 {len(high_risk_domains)} 个潜在的域名抢注/钓鱼域名。
## 需要撤销的域名
"""
    for entry in high_risk_domains:
        report += f"""
### {entry['domain']}
- **置换类型**: {entry.get('fuzzer', 'unknown')}
- **IP 地址**: {', '.join(entry.get('dns_a', ['N/A']))}
- **MX 记录**: {', '.join(entry.get('dns_mx', ['N/A']))}
- **风险分数**: {entry.get('risk_score', 0)}
- **风险因素**: {'; '.join(entry.get('risk_factors', []))}
- **网页相似度**: {entry.get('ssdeep_score', 'N/A')}%
"""
    # utf-8 explicitly: the report body is Chinese and would raise
    # UnicodeEncodeError under a cp1252 default locale.
    with open(output_file, "w", encoding="utf-8") as f:
        f.write(report)
    print(f"[+] 撤销报告已生成: {output_file}")
# Export the firewall blocklist and draft the takedown report for the
# high-risk findings from the earlier analysis.
export_blocklist(analysis)
generate_takedown_report(analysis["high"])