#!/usr/bin/env python3
import json, urllib.request
# Need categories: each label maps to a weight ('w') and the keyword list
# ('kw') that triggers it when found in a repo's name/description/topics.
NEEDS = {
    'rag':           {'w': 10, 'kw': ['rag', 'retrieval', 'vector', 'embedding']},
    'skill_agent':   {'w': 10, 'kw': ['skill', 'agent', 'plugin', 'hook', 'mcp', 'claude']},
    'scraping':      {'w': 9,  'kw': ['scraper', 'crawl', 'playwright']},
    'llm_local':     {'w': 9,  'kw': ['ollama', 'llm', 'inference', 'gguf', 'vllm']},
    'security':      {'w': 8,  'kw': ['security', 'audit', 'vulnerability', 'nuclei']},
    'pharma_health': {'w': 8,  'kw': ['pharma', 'health', 'medical', 'hcp']},
    'email':         {'w': 8,  'kw': ['email', 'smtp', 'deliverability']},
    'crm':           {'w': 7,  'kw': ['crm', 'lead', 'pipeline', 'sales']},
    'automation':    {'w': 7,  'kw': ['automation', 'workflow', 'n8n', 'telegram']},
}


def score(r):
    """Score a GitHub repository dict against the NEEDS table.

    The repo's name, description, and topics are lowercased into one search
    string; each need counts at most once (first keyword hit wins its full
    weight). Smaller bonuses are added for star count, a permissive license
    (MIT / Apache-2.0), and a language the team works in.

    Returns a ``(total_score, matched_need_names)`` tuple; the name list is
    deduplicated via ``set`` and therefore unordered.
    """
    haystack = ' '.join([
        r.get('name', ''),
        r.get('description', '') or '',          # description may be None
        ' '.join(r.get('topics', [])),
    ]).lower()

    total = 0
    matched = []
    for need, cfg in NEEDS.items():
        if any(kw in haystack for kw in cfg['kw']):
            total += cfg['w']
            matched.append(need)

    # Popularity bonus, tiered by star count.
    stars = r.get('stargazers_count', 0)
    if stars > 10000:
        total += 5
    elif stars > 1000:
        total += 3
    elif stars > 100:
        total += 1

    # Permissive-license bonus; the API returns either None or a dict here.
    spdx = ''
    license_info = r.get('license')
    if isinstance(license_info, dict):
        spdx = (license_info.get('spdx_id') or '').lower()
    if spdx in ('mit', 'apache-2.0'):
        total += 2

    # Familiar-language bonus ('language' may be None).
    lang = (r.get('language', '') or '').lower()
    if lang in ('python', 'php', 'javascript', 'typescript', 'shell'):
        total += 2

    return total, list(set(matched))
# Fetch recent "ai agent tool" repos from the GitHub search API, score each
# against NEEDS, and cache the top 15 as JSON for the web frontend.
# NOTE(review): the query pins created:>2026-03-01 — confirm this date is
# intentional and not stale.
try:
    req = urllib.request.Request(
        'https://api.github.com/search/repositories?q=ai+agent+tool+created:>2026-03-01&sort=stars&order=desc&per_page=20',
        headers={'User-Agent': 'WEVAL'},
    )
    # Close the HTTP response deterministically (original relied on GC).
    with urllib.request.urlopen(req, timeout=15) as resp:
        data = json.loads(resp.read())

    tr = []
    for r in data.get('items', []):
        sc, needs = score(r)
        tr.append({
            'name': r['full_name'],
            'stars': r['stargazers_count'],
            'description': (r.get('description', '') or '')[:150],
            'language': r.get('language', '?'),
            'score': sc,
            'needs': needs,
            'url': r['html_url'],
        })
    tr.sort(key=lambda entry: entry['score'], reverse=True)

    # Write atomically enough for a cron job; `with` guarantees the file is
    # flushed and closed (original passed a bare open() to json.dump).
    with open('/var/www/html/api/oss-trending.json', 'w') as out:
        json.dump({'ok': True, 'trending': tr[:15]}, out)
    print(f'Trending: {len(tr)} repos cached')
except Exception as e:
    # Best-effort cache refresh: report the failure and exit quietly rather
    # than crash the scheduler.
    print(f'Trending error: {e}')
|