Oviya committed on
Commit
51b884b
·
1 Parent(s): b59637a

update searchcompanies

Browse files
Files changed (2) hide show
  1. list.py +55 -233
  2. pytrade.py +1 -18
list.py CHANGED
@@ -1,18 +1,18 @@
1
  # -*- coding: utf-8 -*-
 
 
 
 
 
 
2
  from __future__ import annotations
3
  import csv, io, json, time, os
4
- from typing import Dict, List, Any, Optional
5
  from pathlib import Path
6
- from io import StringIO
7
 
8
  import requests
9
-
10
- # optional (for Wikipedia tables)
11
- try:
12
- import pandas as pd # requires: pip install pandas lxml
13
- HAS_PANDAS = True
14
- except Exception:
15
- HAS_PANDAS = False
16
 
17
  # ---------- configuration ----------
18
  UA = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127 Safari/537.36"
@@ -34,15 +34,8 @@ NIFTY_URLS: Dict[str, str] = {
34
  "NIFTY500": "https://www.niftyindices.com/IndexConstituent/ind_nifty500list.csv",
35
  }
36
 
37
- # Wikipedia pages for other markets
38
- WIKI_PAGES: Dict[str, str] = {
39
- "NASDAQ100": "https://en.wikipedia.org/wiki/NASDAQ-100",
40
- "DAX40": "https://en.wikipedia.org/wiki/DAX",
41
- "OMXS30": "https://en.wikipedia.org/wiki/OMX_Stockholm_30",
42
- }
43
-
44
- # Filters payload
45
- _MARKETS: Dict[str, Dict[str, List[Dict[str, str]]]] = {
46
  "India": {
47
  "NSE (National Stock Exchange)": [
48
  {"code": "NIFTY50", "name": "NIFTY 50"},
@@ -51,86 +44,34 @@ _MARKETS: Dict[str, Dict[str, List[Dict[str, str]]]] = {
51
  {"code": "NIFTYMID100", "name": "NIFTY Midcap 100"},
52
  {"code": "NIFTY500", "name": "NIFTY 500"},
53
  ]
54
- },
55
- "United States": {
56
- "NASDAQ": [
57
- {"code": "NASDAQ100", "name": "NASDAQ-100"}
58
- ]
59
- },
60
- "Germany": {
61
- "XETRA (Deutsche Börse)": [
62
- {"code": "DAX40", "name": "DAX 40"}
63
- ]
64
- },
65
- "Sweden": {
66
- "OMX Stockholm": [
67
- {"code": "OMXS30", "name": "OMX Stockholm 30"}
68
- ]
69
  }
70
  }
71
 
72
- # ---------- public API (for routes) ----------
73
- def get_markets() -> Dict[str, Dict[str, List[Dict[str, str]]]]:
74
- """Return filters structure used by UI."""
75
- return _MARKETS
76
-
77
- def build_companies_payload(code: str) -> Dict[str, Any]:
78
- """Return standardized payload for an index (with caching)."""
79
- code = (code or "").upper().strip()
80
- if not code:
81
- raise ValueError("Index code is required.")
82
-
83
- # try cache
84
- cached = _load_cache(code)
85
- if cached:
86
- return cached
87
-
88
- # route per code
89
- if code in NIFTY_URLS:
90
- url = NIFTY_URLS[code]
91
- text = _http_get_text(url)
92
- rows = _parse_nifty_csv(text)
93
- exchange, country, currency, source = "NSE", "IN", "INR", url
94
-
95
- elif code == "NASDAQ100":
96
- rows, exchange, country, currency, source = _parse_nasdaq100()
97
-
98
- elif code == "DAX40":
99
- rows, exchange, country, currency, source = _parse_dax40()
100
-
101
- elif code == "OMXS30":
102
- rows, exchange, country, currency, source = _parse_omxs30()
103
-
104
- else:
105
- raise ValueError(f"Unknown index code: {code}")
106
-
107
- payload = {
108
- "code": code,
109
- "exchange": exchange,
110
- "country": country,
111
- "currency": currency,
112
- "asOf": _now_iso_utc(),
113
- "count": len(rows),
114
- "constituents": rows,
115
- "source": source,
116
- }
117
- _save_cache(code, payload)
118
- return payload
119
-
120
- # ---------- internals ----------
121
- def _http_get_text(url: str, accept: str = "text/csv,*/*") -> str:
122
  sess = requests.Session()
123
- sess.headers.update({"User-Agent": UA, "Referer": REFERER, "Accept": accept})
124
- r = sess.get(url, timeout=30)
125
  r.raise_for_status()
126
  r.encoding = r.encoding or "utf-8"
127
  return r.text
128
 
129
- def _cache_path(code: str) -> Path:
 
 
 
 
 
 
 
 
 
 
 
130
  return CACHE_DIR / f"{code.lower()}.json"
131
 
132
- def _load_cache(code: str) -> Optional[Any]:
133
- fp = _cache_path(code)
134
  if not fp.exists():
135
  return None
136
  age = time.time() - fp.stat().st_mtime
@@ -139,154 +80,35 @@ def _load_cache(code: str) -> Optional[Any]:
139
  with fp.open("r", encoding="utf-8") as f:
140
  return json.load(f)
141
 
142
- def _save_cache(code: str, payload: Any) -> None:
143
- fp = _cache_path(code)
144
  with fp.open("w", encoding="utf-8") as f:
145
  json.dump(payload, f, ensure_ascii=False, indent=2)
146
 
147
- def _now_iso_utc() -> str:
148
- return time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
149
-
150
- def _parse_nifty_csv(text: str) -> List[Dict[str, str]]:
151
- out: List[Dict[str, str]] = []
152
- rdr = csv.DictReader(io.StringIO(text))
153
- for row in rdr:
154
- sym = (row.get("Symbol") or "").strip()
155
- name = (row.get("Company Name") or "").strip()
156
- if sym and name:
157
- out.append({"symbol": f"{sym}.NS", "company": name})
158
- return out
159
-
160
- # ---- Wikipedia helpers ----
161
- def _fetch_wiki_tables(url: str):
162
- if not HAS_PANDAS:
163
- raise RuntimeError("pandas/lxml not installed. Run: pip install pandas lxml")
164
- html = _http_get_text(url, accept="text/html,*/*")
165
- return pd.read_html(StringIO(html))
166
-
167
- def _pick_table_and_columns(tables, ticker_candidates, company_candidates):
168
- for t in tables:
169
- cols_map = {str(c).strip().lower(): c for c in t.columns}
170
- ticker_col = next((cols_map[c] for c in ticker_candidates if c in cols_map), None)
171
- company_col = next((cols_map[c] for c in company_candidates if c in cols_map), None)
172
- if ticker_col is not None and company_col is not None:
173
- return t, ticker_col, company_col
174
- raise RuntimeError(
175
- f"No suitable table found. Ticker in {ticker_candidates}, company in {company_candidates}."
176
- )
177
-
178
- def _parse_wiki_constituents(url: str, ticker_candidates, company_candidates, suffix: str, upper_tickers: bool) -> List[Dict[str, str]]:
179
- tables = _fetch_wiki_tables(url)
180
- df, t_col, c_col = _pick_table_and_columns(tables, ticker_candidates, company_candidates)
181
- rows: List[Dict[str, str]] = []
182
- for sym, name in zip(df[t_col], df[c_col]):
183
- s = str(sym).strip()
184
- if not s or not str(name).strip():
185
- continue
186
- if upper_tickers:
187
- s = s.upper()
188
- rows.append({"symbol": f"{s}{suffix}", "company": str(name).strip()})
189
- if not rows:
190
- raise RuntimeError("Parsed zero rows from Wikipedia table.")
191
- return rows
192
-
193
- def _parse_nasdaq100():
194
- url = WIKI_PAGES["NASDAQ100"]
195
- rows = _parse_wiki_constituents(
196
- url,
197
- ticker_candidates=["ticker", "symbol"],
198
- company_candidates=["company", "name"],
199
- suffix="",
200
- upper_tickers=True,
201
- )
202
- return rows, "NASDAQ", "US", "USD", url
203
-
204
- def _parse_dax40():
205
- url = WIKI_PAGES["DAX40"]
206
- rows = _parse_wiki_constituents(
207
- url,
208
- ticker_candidates=["ticker symbol", "ticker", "symbol"],
209
- company_candidates=["company", "name"],
210
- suffix=".DE",
211
- upper_tickers=True,
212
- )
213
- return rows, "XETRA", "DE", "EUR", url
214
-
215
- def _parse_omxs30():
216
- url = WIKI_PAGES["OMXS30"]
217
- rows = _parse_wiki_constituents(
218
- url,
219
- ticker_candidates=["ticker", "symbol"],
220
- company_candidates=["company", "name"],
221
- suffix=".ST",
222
- upper_tickers=True,
223
- )
224
- return rows, "OMX Stockholm", "SE", "SEK", url
225
-
226
-
227
- def _all_supported_index_codes(markets: Dict[str, Dict[str, List[Dict[str, str]]]]) -> list[str]:
228
- codes: list[str] = []
229
- for _country, exchanges in markets.items():
230
- for _exch, refs in exchanges.items():
231
- for ref in refs:
232
- codes.append(ref["code"])
233
- return codes
234
-
235
- def _index_display_name(code: str, markets: Dict[str, Dict[str, List[Dict[str, str]]]]) -> str:
236
- cu = code.upper()
237
- for _country, exchanges in markets.items():
238
- for _exch, refs in exchanges.items():
239
- for ref in refs:
240
- if ref["code"].upper() == cu:
241
- return ref.get("name", cu)
242
- return cu
243
-
244
- def search_companies(q: str,
245
- indices: Optional[List[str]] = None,
246
- limit: int = 50) -> List[Dict[str, Any]]:
247
- """
248
- Global search across supported indices (cached via build_companies_payload).
249
- Returns items: {symbol, company, indexCode, indexName, exchange, country}
250
- """
251
- q_norm = (q or "").strip().lower()
252
- if not q_norm:
253
- return []
254
-
255
- markets = get_markets() # use the same source as your filters
256
- index_codes = indices or _all_supported_index_codes(markets)
257
-
258
- results: List[Dict[str, Any]] = []
259
- for code in index_codes:
260
- try:
261
- payload = build_companies_payload(code) # already cached in your module
262
- except Exception:
263
- continue
264
 
265
- idx_name = _index_display_name(code, markets)
266
- for row in payload.get("constituents", []):
267
- sym = str(row.get("symbol", "")).strip()
268
- com = str(row.get("company", "")).strip()
269
- if not sym or not com:
270
- continue
271
- s_low, c_low = sym.lower(), com.lower()
272
- if q_norm in s_low or q_norm in c_low:
273
- results.append({
274
- "symbol": sym,
275
- "company": com,
276
- "indexCode": payload.get("code"),
277
- "indexName": idx_name,
278
- "exchange": payload.get("exchange"),
279
- "country": payload.get("country"),
280
- })
281
- if len(results) >= limit:
282
- break
283
- if len(results) >= limit:
284
- break
285
 
286
- # Simple ranking: exact → startswith → contains
287
- def rank(item):
288
- sym, com = item["symbol"].lower(), item["company"].lower()
289
- return 0 if (sym == q_norm or com == q_norm) else 1 if (sym.startswith(q_norm) or com.startswith(q_norm)) else 2
 
 
 
 
 
 
 
 
290
 
291
- results.sort(key=rank)
292
- return results[:limit]
 
1
  # -*- coding: utf-8 -*-
2
+ """
3
+ Minimal API for PY-Trade filters & companies
4
+ - /getfilters -> countries -> exchanges -> indices
5
+ - /getcompanies?code=NIFTY50 -> { code, asOf, count, constituents[] }
6
+ """
7
+
8
  from __future__ import annotations
9
  import csv, io, json, time, os
10
+ from typing import Dict, List, Any
11
  from pathlib import Path
 
12
 
13
  import requests
14
+ from flask import Flask, request, jsonify
15
+ from flask_cors import CORS
 
 
 
 
 
16
 
17
  # ---------- configuration ----------
18
  UA = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127 Safari/537.36"
 
34
  "NIFTY500": "https://www.niftyindices.com/IndexConstituent/ind_nifty500list.csv",
35
  }
36
 
37
+ # Filters payload for the UI (add more countries/exchanges here later)
38
+ MARKETS: Dict[str, Dict[str, List[Dict[str, str]]]] = {
 
 
 
 
 
 
 
39
  "India": {
40
  "NSE (National Stock Exchange)": [
41
  {"code": "NIFTY50", "name": "NIFTY 50"},
 
44
  {"code": "NIFTYMID100", "name": "NIFTY Midcap 100"},
45
  {"code": "NIFTY500", "name": "NIFTY 500"},
46
  ]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
47
  }
48
  }
49
 
50
+ # ---------- utilities ----------
51
def http_get_text(url: str) -> str:
    """Download *url* and return the decoded response body as text.

    Sends browser-like headers (User-Agent / Referer) because the
    niftyindices.com CSV endpoints reject bare requests.

    Raises requests.HTTPError on a non-2xx response.
    """
    # Use a context manager so the session's connection pool is closed
    # instead of leaking one pool per call.
    with requests.Session() as sess:
        sess.headers.update({"User-Agent": UA, "Referer": REFERER, "Accept": "text/csv,*/*"})
        r = sess.get(url, timeout=25)
        r.raise_for_status()
        # Fall back to UTF-8 when the server omits a charset.
        r.encoding = r.encoding or "utf-8"
        return r.text
58
 
59
def parse_nifty_csv(text: str) -> List[Dict[str, str]]:
    """Parse an NSE index-constituents CSV into symbol/company records.

    Expected columns: Company Name, Industry, Symbol, Series, ISIN Code.
    Rows missing either the symbol or the company name are dropped, and
    every symbol receives the Yahoo-Finance-style ".NS" suffix.
    """
    return [
        {"symbol": f"{ticker}.NS", "company": company}
        for ticker, company in (
            (
                (record.get("Symbol") or "").strip(),
                (record.get("Company Name") or "").strip(),
            )
            for record in csv.DictReader(io.StringIO(text))
        )
        if ticker and company
    ]
69
+
70
def cache_path(code: str) -> Path:
    """Location of the cached JSON payload for *code* (lower-cased filename)."""
    filename = f"{code.lower()}.json"
    return CACHE_DIR / filename
72
 
73
+ def load_cache(code: str) -> Any | None:
74
+ fp = cache_path(code)
75
  if not fp.exists():
76
  return None
77
  age = time.time() - fp.stat().st_mtime
 
80
  with fp.open("r", encoding="utf-8") as f:
81
  return json.load(f)
82
 
83
def save_cache(code: str, payload: Any) -> None:
    """Persist *payload* as pretty-printed UTF-8 JSON in the cache directory."""
    target = cache_path(code)
    target.write_text(
        json.dumps(payload, ensure_ascii=False, indent=2),
        encoding="utf-8",
    )
87
 
88
def build_companies_payload(code: str) -> Dict[str, Any]:
    """Return the standardized constituents payload for an index code.

    Serves a cached copy when one exists; otherwise downloads the
    official NSE CSV for the index, parses it, caches the result and
    returns it.

    Raises ValueError when *code* is empty/None or not a known index.
    """
    # Normalize defensively: tolerate None and surrounding whitespace
    # (the pre-refactor version of this function did exactly this).
    code = (code or "").upper().strip()
    if not code:
        raise ValueError("Index code is required.")

    # 1) try cache
    cached = load_cache(code)
    if cached:
        return cached

    # 2) fetch official CSV
    url = NIFTY_URLS.get(code)
    if not url:
        raise ValueError(f"Unknown index code: {code}")
    text = http_get_text(url)
    rows = parse_nifty_csv(text)

    payload = {
        "code": code,
        "exchange": "NSE",
        "country": "IN",
        "currency": "INR",
        # UTC timestamp in ISO-8601 "Z" form.
        "asOf": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
        "count": len(rows),
        "constituents": rows,
        "source": url,
    }
    save_cache(code, payload)
    return payload
114
 
 
 
pytrade.py CHANGED
@@ -2,7 +2,7 @@ from flask import Flask, request, jsonify
2
  from flask_cors import CORS
3
  from flask import Response
4
  from analysestock import analysestock
5
- from list import build_companies_payload, MARKETS,search_companies
6
  import yfinance as yf
7
  import json
8
  import os
@@ -43,23 +43,6 @@ def get_companies():
43
  except Exception as e:
44
  return jsonify({"error": str(e)}), 500
45
 
46
- @app.get("/searchcompanies")
47
- def http_search_companies():
48
- q = request.args.get("q", "")
49
- if not q.strip():
50
- return jsonify({"query": q, "count": 0, "items": []})
51
- try:
52
- limit = int(request.args.get("limit", "50"))
53
- limit = 50 if limit <= 0 else 200 if limit > 200 else limit
54
- except Exception:
55
- limit = 50
56
-
57
- indices_param = request.args.get("indices")
58
- indices = [x.strip().upper() for x in indices_param.split(",") if x.strip()] if indices_param else None
59
-
60
- items = search_companies(q, indices=indices, limit=limit)
61
- return jsonify({"query": q, "count": len(items), "items": items})
62
-
63
 
64
  @app.route('/analysestock', methods=['POST'])
65
  def analyze_all():
 
2
  from flask_cors import CORS
3
  from flask import Response
4
  from analysestock import analysestock
5
+ from list import build_companies_payload, MARKETS
6
  import yfinance as yf
7
  import json
8
  import os
 
43
  except Exception as e:
44
  return jsonify({"error": str(e)}), 500
45
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
46
 
47
  @app.route('/analysestock', methods=['POST'])
48
  def analyze_all():