import urllib.parse          # URL-encoding of scrip symbols
import requests              # Python requests library for HTTP
# Beautiful Soup functions to parse the data returned from the website
from bs4 import BeautifulSoup

# One NSE scrip symbol per line in niftyStocks.txt
scripFile = open("niftyStocks.txt")
scripList = scripFile.read().strip().split("\n")
scripFile.close()
#print("Total number of scrips in list = ", len(scripList))

mcapFile = "marketCap.txt"
f = open(mcapFile, "w")

# Constant pieces of the NSE GetQuote URL
urlNSE = "https://www.nseindia.com/live_market/"
str1 = "dynaContent/live_watch/get_quote/GetQuote.jsp?symbol="
str2 = "&illiquid=0&smeFlag=0&itpFlag=0"

for scrip in scripList:
    name = urllib.parse.quote_plus(scrip)
    url = urlNSE + str1 + name + str2
    # Status code 200 indicates the page was downloaded successfully
    htmlFile = requests.get(url)
    htmlText = BeautifulSoup(htmlFile.content, 'html.parser')

    # Traded value (INR lakhs): span id = tradedValue, key: totalTradedValue
    # Free-float M-CAP (INR crores): span id = ffmid, key: cm_ffm
    # Search for elements by id / class / tag; the quote data sits as a JSON
    # string inside the div with id 'responseDiv'
    respText = htmlText.find('div', attrs={'id': 'responseDiv'}).text

    # Close price
    clpText = respText.split('closePrice\":\"')[1]
    clPrice = clpText.split('\",\"')[0].replace(',', '')
    # Free-float market cap (INR crores)
    fftText = respText.split('cm_ffm\":\"')[1]
    fftMcap = fftText.split('\",\"')[0].replace(',', '')
    # Total traded value (INR lakhs)
    ttvText = respText.split('totalTradedValue\":\"')[1]
    ttValue = ttvText.split('\",\"')[0].replace(',', '')

    # Build the output line (avoid shadowing the built-in str)
    outLine = '{:>11}'.format(scrip) + '; ' + '{:>10}'.format(clPrice)
    outLine = outLine + '; ' + '{:>12}'.format(fftMcap)
    outLine = outLine + '; ' + '{:>12}'.format(ttValue)
    print(outLine)
    f.write(outLine)
    f.write('\n')

f.close()
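
# Note: the responseDiv payload is JSON, so the string-splitting above can be
# brittle. Below is a minimal alternative sketch using json.loads(); it assumes
# the payload carries a top-level "data" list holding one record with the same
# keys used above (closePrice, cm_ffm, totalTradedValue) -- verify against a
# live response before relying on it.
import json

def parse_quote(resp_text):
    record = json.loads(resp_text)["data"][0]                   # first (and only) quote record
    close_price = record["closePrice"].replace(',', '')         # e.g. strip thousands separators
    ffm_cap = record["cm_ffm"].replace(',', '')                  # free-float market cap (INR crores)
    traded_value = record["totalTradedValue"].replace(',', '')   # traded value (INR lakhs)
    return close_price, ffm_cap, traded_value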