cne

crawler-ninja-expired

Expired domains finder for crawler.ninja

Showing:

Popularity

Downloads/wk

4

GitHub Stars

13

Maintenance

Last Commit

6yrs ago

Contributors

1

Package

Dependencies

4

License

Apache-2.0

Type Definitions

Tree-Shakeable

No?

Categories

Readme

Expired Domains Finder

This Crawler.Ninja plugin aims to find expired domains by crawling web sites.

Help & forks welcome! Or please wait... work in progress!

Currently, the expired domain data are stored in a txt file. We plan to add more flexibility in upcoming releases.

How to install

$ npm install crawler-ninja crawler-ninja-expired simple-proxies --save

Crash course

// Module dependencies: proxy file loader, the crawler core,
// the expired-domains plugin, and the console logging plugin.
var proxyLoader = require("simple-proxies/lib/proxyfileloader");
var crawler     = require("crawler-ninja");
var ep          = require("crawler-ninja-expired");
var cs          = require("crawler-ninja/plugins/console-plugin");


// Path to the file that lists the proxies to use (one per line).
var proxyFile = "proxies.txt";

// Build the proxy-loader configuration: read the file, check each
// proxy, and drop the ones that fail the check.
var config = proxyLoader
    .config()
    .setProxyFile(proxyFile)
    .setCheckProxies(true)
    .setRemoveInvalidProxies(true);

// Load and validate the proxies, then start the crawl with the
// resulting list. On failure we only log the error.
proxyLoader.loadProxyFile(config, function (error, proxyList) {
    if (error) {
        console.log(error);
    } else {
        crawl(proxyList);
    }
});


/**
 * Configure the crawler and start crawling with the expired-domains
 * and console plugins registered.
 *
 * @param {Array} proxyList - validated proxies produced by proxyLoader
 */
function crawl(proxyList){
  // Called once the whole crawl is finished.
  var end = function(){
    console.log("Crawl done !");
  };

  // Set the Crawl config
  crawler.init({
      externalDomains : true,
      externalHosts : true,
      firstExternalLinkOnly : true,
      images : false,
      scripts : false,
      links : false, //link tags used for css, canonical, ...
      followRedirect : true,
      retries : 0
  }, end, proxyList);

  // BUG FIX: the module is required as `ep` above; the original code
  // referenced an undefined `expired` variable, which would throw a
  // ReferenceError at runtime.
  var ed = new ep.Plugin({
       expiredTxtFile : "./logs/expireds.txt",
       majecticKey : "[your majecticKey]",
       whois : {user : "[your whoisxmlapi name]", password : "[your whoisxmlapi password]"}
  });

  var consolePlugin = new cs.Plugin();

  crawler.registerPlugin(consolePlugin);
  crawler.registerPlugin(ed);

  crawler.queue({url : "http://yourdomain.com"});
}


Using proxies is not mandatory but it is recommended. Remove the proxyList argument from the crawler.init call if you don't want to use proxies.

You can find all the crawler options on this page.

Rough todolist

  • Get the number of linked domains, anchor text infos, ...

Rate & Review

Great Documentation0
Easy to Use0
Performant0
Highly Customizable0
Bleeding Edge0
Responsive Maintainers0
Poor Documentation0
Hard to Use0
Slow0
Buggy0
Abandoned0
Unwelcoming Community0
100
No reviews found
Be the first to rate

Alternatives

No alternatives found

Tutorials

No tutorials found
Add a tutorial