Skip to content
Snippets Groups Projects
Commit d2900242 authored by vcday's avatar vcday
Browse files

some parser tests working

parent 21ea7013
No related branches found
No related tags found
No related merge requests found
......@@ -19,6 +19,8 @@ add_executable(Parser2Test
parser/tests/parserTest2.cpp
crawler/Readers/StreamReader.h
crawler/Readers/LocalReader.cpp
crawler/Readers/HttpReader.cpp
crawler/Readers/HttpsReader.cpp
)
add_executable(crawler-parser-test
......
......@@ -30,10 +30,7 @@ void Parser::parse ( StreamReader *reader, Tokenizer *tokenizer )
{
unsigned long htmlIt = 0;
unsigned long offsetTitle = 0;
unsigned long offsetURL = 0;
unsigned long offsetAnchor = 0;
unsigned long offsetBody = 0;
unsigned long offset = 0;
ParsedUrl currentUrl = reader->getUrl( );
// tokenize anchor
......@@ -41,10 +38,10 @@ void Parser::parse ( StreamReader *reader, Tokenizer *tokenizer )
string anchorText = currentUrl.getAnchorText( );
if ( anchorText != "" )
{
offsetAnchor = tokenizer->execute( anchorText, offsetAnchor, Tokenizer::ANCHOR );
offset = tokenizer->execute( anchorText, offset, Tokenizer::ANCHOR );
}
// tokenize url
offsetURL = tokenizer->execute( currentUrl.getHost( ) + "/" + currentUrl.getPath( ), offsetURL, Tokenizer::URL );
offset = tokenizer->execute( currentUrl.getHost( ) + "/" + currentUrl.getPath( ), offset, Tokenizer::URL );
string html = reader->PageToString( );
while ( htmlIt < html.size( ) )
......@@ -72,7 +69,6 @@ void Parser::parse ( StreamReader *reader, Tokenizer *tokenizer )
string line = subStr( html, htmlIt, endCloseTag + 1 - htmlIt );
htmlIt = endCloseTag + 2;
// check if line is url
string title = extractTitle( line );
string url = extractUrl( line );
......@@ -85,8 +81,8 @@ void Parser::parse ( StreamReader *reader, Tokenizer *tokenizer )
//checking for p tag
else if ( isParagraph )
{
string body = extractBody( line, offsetTitle, offsetBody, isParagraph, tokenizer, currentUrl );
offsetBody = tokenizer->execute( body, offsetBody, Tokenizer::BODY );
string body = extractBody( line, offset, offset, isParagraph, tokenizer, currentUrl );
offset = tokenizer->execute( body, offset, Tokenizer::BODY );
}
// if html line is url, parses accordingly and pushes to frontier
......@@ -97,12 +93,12 @@ void Parser::parse ( StreamReader *reader, Tokenizer *tokenizer )
// check if line is header; classifies as body text
else if ( header != "")
{
offsetBody = tokenizer->execute( header, offsetBody, Tokenizer::BODY );
offset = tokenizer->execute( header, offset, Tokenizer::BODY );
}
// check if line is title
else if ( title != "" )
{
offsetTitle = tokenizer->execute( title, offsetTitle, Tokenizer::TITLE );
offset = tokenizer->execute( title, offset, Tokenizer::TITLE );
}
else
{
......@@ -184,7 +180,7 @@ string Parser::extractTitle ( string html )
{
string title = "";
char end = '<';
auto pos = findStr( "<title>", html );
auto pos = findStr( "<title", html );
if ( pos < html.size( ) )
{
pos += 7;
......@@ -368,7 +364,8 @@ string Parser::extractBody ( string html, unsigned long & offsetTitle, unsigned
ParsedUrl & currentUrl )
{
string body = "";
unsigned long startParTag = findNext( "<p>", 0, html );
unsigned long startParTag = findNext( "<p", 0, html );
startParTag = findNext( ">", startParTag, html) - 1;
unsigned long closeParTag = findNext( "</p>", startParTag, html );
unsigned long nextCloseTag = findNext( "</", startParTag, html );
startParTag += 3;
......
//
// Created by Ben Bergkamp on 3/21/18.
//
#include <string>
#include <cstring>
#include <vector>
#include <cassert>
#include <unordered_map>
#include <iostream>
#include "../Parser.h"
#include "../../shared/Document.h"
#include "../../shared/ProducerConsumerQueue.h"
#include "../../crawler/Readers/LocalReader.h"
#include "../../crawler/Readers/HttpReader.h"
#include "../../crawler/Readers/HttpsReader.h"
#include "../../util/util.h"
using namespace std;
void TestSimple()
{
ProducerConsumerQueue< ParsedUrl > urlFrontierTest;
void testSimple( );
void testComplex( );
void testURL( );
Parser parser( &urlFrontierTest );
void printDictionary ( unordered_map< string, vector< unsigned long > > dictionary );
int main ( )
{
cout << "Testing Parser ... " << endl << endl;
testSimple( );
testComplex( );
testURL( );
cout << "Parser Tests Passed! :D" << endl;
}
ParsedUrl fake_url = ParsedUrl("http://www.cats.com");
/**
 * Prints every entry of a parsed-document dictionary to stdout, one line per
 * token, formatted as "token:offset1 offset2 ... " (trailing space kept to
 * match the historical output format).
 *
 * @param dictionary  map from decorated token (e.g. "#titl", "$com") to the
 *                    list of word offsets where it appeared
 *
 * NOTE: still passed by value to stay signature-compatible with the forward
 * declaration elsewhere in this file; consider const& once that declaration
 * is updated, since copying the whole map per call is wasteful.
 */
void printDictionary ( const std::unordered_map< std::string, std::vector< unsigned long > > dictionary )
{
    for ( const auto &entry : dictionary )
    {
        std::cout << entry.first << ':';
        // Fix: the original iterated with a signed int against size( )
        // (signed/unsigned comparison); range-for avoids the index entirely.
        for ( unsigned long offset : entry.second )
        {
            std::cout << offset << " ";
        }
        std::cout << std::endl;
    }
}
string filepath = util::GetCurrentWorkingDir() + "/tests/plaintext.txt";
LocalReader reader(filepath);
reader.setUrl(fake_url);
auto success = reader.request();
if(!success)
void testSimple ( )
{
cerr << "Couldnt open file\n";
exit(1);
}
cout << "Testing Simple: " << endl;
ProducerConsumerQueue< ParsedUrl > urlFrontierTest;
Parser parser( &urlFrontierTest );
ParsedUrl fake_url = ParsedUrl( "http://www.cats.com" );
string filepath = util::GetCurrentWorkingDir( ) + "/tests/plaintext.txt";
LocalReader reader( filepath );
reader.setUrl( fake_url );
auto success = reader.request( );
if ( !success )
{
cerr << "Couldn't open file\n";
exit( 1 );
}
auto dictionary = parser.execute( &reader );
printDictionary( *dictionary );
for ( auto it = dictionary->begin( ); it != dictionary->end( ); it++ )
assert ( dictionary != nullptr );
assert ( dictionary->size( ) == 13 );
assert ( dictionary->at( "%goodby" ).size( ) == 1 && dictionary->at( "%goodby" )[ 0 ] == 12 );
assert ( dictionary->at( "%battl" ).size( ) && dictionary->at( "%battl" )[ 0 ] == 7 );
assert ( dictionary->at( "%bottl" ).size( ) == 2 && dictionary->at( "%bottl" )[ 0 ] == 9 && dictionary->at( "%bottl" )[ 1 ] == 10 );
assert ( dictionary->at( "%hello" ).size( ) == 2 && dictionary->at( "%hello" )[ 0 ] == 11 && dictionary->at( "%hello" )[ 1 ] == 13 );
assert ( dictionary->at( "#aardvark" ).size( ) == 1 && dictionary->at( "#aardvark" )[ 0 ] == 3 );
assert ( dictionary->at( "%bridg" ).size( ) == 1 && dictionary->at( "%bridg" )[ 0 ] == 8 );
assert ( dictionary->at( "%bas" ).size( ) == 1 && dictionary->at( "%bas" )[ 0 ] == 6 );
assert ( dictionary->at( "#anteat" ).size( ) == 1 && dictionary->at( "#anteat" )[ 0 ] == 4 );
assert ( dictionary->at( "$cat" ).size( ) == 1 && dictionary->at( "$cat" )[ 0 ] == 0 );
assert ( dictionary->at( "$com" ).size( ) == 1 && dictionary->at( "$com" )[ 0 ] == 1 );
assert ( dictionary->at( "=www.cats.com/" ).size( ) == 1 && dictionary->at( "=www.cats.com/" )[ 0 ] == 0 );
assert ( dictionary->at( "#appl" ).size( ) == 1 && dictionary->at( "#appl" )[ 0 ] == 2 );
assert ( dictionary->at( "#allig" ).size( ) == 1 && dictionary->at( "#allig" )[ 0 ] == 5 );
delete dictionary;
dictionary = nullptr;
cout << "Simple Test Passed!" << endl << endl;
}
void testComplex( )
{
cout << it->first << ':';
for ( int i = 0; i < it->second.size( ); ++i )
cout << "Testing Complex: " << endl;
ProducerConsumerQueue< ParsedUrl > urlFrontierTest;
Parser parser( &urlFrontierTest );
ParsedUrl httpURL = ParsedUrl( "www.veronicacday.com" );
HttpReader reader( httpURL );
auto success = reader.request( );
if ( !success )
{
cout << it->second[ i ] << " ";
cerr << "Couldn't open file\n";
exit( 1 );
}
cout << std::endl;
}
auto dictionary = parser.execute( &reader );
printDictionary( *dictionary );
}
int main()
void testURL ( )
{
TestSimple();
cout << "Testing URL: " << endl;
ProducerConsumerQueue< ParsedUrl > urlFrontierTest;
Parser parser( &urlFrontierTest );
ParsedUrl fake_url = ParsedUrl( "http://testurl.com" );
string filepath = util::GetCurrentWorkingDir( ) + "/tests/urlTest.html";
LocalReader reader( filepath );
reader.setUrl( fake_url );
auto success = reader.request( );
if ( !success )
{
cerr << "Couldn't open file\n";
exit( 1 );
}
auto dictionary = parser.execute( &reader );
printDictionary( *dictionary );
assert ( dictionary != nullptr );
assert ( dictionary->size( ) == 3 );
assert ( dictionary->at( "=testurl.com/" )[ 0 ] == 0 );
assert ( urlFrontierTest.Pop( ).getCompleteUrl( ) == "http://www.bafta.org/" );
assert ( dictionary->find( "$bafta" ) == dictionary->end( ) );
assert ( dictionary->at( "$testurl" )[ 0 ] == 0 );
assert ( dictionary->at( "$com" )[ 0 ] == 1 );
delete dictionary;
dictionary = nullptr;
cout << "URL Test Passed!" << endl << endl;
}
\ No newline at end of file
//
#include <string>
#include <cstring>
#include <cassert>
......@@ -6,237 +6,128 @@
#include "../Parser.h"
#include "../../shared/Document.h"
#include "../../shared/ProducerConsumerQueue.h"
using namespace std;
void testSimple ( );
void testComplex ( );
void testURL ( );
void testExtractBody( );
void testBody( );
//
//using namespace std;
//
//void testSimple ( );
//
//void testComplex ( );
//
//void testURL ( );
//
//void testExtractBody ( );
//
//void testBody ( );
//
// Driver for the parser test suite: runs each test in sequence with progress
// banners.  Each test*( ) aborts via assert on failure, so reaching the last
// line means every check passed.
int main ( )
{
cout << "Testing Parser ... " << endl << endl;
// Anchor/href extraction and url-frontier push behavior.
cout << "Testing URL: " << endl;
testURL( );
cout << "URL Test Passed!" << endl << endl;
// Title-only document tokenization.
cout << "Testing Simple: " << endl;
testSimple( );
cout << "Simple Test Passed!" << endl << endl;
// Full page assembled from the cats.html fixture on disk.
cout << "Testing Complex: " << endl;
testComplex( );
cout << "Complex Test Passed!" << endl;
// Body/<p>-tag extraction; these two print their own results and have no
// "Passed" banner here.
cout << "Testing BODY: " << endl;
testExtractBody( );
testBody( );
cout << "Parser Tests Passed! :D" << endl;
}
void testSimple( )
{
ProducerConsumerQueue< ParsedUrl > urlFrontierTest;
ParsedUrl url = ParsedUrl( "http://www.testurl.com" );
char docString[10240];
strcpy( docString, "<title>This Cat Title Cat</title>" );
Document document( url, docString );
Parser parser( &urlFrontierTest );
auto dictionary = parser.execute( &document );
for ( auto it = dictionary->begin( ); it != dictionary->end( ); it++ )
{
cout << it->first << ':';
for ( int i = 0; i < it->second.size( ); ++i )
{
cout << it->second[ i ] << " ";
}
cout << std::endl;
}
assert ( dictionary != nullptr );
assert ( dictionary->size( ) == 4 );
assert ( dictionary->find( "#cat" ) != dictionary->end( ) );
assert ( dictionary->find( "$testurl" ) != dictionary->end( ) );
assert ( dictionary->find( "#titl" ) != dictionary->end( ) );
assert ( dictionary->find( "#this" ) == dictionary->end( ) );
assert ( dictionary->at( "#cat" )[ 0 ] == 0 && dictionary->at( "#cat" )[ 1 ] == 2 );
assert ( dictionary->at( "#titl" )[ 0 ] == 1 );
delete dictionary;
dictionary = nullptr;
}
void testComplex ( )
{
ProducerConsumerQueue< ParsedUrl > urlFrontierTest;
ifstream file( "../tests/cats.html" );
string temp;
string docString = "<title>Joe the Cat</title>\n";
docString += "<a href=\"https://www.w3schools.com/html/\">Visit our HTML tutorial</a>\n";
while ( std::getline( file, temp ) )
{
docString += temp;
}
ParsedUrl url = ParsedUrl( "https://www.w3schools.com/tests/cats.html" );
char *writable = new char[docString.size( ) + 1];
std::copy( docString.begin( ), docString.end( ), writable );
writable[ docString.size( ) ] = '\0';
Document document( url, writable );
Parser parser( &urlFrontierTest );
auto dictionary = parser.execute( &document );
for ( auto it = dictionary->begin( ); it != dictionary->end( ); it++ )
{
cout << it->first << ':';
for ( int i = 0; i < it->second.size( ); ++i )
{
cout << it->second[ i ] << " ";
}
cout << std::endl;
}
assert ( dictionary != nullptr );
assert ( dictionary->size( ) == 8 );
assert ( dictionary->find( "#cat" ) != dictionary->end( ) );
assert ( dictionary->find( "#stori" ) != dictionary->end( ) );
assert ( dictionary->find( "#joe" ) != dictionary->end( ) );
assert ( dictionary->find( "$w3school" ) != dictionary->end( ) );
assert ( dictionary->find( "$test" ) != dictionary->end( ) );
assert ( dictionary->find( "$cat" ) != dictionary->end( ) );
assert ( dictionary->find( "#the" ) == dictionary->end( ) );
assert ( dictionary->find( "#of" ) == dictionary->end( ) );
delete dictionary;
dictionary = nullptr;
delete[] writable;
writable = nullptr;
}
// Feeds the parser a single anchor-tag line and verifies link handling:
// the href target is pushed onto the url frontier, the linked site's domain
// ("$bafta") is NOT indexed into this document's dictionary, while the
// document's own host ("$testurl") is.
void testURL ( )
{
// const char *line = "<li><span class=\"official-website\"><span class=\"url\"><a rel=\"nofollow\" class=\"external text\" href=\"http://www.bafta.org/\">Official website</a></span></span></li>";
const char *line = "<span class=\"url\"><a rel=\"nofollow\" class=\"external text\" href=\"http://www.bafta.org/\">Official website</a>";
ProducerConsumerQueue< ParsedUrl > urlFrontierTest;
ParsedUrl url = ParsedUrl( "http://testurl.com" );
char docString[10240];
strcpy( docString, line );
Document document( url, docString );
Parser parser( &urlFrontierTest );
auto dictionary = parser.execute( &document );
// Dump the dictionary for debugging before asserting on its contents.
for ( auto it = dictionary->begin( ); it != dictionary->end( ); it++ )
{
cout << it->first << ':';
for ( int i = 0; i < it->second.size( ); ++i )
{
cout << it->second[ i ] << " ";
}
cout << std::endl;
}
// The parsed href should have been pushed onto the frontier.
// NOTE(review): CompleteUrl is presumably a C-string member of ParsedUrl
// holding the full url — confirm against ParsedUrl's definition.
string completeUrl = "";
completeUrl.assign( urlFrontierTest.Pop( ).CompleteUrl );
assert ( completeUrl == "http://www.bafta.org/" );
assert ( dictionary->find( "$bafta" ) == dictionary->end( ) );
assert ( dictionary->find( "$testurl" ) != dictionary->end( ) );
delete dictionary;
dictionary = nullptr;
// NOTE(review): the commented-out banner lines and the final "Parser Tests
// Passed!" print below appear to be merge leftovers from main( ); they
// belong in the driver, not in this individual test.
// cout << "Testing URL: " << endl;
// testURL( );
// cout << "URL Test Passed!" << endl << endl;
// cout << "Testing Simple: " << endl;
// testSimple( );
// cout << "Simple Test Passed!" << endl << endl;
// cout << "Testing Complex: " << endl;
// testComplex( );
// cout << "Complex Test Passed!" << endl;
// cout << "Testing BODY: " << endl;
// testExtractBody( );
// testBody( );
cout << "Parser Tests Passed! :D" << endl;
}
// Runs the parser over a realistic full HTML page (doctype, head, meta,
// style, body with <h1> and <p>) and prints the resulting dictionary for
// manual inspection.  The size assert is still disabled below, so this test
// only checks that parsing completes.
// NOTE(review): the returned dictionary is never deleted here — leaked; and
// the literal below is roughly 800 bytes, close to the 1024-byte buffer —
// verify the margin if the fixture text grows.
void testBody( )
{
ProducerConsumerQueue< ParsedUrl > urlFrontierTest;
ParsedUrl url = ParsedUrl( "http://www.testurl.com" );
char docString[1024];
strcpy( docString, "<!DOCTYPE html>\n"
"<html>\n"
"<head>\n"
"<!-- HTML Codes by Quackit.com -->\n"
"<title>\n"
"Story of Cat</title>\n"
"<meta name=\"viewport\" content=\"width=device-width, initial-scale=1\">\n"
"<meta name=\"keywords\" content=\"cat story\">\n"
"<meta name=\"description\" content=\"This is the tale of a cat names joe\">\n"
"<style>\n"
"body {background-color:#ffffff;background-repeat:no-repeat;background-position:top left;background-attachment:fixed;}\n"
"h1{font-family:Arial, sans-serif;color:#000000;background-color:#ffffff;}\n"
"p {font-family:Georgia, serif;font-size:14px;font-style:normal;font-weight:normal;color:#000000;background-color:#ffffff;}\n"
"</style>\n"
"</head>\n"
"<body>\n"
"<h1>Joe the cat</h1>\n"
"<p>On Saturday, joe the cat went to the store. He climbed up a mountain? It was weird. The store was called Food Store</p>\n"
"</body>\n"
"</html>" );
Document document( url, docString );
Parser parser( &urlFrontierTest );
auto dictionary = parser.execute( &document );
cout << dictionary->size( ) << endl;
//assert( dictionary->size( ) == 4);
// Dump the dictionary for manual inspection.
for ( auto it = dictionary->begin( ); it != dictionary->end( ); it++ )
{
cout << it->first << ':';
for ( int i = 0; i < it->second.size( ); ++i )
{
cout << it->second[ i ] << " ";
}
cout << std::endl;
}
}
void testExtractBody ( )
{
ProducerConsumerQueue< ParsedUrl > urlFrontierTest;
ParsedUrl url = ParsedUrl( "http://www.testurl.com" );
char docString[1024];
strcpy( docString, "<title>Paragraph body text hello</title>" );
Document document( url, docString );
Parser parser( &urlFrontierTest );
auto dictionary = parser.execute( &document );
cout << dictionary->size( ) << endl;
for ( auto it = dictionary->begin( ); it != dictionary->end( ); it++ )
{
cout << it->first << ':';
for ( int i = 0; i < it->second.size( ); ++i )
{
cout << it->second[ i ] << " ";
}
cout << std::endl;
}
cout << endl << endl;
assert( dictionary->size( ) == 6);
char docString2[1024];
strcpy( docString2, "<p>Paragraph body text hello <title>Specific title</title> more body words</p>" );
Document document2( url, docString2 );
Parser parser2 ( &urlFrontierTest );
dictionary = parser.execute( &document2 );
cout << "Dictionary 2 size " << dictionary->size( ) << endl;
for ( auto it = dictionary->begin( ); it != dictionary->end( ); it++ )
{
cout << it->first << ':';
for ( int i = 0; i < it->second.size( ); ++i )
{
cout << it->second[ i ] << " ";
}
cout << std::endl;
}
assert( dictionary->size( ) == 10);
assert( dictionary->at( "#specif" )[0] == 0);
assert( dictionary->at("%paragraph")[0] == 0);
assert( dictionary->at("%bodi")[1] == 5);
}
//
//
//
//\
//
//
//void testBody ( )
// {
// ProducerConsumerQueue< ParsedUrl > urlFrontierTest;
// ParsedUrl url = ParsedUrl( "http://www.testurl.com" );
// char docString[1024];
// strcpy( docString, "<!DOCTYPE html>\n"
// "<html>\n"
// "<head>\n"
// "<!-- HTML Codes by Quackit.com -->\n"
// "<title>\n"
// "Story of Cat</title>\n"
// "<meta name=\"viewport\" content=\"width=device-width, initial-scale=1\">\n"
// "<meta name=\"keywords\" content=\"cat story\">\n"
// "<meta name=\"description\" content=\"This is the tale of a cat names joe\">\n"
// "<style>\n"
// "body {background-color:#ffffff;background-repeat:no-repeat;background-position:top left;background-attachment:fixed;}\n"
// "h1{font-family:Arial, sans-serif;color:#000000;background-color:#ffffff;}\n"
// "p {font-family:Georgia, serif;font-size:14px;font-style:normal;font-weight:normal;color:#000000;background-color:#ffffff;}\n"
// "</style>\n"
// "</head>\n"
// "<body>\n"
// "<h1>Joe the cat</h1>\n"
// "<p>On Saturday, joe the cat went to the store. He climbed up a mountain? It was weird. The store was called Food Store</p>\n"
// "</body>\n"
// "</html>" );
// Document document( url, docString );
//
// Parser parser( &urlFrontierTest );
// auto dictionary = parser.execute( &document );
// cout << dictionary->size( ) << endl;
// //assert( dictionary->size( ) == 4);
// for ( auto it = dictionary->begin( ); it != dictionary->end( ); it++ )
// {
// cout << it->first << ':';
// for ( int i = 0; i < it->second.size( ); ++i )
// {
// cout << it->second[ i ] << " ";
// }
// cout << std::endl;
// }
// }
//
//void testExtractBody ( )
// {
// ProducerConsumerQueue< ParsedUrl > urlFrontierTest;
// ParsedUrl url = ParsedUrl( "http://www.testurl.com" );
// char docString[1024];
// strcpy( docString, "<title>Paragraph body text hello</title>" );
// Document document( url, docString );
//
// Parser parser( &urlFrontierTest );
// auto dictionary = parser.execute( &document );
// cout << dictionary->size( ) << endl;
// for ( auto it = dictionary->begin( ); it != dictionary->end( ); it++ )
// {
// cout << it->first << ':';
// for ( int i = 0; i < it->second.size( ); ++i )
// {
// cout << it->second[ i ] << " ";
// }
// cout << std::endl;
// }
// cout << endl << endl;
// assert( dictionary->size( ) == 6 );
//
// char docString2[1024];
// strcpy( docString2, "<p>Paragraph body text hello <title>Specific title</title> more body words</p>" );
// Document document2( url, docString2 );
// Parser parser2( &urlFrontierTest );
// dictionary = parser.execute( &document2 );
// cout << "Dictionary 2 size " << dictionary->size( ) << endl;
// for ( auto it = dictionary->begin( ); it != dictionary->end( ); it++ )
// {
// cout << it->first << ':';
// for ( int i = 0; i < it->second.size( ); ++i )
// {
// cout << it->second[ i ] << " ";
// }
// cout << std::endl;
// }
// assert( dictionary->size( ) == 10 );
// assert( dictionary->at( "#specif" )[ 0 ] == 0 );
// assert( dictionary->at( "%paragraph" )[ 0 ] == 0 );
// assert( dictionary->at( "%bodi" )[ 1 ] == 5 );
//
//
// }
//
//
......@@ -44,7 +44,7 @@ public:
*temp_AnchorText,
*temp_pathBuffer;
//intialize anchor text to "null"
//intialize anchor text to ""
char *null = new char[2];
strcpy( null, string( "" ).c_str( ) );
temp_AnchorText = null;
......@@ -90,15 +90,11 @@ public:
{
for ( ; *i; i++ )
{
if ( *i == Period )
temp_Domain = i;
}
}
// Whatever remains is the Path. // need to remove fragments
temp_Path = p;
......@@ -107,8 +103,6 @@ public:
if ( *p )
// Mark the end of the Path, remove fragments.
*p++ = 0;
}
else
temp_Host = temp_Path = p;
......
<html>
<title>
Apple Ardvark Anteater Alligator
Apple Aardvark Anteater Alligator
</title>
<p class="text-muted">
Basement Battle Bridge Bottle
Basement Battle Bridge Bottle Bottle
</p>
<p class="text-muted">
Hello Goodbye <a href="http://veronicaday.com/" class="btn btn-yes">
Hello Goodbye Hello <a href="http://veronicaday.com/" class="btn btn-yes"</a>
Cat Cradle
</p>
</html>
\ No newline at end of file
<!DOCTYPE html>
<html lang="en">
<body>
<li><span class=\"official-website\"><span class=\"url\"><a rel=\"nofollow\" class=\"external text\" href="http://www.bafta.org/">Official website</a></span></span></li>
</body>
</html>
\ No newline at end of file
......@@ -39,9 +39,8 @@ unsigned long Tokenizer::execute ( string originalText, unsigned long offset, ch
{
set< char > split = { '.', ':', '/', '\\', '_', '?', '-', '~', '#', '[', ']', '@', '!', '$', '&', '\'',
'(', ')', '*', '+', ',', ';', '=' };
string codedURL = "=";
codedURL += originalText;
(*docIndex)[codedURL].push_back(0);
( *docIndex )[ Tokenizer::HOST + originalText ].push_back(0);
return tokenize( splitStr( originalText, split, true ), offset, decorator );
}
......
......@@ -18,6 +18,7 @@ public:
static const char ANCHOR = '@';
static const char URL = '$';
static const char BODY = '%';
static const char HOST = '=';
/**
* Tokenizer Cstor
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment