Commit 068ba1b5 authored by sgebreeg

memory fixes v1

parent 955e75c9
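In short: this commit begins replacing per-node copies of the string dataset with a single shared dataset passed by reference plus vectors of row indices. DecisionTree, train, splitData, findBestSplit, classifyWithEntropy, bagData, and oversample now take the full data and an index vector, and splits return index vectors (the new FeatureSplitDataIndx) instead of copied sub-tables.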
@@ -10,25 +10,20 @@
using namespace std;
DecisionTree::DecisionTree(vector <vector<string>> data, int maxDepth, float featureWeight,
vector <FeatureType> featureType) {
// vector<int> index = randomSelect_WithoutReplacement(data.size(), featureWeight);
// for(int i=0; i<index.size();i++){
// for(int j=1; j<data[index[i]].size(); j++){
// data[index[i]][j] = data[index[i]][0];
// }
// }
DecisionTree::DecisionTree(vector <vector<string>> &data, vector<int> &trainingIndx, int maxDepth, float featureWeight,
vector <FeatureType> &featureType) {
this->maxDepth = maxDepth;
this->featureWeight = featureWeight;
this->root = train(data, featureType, 0.0, 0, maxDepth, featureWeight);
//TODO vector<int>: take all iterations
this->root = train(data, featureType, 0.0, 0, maxDepth, featureWeight, trainingIndx);
// this->printTree(this->root, 0);
};
Node *train(vector <vector<string>> data, vector <FeatureType> featureType,
double parentEntropy, int currentDepth, int maxDepth, float featureWeight) {
Node *train(vector <vector<string>> &data, vector <FeatureType> &featureType,
double parentEntropy, int currentDepth, int maxDepth, float featureWeight, vector<int> nodeDatasetIndices ) {
std::pair<string, double> classificationAndEntropy = classifyWithEntropy(data);
//TODO pass data pointer and index vector
std::pair<string, double> classificationAndEntropy = classifyWithEntropy(data, nodeDatasetIndices);
string classification = classificationAndEntropy.first;
double originalEntropy = classificationAndEntropy.second;
double informationGainFromParent;
@@ -45,9 +40,10 @@ Node *train(vector <vector<string>> data, vector <FeatureType> featureType,
} else {
//TODO send data vector of index
//find best split point
BestSplitPoint bestSplit = findBestSplit(parentEntropy, currentDepth, data,
featureType, featureWeight);
featureType, featureWeight, nodeDatasetIndices);
if(bestSplit.featureIdx == -1 || bestSplit.featureIdx > data.size()-1 ){
Node *leaf = new Node(NULL, NULL, NULL, true, classification, originalEntropy, informationGainFromParent);
@@ -57,14 +53,16 @@ Node *train(vector <vector<string>> data, vector <FeatureType> featureType,
// cout<<"splitting data"<<endl;
//split data
FeatureSplitData featureSplitData = splitData(data, bestSplit.featureIdx, featureType,
bestSplit.splitpoint);
//TODO send data and index vector
//TODO return indices for left and right
FeatureSplitDataIndx featureSplitData = splitData(data, bestSplit.featureIdx, featureType,
bestSplit.splitpoint, nodeDatasetIndices);
//No longer splittable
if (featureSplitData.dataTrue[0].size() < 1 || featureSplitData.dataFalse[0].size() < 1) {
if (featureSplitData.dataTrue.size() < 1 || featureSplitData.dataFalse.size() < 1) {
Node *leaf = new Node(NULL, NULL, NULL, true, classification, originalEntropy, informationGainFromParent);
return leaf;
}
@@ -75,10 +73,11 @@ Node *train(vector <vector<string>> data, vector <FeatureType> featureType,
// cout<<"Next Train"<<endl;
//call train for left and right data
Node *leftNode = train(featureSplitData.dataTrue, featureType, originalEntropy, currentDepth + 1, maxDepth,
featureWeight);
Node *rightNode = train(featureSplitData.dataFalse, featureType, originalEntropy, currentDepth + 1, maxDepth,
featureWeight);
//TODO pass int vector from splits
Node *leftNode = train(data, featureType, originalEntropy, currentDepth + 1, maxDepth,
featureWeight, featureSplitData.dataTrue);
Node *rightNode = train(data, featureType, originalEntropy, currentDepth + 1, maxDepth,
featureWeight, featureSplitData.dataFalse);
Node *node = new Node(question, leftNode, rightNode, false, classification, originalEntropy,
......
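The change above is the heart of the refactor: train no longer receives a freshly copied sub-table per node; it receives the whole dataset by reference plus a vector of the row indices that reach the node, and the recursive calls pass along the index vectors produced by splitData. A minimal, self-contained sketch of that pattern (illustrative names, not this project's exact API):

    #include <iostream>
    #include <string>
    #include <utility>
    #include <vector>
    using namespace std;

    // data is column-major (data[feature][row]), matching this codebase.
    // Partitioning returns index vectors only, so no string is ever copied.
    pair<vector<int>, vector<int>>
    partitionRows(const vector<vector<string>> &data, const vector<int> &rows,
                  int feature, const string &value) {
        pair<vector<int>, vector<int>> out;
        for (int r : rows) {
            if (data[feature][r] == value)
                out.first.push_back(r);   // rows where the test holds
            else
                out.second.push_back(r);  // rows where it fails
        }
        return out;
    }

    int main() {
        // two feature columns plus a label column, three rows
        vector<vector<string>> data = {{"a", "b", "a"}, {"1", "2", "3"}, {"yes", "no", "yes"}};
        vector<int> rows = {0, 1, 2};
        auto split = partitionRows(data, rows, 0, "a");
        cout << split.first.size() << " true rows, " << split.second.size() << " false rows\n";
    }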
@@ -12,7 +12,7 @@
class DecisionTree {
public:
DecisionTree(vector <vector<string>> data, int maxDepth, float featureWeight, vector <FeatureType> featureType);
DecisionTree(vector <vector<string>> &data, vector<int> &trainingIndx, int maxDepth, float featureWeight, vector <FeatureType> &featureType);
string predictSingle(vector <string> test, Node *treeRoot, PredictionReport * report);
string predictSingle(vector <string> test, Node *treeRoot);
@@ -29,8 +29,8 @@ private:
};
Node *train(vector <vector<string>> data, vector <FeatureType> featureType,
double entropy, int currentDepth, int maxDepth, float featureWeight);
Node *train(vector <vector<string>> &data, vector <FeatureType> &featureType,
double entropy, int currentDepth, int maxDepth, float featureWeight, vector<int> nodeDatasetIndices);
#endif //RACER_DECISIONTREE_HPP
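One remaining copy in these signatures: train still takes nodeDatasetIndices by value, so every recursive call duplicates the node's index vector. Since featureSplitData appears to be dead after the two recursive calls, moving its members in would avoid that copy; a possible follow-up (an assumption about the elided code, not part of this commit):

    // assumes featureSplitData is not used after these calls
    Node *leftNode = train(data, featureType, originalEntropy, currentDepth + 1, maxDepth,
                           featureWeight, std::move(featureSplitData.dataTrue));
    Node *rightNode = train(data, featureType, originalEntropy, currentDepth + 1, maxDepth,
                           featureWeight, std::move(featureSplitData.dataFalse));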
@@ -39,7 +39,7 @@ vector<int> getParts(int trees, int cpus) {
return temp;
}
RandomForest::RandomForest(vector <vector<string>> trainingData, vector <FeatureType> featureTypes, int numTrees,
RandomForest::RandomForest(vector <vector<string>> &data, vector<int> &trainingIndx, vector <FeatureType> &featureTypes, int numTrees,
float baggingWeight, float featureWeight, int maxDepth) {
vector < DecisionTree * > decisionTrees;
this->featureWeight = featureWeight;
@@ -57,13 +57,13 @@ RandomForest::RandomForest(vector <vector<string>> trainingData, vector <Feature
vector<int> temp = getParts(numTrees, num_cpus); //determine how many trees to run in parallel
for (int i = 0; i < num_cpus; i++) {
if (i < temp.size())
threads[i] = std::thread([&iomutex, i, temp, trainingData,baggingWeight,
maxDepth, featureWeight, featureTypes, &decisionTrees] {
threads[i] = std::thread([&iomutex, i, temp, &data, &trainingIndx, baggingWeight,
maxDepth, featureWeight, &featureTypes, &decisionTrees] {
for (int j = 0; j < temp.at(i); j++) {
vector <vector<string>> baggedData = bagData(trainingData, baggingWeight);
// vector <int> baggedData = bagData(trainingIndx, baggingWeight); //TODO fix this
cout<<"Training tree "<< j<<" in thread "<<i<<endl;
DecisionTree *tree = new DecisionTree(baggedData, maxDepth, featureWeight, featureTypes);
DecisionTree *tree = new DecisionTree(data, trainingIndx, maxDepth, featureWeight, featureTypes);
cout<<"Done training tree "<< j<<" in thread "<<i<<endl;
{
// Use a lexical scope and lock_guard to safely lock the mutex only for
......
@@ -12,7 +12,7 @@ public:
float featureWeight;
int depth;
RandomForest(vector <vector<string>> trainingData, vector <FeatureType> featureType, int numTrees,
RandomForest(vector <vector<string>> &data, vector<int> &trainingIndx, vector <FeatureType> &featureType, int numTrees,
float baggingWeight, float featureWeight, int maxDepth);
vector <string> getForestPrediction(vector <string> test, RandomForest *forest, vector <string> features);
......
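Two things to note in the RandomForest changes. First, the thread lambdas now capture data, trainingIndx, and featureTypes by reference instead of copying the whole training set into every thread; that is safe here only because the trees treat the shared dataset as read-only. Second, the bagData call is commented out ("//TODO fix this"), so in this commit every tree trains on the identical trainingIndx set and the forest's bootstrap randomness is temporarily gone. A sketch of how per-thread bagging over shared indices could look (illustrative, using <random> rather than the project's randSelectIdxWithReplacement):

    #include <algorithm>
    #include <random>
    #include <thread>
    #include <vector>
    using namespace std;

    // Draw a bootstrap sample of row indices, with replacement.
    vector<int> bootstrap(const vector<int> &indices, float weight, unsigned seed) {
        mt19937 gen(seed);
        uniform_int_distribution<size_t> pick(0, indices.size() - 1);
        size_t n = static_cast<size_t>(indices.size() * weight);
        vector<int> bag;
        bag.reserve(n);
        for (size_t i = 0; i < n; i++)
            bag.push_back(indices[pick(gen)]);
        sort(bag.begin(), bag.end());
        return bag;
    }

    int main() {
        vector<int> trainingIdx = {0, 1, 2, 3, 4, 5, 6, 7};
        vector<thread> pool;
        for (unsigned t = 0; t < 4; t++)
            pool.emplace_back([&trainingIdx, t] {                  // shared indices: read-only
                vector<int> bag = bootstrap(trainingIdx, 0.8f, t); // per-thread sample
                // ... train one tree on (sharedData, bag) ...
            });
        for (auto &th : pool)
            th.join();
    }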
@@ -236,6 +236,7 @@ calSplitEntropy(std::map<std::string, int> leftLabelCount, std::map<std::string,
return splitEntropy;
}
//TODO
float calculateSplitEntropy(FeatureSplitData featsplitData) {
vector <vector<string>> splitDataTrue = featsplitData.dataTrue;
vector <vector<string>> splitDataFalse = featsplitData.dataFalse;
@@ -251,68 +252,57 @@ float calculateSplitEntropy(FeatureSplitData featsplitData) {
}
FeatureSplitData
splitData(vector <vector<string>> data, int splitFeature, vector <FeatureType> featureTypes, string splitValue) {
//TODO accept data reference and vector index
FeatureSplitDataIndx
splitData(vector <vector<string>> data, int splitFeature, vector <FeatureType> featureTypes, string splitValue,
vector<int> &nodeDatasetIndices) {
//cout << "split feature " << splitFeature << " split val "<< splitValue<<endl;
auto start = high_resolution_clock::now();
vector <string> splitFeatureData = data.at(splitFeature);
FeatureSplitData featSplitData;
vector <vector<string>> splitTrue;
vector <vector<string>> splitFalse;
vector <string> emptyStringVector;
for (int idx = 0; idx < data.size(); idx++) {
splitTrue.push_back(emptyStringVector);
splitFalse.push_back(emptyStringVector);
}
FeatureSplitDataIndx featSplitData;
vector<int> splitTrue;
vector<int> splitFalse;
if (featureTypes.at(splitFeature) == CATEGORICAL) {
for (int dataIdx = 0; dataIdx < splitFeatureData.size(); dataIdx++) {
if (splitFeatureData[dataIdx] == splitValue) {
for (int featureIdx = 0; featureIdx < data.size(); featureIdx++) {
splitTrue.at(featureIdx).push_back(data.at(featureIdx).at(dataIdx));
}
for (int i: nodeDatasetIndices) {
if (data.at(splitFeature).at(i) == splitValue) {
splitTrue.push_back(i);
} else {
for (int featureIdx = 0; featureIdx < data.size(); featureIdx++) {
splitFalse.at(featureIdx).push_back(data.at(featureIdx).at(dataIdx));
}
splitFalse.push_back(i);
}
}
} else {
for (int dataIdx = 0; dataIdx < splitFeatureData.size(); dataIdx++) {
if (stod(splitFeatureData[dataIdx]) <= stod(splitValue)) {
for (int featureIdx = 0; featureIdx < data.size(); featureIdx++) {
splitTrue.at(featureIdx).push_back(data.at(featureIdx).at(dataIdx));
}
for (int i: nodeDatasetIndices) {
if (stod(data.at(splitFeature).at(i)) <= stod(splitValue)) {
splitTrue.push_back(i);
} else {
for (int featureIdx = 0; featureIdx < data.size(); featureIdx++) {
splitFalse.at(featureIdx).push_back(data.at(featureIdx).at(dataIdx));
}
splitFalse.push_back(i);
}
}
}
//TODO change to vector of int check
featSplitData = {splitTrue, splitFalse};
double time = (high_resolution_clock::now() - start).count() / NANOSECONDS_IN_SECOND;
//cout << "Time to split data "<< time << "\n";
return featSplitData;
}
void
sortDataByFeature(int featIdx, vector <vector<string>> data, vector <pair<int, string>> &featureData) {
for (int dataIdx = 0; dataIdx < data[0].size(); dataIdx++) {
featureData.emplace_back(dataIdx, data[featIdx].at(dataIdx));
sortDataByFeature(int featIdx, vector <vector<string>> data, vector <pair<int, string>> &featureData,
vector<int> &nodeDatasetIndices) {
for (int dataIdx = 0; dataIdx < nodeDatasetIndices.size(); dataIdx++) { //TODO check
featureData.emplace_back(nodeDatasetIndices[dataIdx], data[featIdx].at(nodeDatasetIndices[dataIdx]));
}
sort(featureData.begin(), featureData.end(), [](pair<int, string> &a, pair<int, string> &b) {
return a.second < b.second;
});
}
BestSplitPoint findBestSplit(double parentEntropy, int currentDepth, vector <vector<string>> data,
vector <FeatureType> featureTypes, float featureWeight) {
BestSplitPoint findBestSplit(double parentEntropy, int currentDepth, vector <vector<string>> &data,
vector <FeatureType> featureTypes, float featureWeight, vector<int>& nodeDatasetIndices) {
//TODO accept data and vector of index check
vector<int> randomFeatures = randomSelect_WithoutReplacement(data.size(), featureWeight);
int bestFeatureIndex = randomFeatures[0];
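splitData and sortDataByFeature now shuttle indices, but both still take the dataset itself by value (and findBestSplit still copies featureTypes), so each call to them copies every string column before touching the indices, which works against the point of this commit. Suggested const-reference signatures for a follow-up (the FeatureType shape is assumed here):

    #include <string>
    #include <utility>
    #include <vector>
    using namespace std;

    enum FeatureType { CATEGORICAL, CONTINUOUS };                     // assumed definition
    struct FeatureSplitDataIndx { vector<int> dataTrue, dataFalse; };

    FeatureSplitDataIndx splitData(const vector<vector<string>> &data, int splitFeature,
                                   const vector<FeatureType> &featureTypes,
                                   const string &splitValue,
                                   const vector<int> &nodeDatasetIndices);

    void sortDataByFeature(int featIdx, const vector<vector<string>> &data,
                           vector<pair<int, string>> &featureData,
                           const vector<int> &nodeDatasetIndices);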
@@ -320,14 +310,14 @@ BestSplitPoint findBestSplit(double parentEntropy, int currentDepth, vector <vec
string bestSplitValue = "";
for (auto featureIndex: randomFeatures) {
if ( featureIndex != data.size()-1 ) { //because last column is label
if (featureIndex != data.size() - 1) { //because last column is label
//initialize variables
string threshold = "";
int dataIndex;
std::map<std::string, int> leftLabelCount;
std::map<std::string, int> rightLabelCount;
//count right side labels
for (int i = 0; i < data[data.size() - 1].size(); i++) {
for (int i : nodeDatasetIndices) { //TODO check
if (rightLabelCount.count(data[data.size() - 1][i])) {
rightLabelCount[data[data.size() - 1][i]] += 1;
@@ -339,37 +329,40 @@ BestSplitPoint findBestSplit(double parentEntropy, int currentDepth, vector <vec
}
int leftSize = 0;
int rightSize = data.at(featureIndex).size();
int rightSize = nodeDatasetIndices.size(); //TODO check
vector <pair<int, string>> featureData;
featureData.reserve(data[0].size());
featureData.reserve(nodeDatasetIndices.size()); //TODO check
//done initializing variables
//sort data with selected feature
sortDataByFeature(featureIndex, data, featureData);
sortDataByFeature(featureIndex, data, featureData, nodeDatasetIndices); //TODO check
for (int indx = 0; indx < featureData.size();) {
threshold = featureData.at(indx).second;
dataIndex = featureData.at(indx).first;
while (indx < data.at(featureIndex).size() && featureData.at(indx).second <= threshold) {
while (indx < nodeDatasetIndices.size() && featureData.at(indx).second <= threshold) { //TODO check
leftSize++;
rightSize--;
if (leftLabelCount.count(data[data.size() - 1][indx])) {
leftLabelCount[data[data.size() - 1][indx]] += 1;
if (leftLabelCount.count(data[data.size() - 1][nodeDatasetIndices[indx]])) {
leftLabelCount[data[data.size() - 1][nodeDatasetIndices[indx]]] += 1;
} else {
leftLabelCount[data[data.size() - 1][indx]] = 1;
leftLabelCount[data[data.size() - 1][nodeDatasetIndices[indx]]] = 1;
}
rightLabelCount[data[data.size() - 1][indx]] -= 1;
rightLabelCount[data[data.size() - 1][nodeDatasetIndices[indx]]] -= 1;
indx++;
if (indx < featureData.size()) dataIndex = featureData[indx].first; // guard: indx can equal size() after the last increment
}
if (indx == data[0].size()) {
if (indx == nodeDatasetIndices.size()) { //TODO check
continue;
}
double splitEntropy = calSplitEntropy(leftLabelCount, rightLabelCount, leftSize, rightSize);
if (splitEntropy < 0 || splitEntropy > 1) {
cout << "Checkpoint ERROR" << endl;
}
if (splitEntropy < minEntropy) {
// cout<<"Best split at "<< featureIndex <<" value "<<threshold<<" Entropy "<< splitEntropy<<endl;
minEntropy = splitEntropy;
@@ -381,7 +374,7 @@ BestSplitPoint findBestSplit(double parentEntropy, int currentDepth, vector <vec
}
}
}
if (minEntropy >= parentEntropy && currentDepth != 0){
if (minEntropy >= parentEntropy && currentDepth != 0) {
bestFeatureIndex = -1;
bestSplitValue = "";
}
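On the "Checkpoint ERROR" guard above: assuming calSplitEntropy returns the size-weighted average of the children's Shannon entropies, H_split = (n_L * H_L + n_R * H_R) / (n_L + n_R), its value lies in [0, log2 k] for k label classes, so the splitEntropy > 1 test is a valid sanity check only for binary labels. A minimal sketch of that computation under the same assumption:

    #include <cmath>
    #include <iostream>
    #include <map>
    #include <string>
    using namespace std;

    // Size-weighted average of the two children's Shannon entropies
    // (what calSplitEntropy is assumed to compute).
    double splitEntropySketch(const map<string, int> &left, const map<string, int> &right,
                              int leftSize, int rightSize) {
        auto entropy = [](const map<string, int> &counts, int n) {
            if (n == 0) return 0.0;
            double h = 0.0;
            for (const auto &kv : counts) {
                double p = static_cast<double>(kv.second) / n;
                if (p > 0) h -= p * log2(p);
            }
            return h;
        };
        double n = leftSize + rightSize;
        return (leftSize / n) * entropy(left, leftSize) +
               (rightSize / n) * entropy(right, rightSize);
    }

    int main() {
        map<string, int> l{{"yes", 3}, {"no", 1}}, r{{"yes", 1}, {"no", 3}};
        cout << splitEntropySketch(l, r, 4, 4) << "\n"; // ~0.811 for this balanced split
    }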
@@ -389,7 +382,6 @@ BestSplitPoint findBestSplit(double parentEntropy, int currentDepth, vector <vec
}
string classifyData(vector <vector<string>> data) {
std::map<std::string, int> dataCount;
int maxVote = 0;
@@ -415,28 +407,25 @@ string classifyData(vector <vector<string>> data) {
return label;
}
std::pair<string, double> classifyWithEntropy(vector <vector<string>> data) {
std::pair<string, double> classifyWithEntropy(vector <vector<string>> &data, vector<int> indices) {
//TODO get data reference and vector of index
auto start = high_resolution_clock::now();
std::map<std::string, int> dataCount;
double entropy = 0.0;
int maxVote = 0;
string label;
for (int i = 0; i < data[data.size() - 1].size(); i++) {
for (int i: indices) {
if (dataCount.count(data[data.size() - 1][i])) {
dataCount[data[data.size() - 1][i]] += 1;
} else {
dataCount[data[data.size() - 1][i]] = 1;
}
}
map<string, int>::iterator itr;
for (itr = dataCount.begin(); itr != dataCount.end(); ++itr) {
// cout<<itr->first<<itr->second<<endl;
double probability = (double) itr->second / (double) data[data.size() - 1].size();
double probability = (double) itr->second / (double) indices.size();
if (probability > 0) {
entropy -= (probability) * log2(probability);
}
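classifyWithEntropy now counts labels over the node's indices and, correctly, divides by indices.size() rather than by the full column length. One residual copy: the indices parameter is taken by value (here and in the header declaration below), so the index vector is duplicated once per node; const vector<int>& would avoid it, assuming the function never mutates it, which the shown body does not.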
@@ -448,43 +437,37 @@ std::pair<string, double> classifyWithEntropy(vector <vector<string>> data) {
}
std::pair<string, double> classificationWithEntropy(label, entropy);
double time = (high_resolution_clock::now() - start).count() / NANOSECONDS_IN_SECOND;
//cout << "Time for classify with entropy: "<< time << "seconds\n";
return classificationWithEntropy;
}
vector <vector<string>> bagData(vector <vector<string>> data, float baggingWeight) {
vector <vector<string>> newData;
vector <string> temp;
for (int i = 0; i < data.size(); i++) {
newData.push_back(temp);
}
vector<int> selection = randSelectIdxWithReplacement(data[0].size(), baggingWeight);
vector <int> bagData(vector <int> &indices, float baggingWeight) {
vector <int> newData;
vector<int> selection = randSelectIdxWithReplacement(indices.size(), baggingWeight);
sort(selection.begin(), selection.end());
for (int i = 0; i < selection.size(); i++) {
for (int j = 0; j < newData.size(); j++) {
newData.at(j).push_back(data.at(j).at(selection.at(i)));
}
newData.push_back(indices.at(selection.at(i))); // pick the sampled index, not the loop counter
}
return newData;
}
vector <vector<string>> oversample(vector <vector<string>> data) {
vector <int> oversample(vector <vector<string>> &data, vector<int> &indices) {
int lableIdx = data.size() - 1;
vector <vector<string>> oversampled;
vector <int> oversampled;
vector <string> emptyVecString;
vector<int> toAdd;
int highestCount = 0;
map<string, int> lableCount;
map <string, vector<int>> lableWithIdx;
map <string, vector<int>> labelWithIdx;
for (int i = 0; i < data[lableIdx].size(); i++) {
for (int i : indices) {
if (lableCount.count(data[lableIdx][i])) {
lableCount[data[lableIdx][i]] += 1;
lableWithIdx[data[lableIdx][i]].push_back(i);
labelWithIdx[data[lableIdx][i]].push_back(i);
if (lableCount[data[lableIdx][i]] > highestCount) {
highestCount = lableCount[data[lableIdx][i]];
}
@@ -492,7 +475,7 @@ vector <vector<string>> oversample(vector <vector<string>> data) {
} else {
lableCount[data[lableIdx][i]] = 1;
lableWithIdx[data[lableIdx][i]].push_back(i);
labelWithIdx[data[lableIdx][i]].push_back(i);
if (lableCount[data[lableIdx][i]] > highestCount) {
highestCount = lableCount[data[lableIdx][i]];
@@ -507,10 +490,10 @@ vector <vector<string>> oversample(vector <vector<string>> data) {
for (countItr = lableCount.begin(); countItr != lableCount.end(); countItr++) {
string lable = countItr->first;
int difference = highestCount - countItr->second;
cout << "lable " << lable << " is " << difference << " times less than highest lable \n";
cout << "label " << lable << " is " << difference << " times less than highest label \n";
if (difference > 0) {
int lablesize = lableWithIdx[lable].size();
vector<int> idxs = lableWithIdx[lable];
int lablesize = labelWithIdx[lable].size();
vector<int> idxs = labelWithIdx[lable];
if (difference < lablesize) {
toAdd.insert(toAdd.end(), idxs.begin(), idxs.begin() + (difference));
@@ -530,13 +513,7 @@ vector <vector<string>> oversample(vector <vector<string>> data) {
}
}
for (int fIdx = 0; fIdx < data.size(); ++fIdx) {
oversampled.emplace_back(emptyVecString);
for (int iIdx = 0; iIdx < toAdd.size(); ++iIdx) {
oversampled.at(fIdx).emplace_back(data.at(fIdx).at(toAdd.at(iIdx)));
}
}
return oversampled;
return toAdd;
}
\ No newline at end of file
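bagData and oversample now return index vectors instead of rebuilt string tables, which removes the last of the big per-call copies in this file. Note, though, that in this commit their results are not yet consumed: the bagData call in RandomForest is commented out with a TODO, and main (below) computes the oversampled indices but leaves the insert into trainingIdxs commented out, so both are effectively no-ops until those call sites are restored.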
@@ -18,6 +18,11 @@ struct FeatureSplitData {
vector<vector<string>> dataFalse;
};
struct FeatureSplitDataIndx {
vector<int> dataTrue;
vector<int> dataFalse;
};
struct BestSplitPoint {
int featureIdx;
string splitpoint;
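FeatureSplitDataIndx deliberately mirrors FeatureSplitData with vector<int> members in place of the string tables. The old struct has to stay for now because calculateSplitEntropy (marked //TODO above) still consumes a FeatureSplitData, and its declaration below is unchanged.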
@@ -30,14 +35,14 @@ vector<int> randSelectIdxWithReplacement(int originalNum, float percent);
void splitTrainingAndTesting(vector<int> trainingIndecies,vector<vector<string>> dataString,
vector<vector<string>>& trainingDataString,vector<vector<string>>& testDataString);
string classifyData(vector <vector<string>> data);
std::pair<string,double> classifyWithEntropy(vector<vector<string>> data);
FeatureSplitData splitData(vector<vector<string>>data, int splitFeature,vector<FeatureType> featureTypes, string splitValue);
std::pair<string,double> classifyWithEntropy(vector<vector<string>> &data, vector<int> indices);
FeatureSplitDataIndx splitData(vector<vector<string>>data, int splitFeature,vector<FeatureType> featureTypes, string splitValue, vector<int> &nodeDatasetIndices );
float calculateEntropy(vector<vector<string>> data);
float calculateSplitEntropy (FeatureSplitData featsplitData);
vector<vector<string>> bagData(vector<vector<string>> data, float baggingWeight);
vector <int> bagData(vector <int> &indices, float baggingWeight);
vector<int> randomSelect_WithoutReplacement(int originalNum, float percentTraining);
vector<vector<string>> oversample(vector<vector<string>> data);
BestSplitPoint findBestSplit(double parentEntropy, int currentDepth, vector <vector<string>> data,
vector <FeatureType> featureType, float featureWeight);
vector<int> oversample(vector<vector<string>> &data, vector<int> &indices);
BestSplitPoint findBestSplit(double parentEntropy, int currentDepth, vector <vector<string>> &data,
vector <FeatureType> featureType, float featureWeight, vector<int>& nodeDatasetIndices );
#endif
\ No newline at end of file
@@ -56,38 +56,27 @@ int main(int argc, char *argv[]) {
vector <vector<string>> trainingData;
vector <vector<string>> testingData;
splitTrainingAndTesting(trainingIdxs, datasetAsString, trainingData, testingData);
cout << "Over sampling training data " << endl;;
vector <vector<string>> oversampledData = oversample(trainingData);
// cout << "over sampled data size "<< oversampledData.at(0).size() <<endl;
cout << "Over sampling training data " << endl;
for (int fIdx = 0; fIdx < trainingData.size(); ++fIdx) {
for (int oIdx = 0; oIdx < oversampledData.at(0).size(); ++oIdx) {
trainingData.at(fIdx).emplace_back(oversampledData.at(fIdx).at(oIdx));
}
}
vector<int> oversampledData = oversample(datasetAsString, trainingIdxs);
// cout<< "training data size after oversample" << trainingData.at(0).size()<<endl;
// trainingIdxs.insert(trainingIdxs.end(), oversampledData.begin(), oversampledData.end());
// sort(trainingIdxs.begin(), trainingIdxs.end());
vector <string> testData;
string emptystring;
for (int featIndex = 0; featIndex < testingData.size(); featIndex++) {
testData.push_back(emptystring);
}
// string data = testingData.at(1).at(0);
// cout << data << endl;
// for (int featIndex = 0; featIndex < testingData.size(); featIndex++) {
// testData.at(featIndex) = testingData.at(featIndex).at(41);
// //cout<<testingData.at(featIndex).at(0)<<", ";
// }
//cout<<endl;
auto start = high_resolution_clock::now();
RandomForest *randomForest = new RandomForest(trainingData, featureTypes, numTrees, baggingWeight, featureWeight, depth);
RandomForest *randomForest = new RandomForest(datasetAsString, trainingIdxs, featureTypes, numTrees,
baggingWeight, featureWeight, depth);
time += (high_resolution_clock::now() - start).count() / 1000000000.0;
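With the forest now built from datasetAsString plus trainingIdxs, the trainingData table produced by splitTrainingAndTesting is no longer passed anywhere in the shown hunks; only testingData is still needed for prediction. If that holds for the elided code too, dropping the trainingData copy would be a natural next step for these memory fixes.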
@@ -100,7 +89,6 @@ int main(int argc, char *argv[]) {
randomForest->printAccuracyReportFile(report);
cout << "**************** prediction with explanation ********** " << endl;
for (int featIndex = 0; featIndex < testingData.size(); featIndex++) {
@@ -116,8 +104,9 @@ int main(int argc, char *argv[]) {
}
ofstream outfile;
outfile.open("avg.txt", ios::app);
outfile<< "------ Report ------ " <<endl;
outfile<< numTrees<<"\t"<<depth<<"\t"<<featureWeight<<"\t"<<baggingWeight<<"\t"<<accuracy/3<<"\t"<<time/3<<endl;
outfile << "------ Report ------ " << endl;
outfile << numTrees << "\t" << depth << "\t" << featureWeight << "\t" << baggingWeight << "\t" << accuracy / 3
<< "\t" << time / 3 << endl;
// outfile<< numTrees<<"\t"<<10<<"\t"<<0.7<<"\t"<<baggingWeight<<"\t"<<accuracy/3<<"\t"<<time/3<<endl;
outfile.close();
......