/*
 * LinearRegression.cpp
 *
 *  Created on: Nov 23, 2016
 *      Author: titov
 */

#include "LinearRegression.hh"
|
||
|
||
#include <cmath>
|
||
|
||
using algorithm::LinearRegression;
|
||
|
||
LinearRegression::LinearRegression(unsigned int init_rowData, unsigned int init_colData, std::pmr::memory_resource * memory) :
        memory(memory) {

    colData = init_colData;
    rowData = init_rowData;

    alfa = 0.0f;
    startCalculation = false;
    numCalculation = 0;

    data = std::pmr::polymorphic_allocator<float *>(memory).allocate(rowData);      // was: new float * [rowData];
    for (unsigned int i = 0; i < rowData; i++)
        data[i] = std::pmr::polymorphic_allocator<float>(memory).allocate(colData); // was: new float [colData];

    Theta     = std::pmr::polymorphic_allocator<float>(memory).allocate(colData);   // was: new float [colData];
    TempTheta = std::pmr::polymorphic_allocator<float>(memory).allocate(colData);   // was: new float [colData];
    result    = std::pmr::polymorphic_allocator<float>(memory).allocate(rowData);   // was: new float [rowData];

    for (unsigned int j = 0; j < colData; j++) {
        Theta[j] = 0.0f;
        TempTheta[j] = 0.0f;
    }

    rowIndex = 0;
    colIndex = 0;

    // Mark every slot as "no data yet".
    for (unsigned int i = 0; i < rowData; i++) {
        for (unsigned int j = 0; j < colData; j++)
            data[i][j] = NAN;

        result[i] = NAN;
    }
}

LinearRegression::~LinearRegression()
{
    for (unsigned int i = 0; i < rowData; i++)
        std::pmr::polymorphic_allocator<float>(memory).deallocate(data[i], colData); // was: delete [] data[i];

    std::pmr::polymorphic_allocator<float *>(memory).deallocate(data, rowData);      // was: delete [] data;

    std::pmr::polymorphic_allocator<float>(memory).deallocate(Theta, colData);       // was: delete [] Theta;
    std::pmr::polymorphic_allocator<float>(memory).deallocate(TempTheta, colData);   // was: delete [] TempTheta;
    std::pmr::polymorphic_allocator<float>(memory).deallocate(result, rowData);      // was: delete [] result;
}

//float algorithm::LinearRegression::errorCalc(float hp, float res) const {
//
//	return hp - res;
//
//}

float LinearRegression::getTheta(unsigned int idTheta) const
{
    if (idTheta < colData)
        return Theta[idTheta];
    else
        return NAN;
}

bool LinearRegression::addData(float * new_data, float new_result)
{
    if (rowIndex < rowData)
    {
        for (unsigned int i = 0; i < colData; i++)
        {
            data[rowIndex][i] = new_data[i];
        }

        result[rowIndex] = new_result;

        rowIndex++;
    }
    else
    {
        return false;
    }

    return true;
}

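// Note: each row passed to addData() is treated as an opaque feature vector of colData
// values; the class does not add an intercept column itself. If a bias term is wanted,
// the caller would typically reserve one feature as a constant 1.0f so that the matching
// Theta entry learns the offset.
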
bool LinearRegression::init(float initAlfa, float * initTheta, unsigned int numCalc)
{
    alfa = initAlfa;
    startCalculation = false;
    numCalculation = numCalc;

    bool created;

    if (Theta && TempTheta && result && data) {
        created = true;
        for (unsigned int i = 0; i < rowData; i++) {
            created = created && (data[i] != nullptr);
        }
    } else {
        created = false;
    }

    for (unsigned int i = 0; i < colData && created; i++)
        Theta[i] = initTheta[i];

    return created;
}

bool LinearRegression::update()
{
    float h_p = 0.0f;
    float update_Theta = 0.0f;
    float delta_Error = 0.0f;

    for (unsigned int k = 0; k < colData; k++)      // Compute the update for each Theta value.
    {
        update_Theta = 0.0f;
        for (unsigned int j = 0; j < rowData; j++)  // Accumulate the error over all rows.
        {
            h_p = 0.0f;
            for (unsigned int i = 0; i < colData; i++) // Evaluate the hypothesis.
                h_p += Theta[i] * data[j][i];

            delta_Error = this->errorCalc(h_p, result[j]); // Compute the error.
            update_Theta += delta_Error * data[j][k];      // Accumulate the update term.
        }

        TempTheta[k] = Theta[k] - alfa * (update_Theta / rowData);
    }

    for (unsigned int n = 0; n < colData; n++)      // Apply the new Theta values.
    {
        Theta[n] = TempTheta[n];
    }

    return true;
}

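/*
 * For reference, update() is one batch gradient-descent step. With m = rowData stored
 * rows x_j (each of colData features), targets y_j = result[j], hypothesis
 * h(x_j) = sum_i Theta[i] * x_j[i], and learning rate alfa, every parameter moves by
 *
 *     Theta[k] <- Theta[k] - alfa * (1/m) * sum_j errorCalc(h(x_j), y_j) * x_j[k]
 *
 * All TempTheta[k] are computed from the old Theta before any Theta[k] is overwritten;
 * that is what the temporary array is for.
 */
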
bool LinearRegression::calculation()
{
    if (startCalculation && numCalculation)
    {
        update();
        numCalculation--;
        return true;
    } else {
        return false;
    }
}

float algorithm::LinearRegressionCirc::errorCalc(float hp, float res) const {

    float error = hp - res;

    if (error > math::constants::pi) {              // Take the shorter arc, since the values lie on a circle.
        error = error - math::constants::pi2;
    } else if (error <= -math::constants::pi) {
        error = error + math::constants::pi2;
    }

    return error;
}

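// Wrap-around example: with hp = 3.0f and res = -3.0f the raw difference is 6.0, which
// exceeds pi, so the code returns 6.0 - pi2 (about -0.283 if pi2 is 2*pi): the short way
// around the circle rather than the long way.
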
float algorithm::LinearRegression::getError() const {

    float h_p = 0.0f;
    float delta_Error = 0.0f;

    for (unsigned int j = 0; j < rowData; j++)      // Accumulate the error over all stored rows.
    {
        h_p = 0.0f;
        for (unsigned int i = 0; i < colData; i++)  // Evaluate the hypothesis.
            h_p += Theta[i] * data[j][i];

        delta_Error += this->errorCalc(h_p, result[j]);
    }

    return delta_Error / rowData;                   // Mean signed error over the data set.
}

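/*
 * Minimal usage sketch. Assumptions not visible in this translation unit: addData(),
 * init(), update(), getTheta() and getError() are public; the base-class errorCalc()
 * (commented out above) is defined in LinearRegression.hh as the plain difference
 * hp - res; and std::pmr::get_default_resource() is an acceptable memory resource.
 * The leading 1.0f bias column is the caller's convention, not a class requirement.
 *
 *     #include "LinearRegression.hh"
 *     #include <memory_resource>
 *
 *     void example()
 *     {
 *         const unsigned int rows = 4, cols = 2;
 *         algorithm::LinearRegression lr(rows, cols, std::pmr::get_default_resource());
 *
 *         float samples[rows][cols] = { {1.0f, 0.0f}, {1.0f, 1.0f}, {1.0f, 2.0f}, {1.0f, 3.0f} };
 *         float targets[rows]       = { 1.0f, 3.0f, 5.0f, 7.0f };  // y = 1 + 2*x
 *         for (unsigned int r = 0; r < rows; r++)
 *             lr.addData(samples[r], targets[r]);
 *
 *         float theta0[cols] = { 0.0f, 0.0f };
 *         lr.init(0.1f, theta0, 1000);          // learning rate, initial Theta, iteration budget
 *
 *         for (unsigned int n = 0; n < 1000; n++)
 *             lr.update();                      // calculation() could be used instead once
 *                                               // startCalculation is enabled (not set here)
 *
 *         float bias  = lr.getTheta(0);         // expected to approach 1.0f
 *         float slope = lr.getTheta(1);         // expected to approach 2.0f
 *         float fit   = lr.getError();          // mean signed error, near 0.0f
 *         (void)bias; (void)slope; (void)fit;
 *     }
 */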