std::vector behavior, move and copy

Asked: 2015-06-24 09:16:52

Tags: c++ c++11 operator-overloading assignment-operator

In my spare time I am writing a neural network in C++ to get more experience with C++11, but I have run into a few things I cannot figure out.

struct neuronsLayer
{
    vector<real> ac;

    neuronsLayer(int s)
    {
        std::cout<<"neuronLayer 1"<<std::endl;
        ac = vector<real>(s,0.1f);
    }
    neuronsLayer(const neuronsLayer& nl)
    {
        std::cout<<"neuronLayer 2"<<std::endl;
        ac = vector<real>(nl.ac);
    }
    neuronsLayer(neuronsLayer&& nl)
    {
        std::cout<<"neuronLayer 3"<<std::endl;
        ac = std::move(nl.ac);
    }
    neuronsLayer operator=(const neuronsLayer& nl)
    {
        std::cout<<"neuronLayer 4"<<std::endl;
        return neuronsLayer(nl);
    }
    neuronsLayer(){ std::cout<<"neuronLayer 5"<<std::endl;}
    ~neuronsLayer(){}
};

That is the layer implementation, and then:

struct network
{
    vector<neuronsLayer> hiddens;
    vector<neuronsConnection> synaps;
    real alpha;

   //std::initializer_list

    network(vector<int> layers)
    {
        alpha = 1.f;
        hiddens = vector<neuronsLayer>();//+2
        for(int& l : layers)
        {
            hiddens.push_back(neuronsLayer(l));
        }
        synaps = vector<neuronsConnection>();
        for(int i = 0 ; i < layers.size() -1 ; i++)
        {
            synaps.push_back(std::move(neuronsConnection(layers[i],layers[i+1])));
        }
    }

    void forward(vector<real> input)
    {
        hiddens[0].ac = input;
        for (int layer = 0; layer < hiddens.size() -1; ++layer)
        {
            for(int i = 0 ; i < synaps[layer].x ; i++)
            {
                for(int j = 0 ; j < synaps[layer].y ; j++)
                {
                    hiddens[layer+1].ac[i] += hiddens[layer].ac[j] * synaps[layer].w[i + synaps[layer].x * j]; //+ activation +biais
                }
            }
            for(int i = 0 ; i < hiddens[layer].ac.size() ; i ++)
                hiddens[layer+1].ac[i] = 1.f/(1+exp(-hiddens[layer+1].ac[i]));
        }
    }

    void backward(vector<real> expected)
    {
        vector<real> error(expected);
        for(int i = 0 ; i < error.size(); i ++)
        {
            error[i] = expected[i] - hiddens[hiddens.size() -1].ac[i];
        }
        for (int layer = 0; layer < hiddens.size() -1; ++layer)
        {
            for(int i = 0 ; i < synaps[layer].x ; i++)
            {
                for(int j = 0 ; j < synaps[layer].y ; j++)
                {
                    real dw = error[i]*(1+2*exp(-hiddens[0].ac[i])/(1+exp(-hiddens[0].ac[i])));
                    synaps[layer].w[i + synaps[layer].x * j] += dw*alpha;
                }
            }
        }
    }
};

And main():

int main(int argc, char** argv)
{
    vector<int> net = {64,2};
    network nn(net);
    vector<float> o = {1,0};
    vector<float> t = {0,1};

    auto rOne = std::bind(std::normal_distribution<float>(6,1), std::default_random_engine{});
    auto rTwo = std::bind(std::normal_distribution<float>(3,1), std::default_random_engine{});

    auto gOne = [&](){
        int x=rOne(),y=rOne();
        //while(x=rOne > 8 or x < 0);
        //while(y=rOne > 8 or y < 0);
        std::vector<real> tbr (64,0);
        tbr[x + y*8] = 1.0;
        return tbr;
    };

    auto gTwo = [&](){
        int x=rTwo(),y=rTwo();
        //while(x=rTwo > 8 or x < 0);
        //while(y=rTwo > 8 or y < 0);
        std::vector<real> tbr (64,0);
        tbr[x + y*8] = 1.0;
        return tbr;
    };

    for(int i = 0 ; i < 5000 ; i++)
    {
        nn.forward(gOne());
        nn.backward(o);
        nn.forward(gTwo());
        nn.backward(t);
    }
}

I have one main problem and two questions:

1) When backward is executed I get a SEGFAULT during execution, and it looks like hiddens[0] is empty. So I may have (to put it mildly) misunderstood how moving works?

Program received signal SIGSEGV, Segmentation fault. 
0x0000000000402159 in network::backward (this=0x7fffffffe190, expected=...) at dnn3.cpp:171
171   real dw = error[i]*(1+2*exp(-hiddens[0].ac[i])/(1+exp( hiddens[0].ac[i])));
(gdb) p i
$1 = 0
(gdb) p hiddens[0].ac[i]
$2 = (__gnu_cxx::__alloc_traits<std::allocator<float> >::value_type &) @0x3f0000003f000000: <error reading variable>

2) Before that, the program's output is:

neuronLayer 1
neuronLayer 3
neuronLayer 1
neuronLayer 3
neuronLayer 2

Why is the copy constructor called? I only create 2 layers, both are produced by exactly the same process, and only one of them uses this constructor. I cannot understand why it is needed at all.

3) About the bound objects rOne and rTwo: do they always return the same values? When I poke at gOne's output it seems to return the same value twice. Is that normal?

Thanks in advance, Mark.

Edit: As asked above:

(gdb) p hiddens
 $1 = {<std::_Vector_base<neuronsLayer, std::allocator<neuronsLayer> >> = { _M_impl = {<std::allocator<neuronsLayer>> ={<__gnu_cxx::new_allocator<neuronsLayer>> = {<No data fields>}, <No data fields>},_M_start = 0x60c1a0, _M_finish = 0x60c1d0, _M_end_of_storage = 0x60c1d0}}, <No data fields>}
(gdb) p hiddens[0].ac
$2 = {<std::_Vector_base<float, std::allocator<float> >> = { _M_impl = {<std::allocator<float>> = {<__gnu_cxx::new_allocator<float>> = {<No data fields>}, <No data fields>}, _M_start = 0x3f0000003f000000, _M_finish = 0x3f0000003f000000, _M_end_of_storage = 0x60c2e0}}, <No data fields>}

Edit 2:

Breakpoint 1, network::forward (this=0x7fffffffe190, input=...)
(gdb) p hiddens
$1 = {<std::_Vector_base<neuronsLayer, std::allocator<neuronsLayer> >> = {_M_impl = {<std::allocator<neuronsLayer>> = {<__gnu_cxx::new_allocator<neuronsLayer>> = {<No data fields>}, <No data fields>},_M_start = 0x60d1a0, _M_finish = 0x60d1d0, _M_end_of_storage = 0x60d1d0}}, <No data fields>}
(gdb) p hiddens[0]
$2 = (__gnu_cxx::__alloc_traits<std::allocator<neuronsLayer> >::value_type &) @0x60d1a0: { ac = {<std::_Vector_base<float, std::allocator<float> >> = { _M_impl = {<std::allocator<float>> = {<__gnu_cxx::new_allocator<float>> = {<No data fields>}, <No data fields>}, _M_start = 0x60d1e0, _M_finish = 0x60d2e0, _M_end_of_storage = 0x60d2e0}}, <No data fields>}}
(gdb) p hiddens[0].ac
$3 = {<std::_Vector_base<float, std::allocator<float> >> = { _M_impl = {<std::allocator<float>> = {<__gnu_cxx::new_allocator<float>> = {<No data fields>}, <No data fields>}, _M_start = 0x60d1e0, _M_finish = 0x60d2e0, _M_end_of_storage = 0x60d2e0}}, <No data fields>}
(gdb) p hiddens[1]
$4 = (__gnu_cxx::__alloc_traits<std::allocator<neuronsLayer> >::value_type &) @0x60d1b8: { ac = {<std::_Vector_base<float, std::allocator<float> >> = _M_impl = {<std::allocator<float>> = {<__gnu_cxx::new_allocator<float>> = {<No data fields>}, <No data fields>}, _M_start = 0x60d180, _M_finish = 0x60d188, _M_end_of_storage = 0x60d188}}, <No data fields>}}
(gdb) p hiddens[1].ac[0] 
$5 = (__gnu_cxx::__alloc_traits<std::allocator<float> >::value_type &) @0x60d180: 0.100000001
(gdb) p hiddens[0].ac[0]
$6 = (__gnu_cxx::__alloc_traits<std::allocator<float> >::value_type &) @0x60d1e0: 0.100000001

1 answer:

Answer 0 (score: 2):

neuronsLayer operator=(const neuronsLayer& nl)
{
    std::cout<<"neuronLayer 4"<<std::endl;
    return neuronsLayer(nl);
}

The assignment operator does not do what you want. It makes a temporary copy of the neuronsLayer passed in as nl, instead of modifying the object it is called on.
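To see the effect in isolation, here is a minimal, self-contained sketch; S is a stripped-down stand-in for the class above (my own illustration, not the original code):

#include <iostream>
#include <vector>

struct S
{
    std::vector<float> ac;

    // Same shape as the operator= in the question: it builds and returns a
    // copy of `other`, but never touches the members of *this.
    S operator=(const S& other)
    {
        return S{other.ac};
    }
};

int main()
{
    S a{{1.f, 2.f, 3.f}};
    S b{{9.f}};
    a = b;                              // looks like an assignment...
    std::cout << a.ac.size() << '\n';   // ...but prints 3: `a` was never modified
}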

It should be:

neuronsLayer& operator=(const neuronsLayer& nl)
{
    std::cout<<"neuronLayer 4"<<std::endl;
    ac = nl.ac;
    return *this;
}
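With the reference-returning version the left-hand object is actually updated, and assignments can be chained like with built-in types. A quick sketch (to be placed inside a function, assuming the class above is otherwise unchanged):

neuronsLayer a(4), b(2), c(8);
a = b;        // a.ac now holds 2 elements; prints "neuronLayer 4"
a = b = c;    // b.ac and then a.ac end up with 8 elements each

Returning *this by reference is what makes the chained form on the last line work.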

Edit: As posted above:

(gdb) p hiddens
 $1 = {<std::_Vector_base<neuronsLayer, std::allocator<neuronsLayer> >> = { _M_impl = {<std::allocator<neuronsLayer>> ={<__gnu_cxx::new_allocator<neuronsLayer>> = {<No data fields>}, <No data fields>},_M_start = 0x60c1a0, _M_finish = 0x60c1d0, _M_end_of_storage = 0x60c1d0}}, <No data fields>}
(gdb) p hiddens[0].ac
$2 = {<std::_Vector_base<float, std::allocator<float> >> = { _M_impl = {<std::allocator<float>> = {<__gnu_cxx::new_allocator<float>> = {<No data fields>}, <No data fields>}, _M_start = 0x3f0000003f000000, _M_finish = 0x3f0000003f000000, _M_end_of_storage = 0x60c2e0}}, <No data fields>}

Since hiddens[0].ac has _M_start equal to _M_finish, it is empty, so fetching its 0th element causes the segmentation fault.
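That failure mode is easy to reproduce in isolation. A minimal sketch, independent of the network code:

#include <iostream>
#include <stdexcept>
#include <vector>

int main()
{
    std::vector<float> v;        // empty: begin() == end(), like _M_start == _M_finish above
    // v[0] = 1.f;               // undefined behaviour on an empty vector; often a segfault
    try
    {
        v.at(0) = 1.f;           // bounds-checked access throws instead of crashing silently
    }
    catch (const std::out_of_range& e)
    {
        std::cout << "out of range: " << e.what() << '\n';
    }
}

Bounds-checked access (or an explicit empty() check before indexing) turns a crash like the one above into a diagnosable error.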