编写ruby代码的正确方法是什么?

时间:2015-06-20 14:02:41

标签: ruby

我正在解决金字塔问题,其中一个数组随着时间的推移通过在每次迭代中减去两个连续的数字而缩减为单个元素。

  

输入:[1,5,9,2,3,5,6]

     

迭代

     

[4,4,-7,1,2,1],

     

[0,-11,8,1,-1],

     

[-11,19,-7,-2],

     

[30,-26,5],

     

[-56,31],

     

[87]

     

输出:87

解决此问题的最佳方式或ruby方式是什么?这可以通过继承数组并创建一个新类来完成,但我不知道如何。请帮忙。我写这段代码来解决它:

# Demo input from the question.
a = [1, 5, 9, 2, 3, 5, 6]

class Array
  # One pyramid iteration: map each pair of consecutive elements to their
  # difference, e.g. [1, 5, 9] => [4, 4]. Non-destructive.
  def pyr
    # `each_cons(2).map` already builds a new array; the original's
    # `.to_a.map!` allocated the same array twice.
    each_cons(2).map { |left, right| right - left }
  end
end

# Repeatedly reduce until at most one element remains, then read the answer
# once (the original reassigned `ans` on every iteration and left it nil for
# inputs shorter than two elements).
a = a.pyr while a.length > 1
ans = a[0]
p ans

4 个答案:

答案 0 :(得分:4)

我看到了三种解决方法。

重新打开Array类

当然,如果在您的特定ruby脚本/项目中这是数组的基本功能,请重新打开该类。但是如果你打算重新开课,至少要确保这个名字是有意义的。pyr?为什么不写一个全名,这样就不容易发生冲突,比如next_pyramid_iteration(我从来没有听说过这个金字塔问题,如果我理解有误,请见谅)。

使类继承自Array

# A pyramid of numbers that collapses itself one iteration at a time.
class Pyramid < Array

  # Replace the contents with the pairwise differences of consecutive
  # elements, e.g. [1, 5, 9] becomes [4, 4]. Returns self.
  #
  # Fixes vs. the original: `each_const` -> `each_cons`, `e[o]` -> `e[0]`,
  # and the result must be written back with `replace` — the caller's
  # `while pyramid.length > 1` loop relies on the length shrinking, so the
  # original (which discarded the mapped array) never terminated.
  def next_iteration
    replace(each_cons(2).map { |left, right| right - left })
  end

end

然后你的计算就会变成

pyramid = Pyramid.new([1, 5, 9, 2, 3, 5, 6])

# Collapse one level per call until a single value is left.
pyramid.next_iteration while pyramid.length > 1

pyramid[0]

创建一个特定的类进行计算

我不太确定你想要实现什么,但为什么不只是创建一个知道如何计算金字塔的特定类?

# Reduces an array to a single value by repeatedly replacing it with the
# pairwise differences of consecutive elements (the "pyramid" problem),
# without monkey-patching core classes.
class PyramidCalculator

  # arr - the starting Array of numbers. The caller's array is never
  # mutated: each iteration builds a fresh array.
  def initialize(arr)
    @pyramid = arr
  end

  # Run iterations until at most one element remains; returns that element
  # (nil for an empty input).
  def calculate
    do_next_iteration while @pyramid.length > 1
    @pyramid.first
  end

  # Convenience: PyramidCalculator.calculate([...]) without an explicit #new.
  def self.calculate(arr)
    new(arr).calculate
  end

  protected

  # One reduction step. Fixes vs. the original: `each_const` -> `each_cons`,
  # `e[o]` -> `e[0]`, and `map` instead of `map!` (an Enumerator has no map!).
  def do_next_iteration
    @pyramid = @pyramid.each_cons(2).map { |left, right| right - left }
  end
end

因为我添加了一个便利类方法,你现在可以按如下方式计算结果:

PyramidCalculator.calculate([1,5,9,2,3,5,6])

我个人的偏好是最后一种选择:)

答案 1 :(得分:3)

我会把它写成两行代码。(注意:下面这段 C++ 代码与本 Ruby 问题无关,疑为页面抓取/粘贴错误。)

#include <iostream>
#include <cassert>
#include <cstdlib>
#include <vector>
#include "ConsoleColor.hpp"

using namespace std;

// NOTE(review): this listing is an unrelated, partial feed-forward neural
// network in C++ (it does not answer the Ruby question). It depends on a
// project-local "ConsoleColor.hpp" providing the red/green/blue/yellow/white
// stream manipulators, and calls exp() without including <cmath> —
// presumably pulled in transitively; verify on a strict toolchain.
namespace Neural {
    class Neuron;
    typedef vector<Neuron> Layer;

    // ******************** Class: Connection ******************** //
    // Holds one output value plus a weight initialized to a random value
    // in [0, 1]. NOTE(review): rand() is never seeded with srand(), so every
    // run produces the same "random" weights.
    class Connection {
    public:
        Connection();
        void setOutput(const double& outputVal) { myOutputVal = outputVal; }
        // Records the delta against the previous weight before overwriting it.
        void setWeight(const double& weight) { myDeltaWeight = myWeight - weight; myWeight = weight; }
        double getOutput(void) const { return myOutputVal; }
        double getWeight(void) const { return myWeight; }
    private:
        static double randomizeWeight(void) { return rand() / double(RAND_MAX); }
        double myOutputVal;
        double myWeight;
        double myDeltaWeight;
    };

    Connection::Connection() { 
        myOutputVal = 0;
        myWeight = Connection::randomizeWeight();
        myDeltaWeight = myWeight;
        cout << "Weight: " << myWeight << endl;
    }

    // ******************** Class: Neuron ************************ //
    // One neuron: an index within its layer plus a single Connection that
    // stores both its output value and its (single, shared) input weight.
    class Neuron {
    public:
        Neuron();
        void setIndex(const unsigned int& index) { myIndex = index; }
        void setOutput(const double& output) { myConnection.setOutput(output); }
        unsigned int getIndex(void) const { return myIndex; }
        double getOutput(void) const { return myConnection.getOutput(); }
        void feedForward(const Layer& prevLayer);
        void printOutput(void) const;

    private:
        inline static double transfer(const double& weightedSum);
        Connection myConnection;
        unsigned int myIndex;
    };

    Neuron::Neuron() : myIndex(0), myConnection() { } 
    // Logistic sigmoid activation: 1 / (1 + e^-x).
    double Neuron::transfer(const double& weightedSum) { return 1 / double((1 + exp(-weightedSum))); }
    void Neuron::printOutput(void) const { cout << "Neuron " << myIndex << ':' << myConnection.getOutput() << endl; }
    void Neuron::feedForward(const Layer& prevLayer) {
        // Weight sum of the previous layer's output values
        // NOTE(review): the same weight (myConnection.getWeight()) is applied
        // to every input — there is no per-input weight vector.
        double weightedSum = 0;
        for (unsigned int i = 0; i < prevLayer.size(); ++i) {
            weightedSum += prevLayer[i].getOutput()*myConnection.getWeight();
            cout << "Neuron " << i << " from prevLayer has output: " << prevLayer[i].getOutput() << endl;
            cout << "Weighted sum: " << weightedSum << endl;
        }
        // Transfer function
        myConnection.setOutput(Neuron::transfer(weightedSum));
        cout << "Transfer: " << myConnection.getOutput() << endl;
    }

    // ******************** Class: Net *************************** //
    // The network: a vector of layers built from a topology (neurons per
    // layer); every layer except the output layer gets an extra bias neuron.
    class Net {
    public:
        Net(const vector<unsigned int>& topology);
        void setTarget(const vector<double>& targetVals);
        void feedForward(const vector<double>& inputVals);
        void backPropagate(void);
        void printOutput(void) const;
    private:
        vector<Layer> myLayers;
        vector<double> myTargetVals;

    };
    Net::Net(const vector<unsigned int>& topology) : myTargetVals() {
        assert(topology.size() > 0);
        for (unsigned int i = 0; i < topology.size(); ++i) { // Creating the layers
            myLayers.push_back(Layer(((i + 1) == topology.size()) ? topology[i] : topology[i] + 1)); // +1 is for bias neuron
            // Setting each neurons index inside layer
            for (unsigned int j = 0; j < myLayers[i].size(); ++j) {
                myLayers[i][j].setIndex(j); 
            }
            // Console log
            cout << red;
            if (i == 0) {
                cout << "Input layer (" << myLayers[i].size() << " neurons including bias neuron) created." << endl;
                myLayers[i].back().setOutput(1);
            }
            else if (i < topology.size() - 1) { 
                cout << "Hidden layer " << i << " (" << myLayers[i].size() << " neurons including bias neuron) created." << endl; 
                myLayers[i].back().setOutput(1);
            }
            else { cout << "Output layer (" << myLayers[i].size() << " neurons) created." << endl; }
            cout << white;
        }
    }
    void Net::setTarget(const vector<double>& targetVals) { assert(targetVals.size() == myLayers.back().size()); myTargetVals = targetVals; }
    void Net::feedForward(const vector<double>& inputVals) {
        assert(myLayers[0].size() - 1 == inputVals.size());
        for (unsigned int i = 0; i < inputVals.size(); ++i) { // Setting input vals to input layer
            cout << yellow << "Setting input vals...";
            myLayers[0][i].setOutput(inputVals[i]); // myLayers[0] is the input layer
            cout << "myLayer[0][" << i << "].getOutput()==" << myLayers[0][i].getOutput() << white << endl;
        }
        for (unsigned int i = 1; i < myLayers.size() - 1; ++i) { // Updating hidden layers
            for (unsigned int j = 0; j < myLayers[i].size() - 1; ++j) { // - 1 because bias neurons do not have input
                cout << "myLayers[" << i << "].size()==" << myLayers[i].size() << endl;
                cout << green << "Updating neuron " << j << " inside layer " << i << white << endl;
                myLayers[i][j].feedForward(myLayers[i - 1]); // Updating the neurons output based on the neurons of the previous layer
            }
        }
        for (unsigned int i = 0; i < myLayers.back().size(); ++i) { // Updating output layer
            cout << green << "Updating output neuron " << i << ": " << white << endl;
            const Layer& prevLayer = myLayers[myLayers.size() - 2];
            myLayers.back()[i].feedForward(prevLayer); // Updating the neurons output based on the neurons of the previous layer
        }
    }
    void Net::printOutput(void) const {
        for (unsigned int i = 0; i < myLayers.back().size(); ++i) {
            cout << blue;  myLayers.back()[i].printOutput(); cout << white;
        }
    }
    // Not yet implemented: training/back-propagation is an empty stub.
    void Net::backPropagate(void) {

    }
}

// Builds a 3-4-2-2 network, runs one forward pass and prints the outputs.
int main(int argc, char* argv[]) {
    vector<unsigned int> myTopology;
    myTopology.push_back(3);
    myTopology.push_back(4);
    myTopology.push_back(2);
    myTopology.push_back(2);

    cout << myTopology.size() << endl << endl; // myTopology == {3, 4, 2, 2}

    // NOTE(review): myTargetVals is never passed to myNet.setTarget(), so it
    // is unused — presumably intended for the (unimplemented) backPropagate.
    vector<double> myTargetVals= {0.5,1};
    vector<double> myInputVals= {1, 0.5, 1};

    Neural::Net myNet(myTopology);
    myNet.feedForward(myInputVals);
    myNet.printOutput();

    return 0;
}

答案 2 :(得分:2)

在不攻击Array类的情况下,将它变成一个简单的函数当然很容易:

# Recursively collapse +ary+: each step maps consecutive pairs to their
# difference, until a single value remains. Non-destructive; returns a
# scalar (nil for an empty array).
def pyr(ary)
  if ary.length < 2
    ary[0]
  else
    pyr(ary.each_cons(2).map { |left, right| right - left })
  end
end

p pyr [1,5,9,2,3,5,6]    # => 87

如果您希望将答案视为单元素数组而不是标量,请使用return ary

如果您更喜欢迭代到递归或者有一个非常大的数组:

# Iterative variant: keep collapsing to pairwise differences until at most
# one element remains. Returns the (possibly empty) final Array; the
# caller's array is never mutated, since each step rebinds a fresh one.
def pyr(ary)
  until ary.length <= 1
    ary = ary.each_cons(2).map { |left, right| right - left }
  end
  ary
end

通过将其封装为函数而不是内联,您可以对任意数量的数组执行操作,并且在原始输入数组上具有非破坏性。

答案 3 :(得分:2)

没有必要通过连续计算差值来得到最终值,那需要 n*(n-1)/2 次减法和相同数量的加法,其中 n 是数组 a 的大小。相反,我们可以通过对如下形式的 n 项求和来计算该值:

(-1)^(K+i) * bin_coeff(n-1, i) * a[i]

对于 i = 0..(n-1),其中:

  • 如果数组包含偶数个元素,则 K 等于 1,否则 K 等于 0;以及
  • bin_coeff(n,i) 是"从 n 个中一次取 i 个"的二项式系数,即 n!/(i!*(n-i)!)。

我知道你在想什么:计算每个二项式系数需要一些工作。是的,但这可以高效地完成,例如由 bin_coeff(n-1,i) 递推出 bin_coeff(n-1,i+1)(我在下面没有这么做)。当然,这纯属学术讨论,大概不会有人真的使用我建议的方法。

(我希望没有人要求提供证明,但如果有人要求,我会尽力给出。)

**代码**

# Reopen Integer rather than Fixnum: Fixnum was deprecated in Ruby 2.4 and
# removed in 3.2, where `class Fixnum` would define a brand-new class and
# these methods would never reach integer literals. Integer works on every
# supported Ruby and is backward compatible (Fixnum subclassed Integer).
class Integer
  # n.factorial => n! (returns 1 for 0 and for negative receivers, since
  # the range (1..self) is then empty).
  def factorial
    (1..self).reduce(1) { |product, i| product * i }
  end

  # n.bin_coeff(m) => binomial coefficient C(n, m) = n!/(m!*(n-m)!).
  def bin_coeff m
    self.factorial/(m.factorial*(self-m).factorial)
  end
end

# Closed-form pyramid value: a signed, binomially-weighted sum over the
# input, so no intermediate difference arrays are built. The sign of term i
# is +1 when (n - i) is even and -1 otherwise (equivalent to the original's
# flip-before-use sign toggle).
def pyramid_sum(a)
  n = a.size - 1
  a.each_with_index.reduce(0) do |total, (value, i)|
    coeff = (n - i).even? ? 1 : -1
    total + coeff * n.bin_coeff(i) * value
  end
end

**示例**

pyramid_sum [1, 5]                #=> 4 
pyramid_sum [1, 5, 9] #           #=> 0 
pyramid_sum [1, 5, 9, 2]          #=> -11 
pyramid_sum [1, 5, 9, 2, 3]       #=> 30 
pyramid_sum [1, 5, 9, 2, 3, 5]    #=> -56 
pyramid_sum [1, 5, 9, 2, 3, 5, 6] #=> 87