Python で音声信号処理: http://aidiary.hatenablog.com/entry/20110514/1305377659
Speech Signal Processing Toolkit (SPTK)の使い方: http://aidiary.hatenablog.com/entry/20120701/1341126474
Digital Signal Processing in Python, Think DSP, Green Tea Press: https://greenteapress.com/wp/think-dsp/
In below code, only 2 objects are created. "std2 = std1" means that variable "std1", that is a reference to an object, is copied to "std2" such that now "std2" also refers to the same object referred to by "std1". Any modification applied on that object using "std2" will also be observed by "std1".
This is very different from C++, where a variable can hold either the object itself or a reference/pointer to that object. When a variable holds the object itself, "std2 = std1;" (or "Student std2 = std1;" / "Student std2(std1);") performs copy assignment (or copy construction), NOT reference binding, so "std2" and "std1" end up holding two distinct objects.
Student std, std1, // Declare four variables of
std2, std3; // type Student.
std = new Student(); // Create a new object belonging
// to the class Student, and
// store a reference to that
// object in the variable std.
std1 = new Student(); // Create a second Student object
// and store a reference to
// it in the variable std1.
std2 = std1; // Copy the reference value in std1
// into the variable std2.
std3 = null; // Store a null reference in the
// variable std3.
Refer to http://javadude.com/articles/passbyvalue.htm
Example:
// Demonstrates that Java is always pass-by-value: primitives pass a copy of
// the value; object/array arguments pass a copy of the reference.
public class JavaPassByValue {
static void funcA(int z) {
z = 1000; // reassigns only the local copy of the int; the caller's x is untouched.
}
static void funcB(int[] z) {
z[0] = 1000; // writes through the copied reference, so the caller's array changes.
}
static void funcC(int[] z) {
z[0] = 256;
z = new int[200]; // z is assigned a new value of the pointer to int[200],
// which is different from the passed pointer/address value.
z[0] = 123; // writes into the new local array; invisible to the caller.
}
public static void main(String[] args) {
int x = 12;
System.out.println(x);
funcA(x); // a value '12' is passed to funcA, x itself is not changed.
System.out.println(x);
int[] y = new int[1];
y[0] = 13;
System.out.println(y[0]);
funcB(y); // a value of int[], which is a pointer, is passed to funcB.
System.out.println(y[0]);
System.out.println(y[0]);
funcC(y);
System.out.println(y[0]);
}
}
will output:
12
12
13
1000
1000
256
In C++, the assignment operator means copy assignment. In Java, assignment always copies the variable's value: for primitives that value is the data itself, while for object types it is a reference — so for (mutable) objects assignment effectively behaves like reference assignment.
import java.util.Date;
// Demonstrates how Java's assignment operator behaves for immutable vs mutable types.
public class TestMutable {
public static void main(String[] args) {
{
// With an immutable class, assignment LOOKS like copy assignment:
// String is an immutable class, so "dst + \"def\"" builds a NEW object and
// rebinds dst to it; the object shared at "dst = src" is never modified.
String src = "abc";
String dst = src;
System.out.println("src = " + src);
System.out.println("dst = " + dst);
dst = dst + "def";
System.out.println("src = " + src);
System.out.println("dst = " + dst);
}
{
// Assignment operator (=) on a mutable class behaves as reference assignment:
// Date is a mutable class, so mutating through dst is visible via src.
Date src = new Date(123);
Date dst = src;
System.out.println("src = " + src.getTime());
System.out.println("dst = " + dst.getTime());
dst.setTime(456);
System.out.println("src = " + src.getTime());
System.out.println("dst = " + dst.getTime());
}
{
// Array is a mutable type: both variables alias the same array object.
int[] src = {7, 8, 9};
int[] dst = src;
System.out.println("src = " + src[0] + ", " + src[1] + ", " + src[2]);
System.out.println("dst = " + dst[0] + ", " + dst[1] + ", " + dst[2]);
dst[0] = 11; dst[1] = 12; dst[2] = 13;
System.out.println("src = " + src[0] + ", " + src[1] + ", " + src[2]);
System.out.println("dst = " + dst[0] + ", " + dst[1] + ", " + dst[2]);
}
}
}
The above Java code will output:
src = abc
dst = abc
src = abc
dst = abcdef
src = 123
dst = 123
src = 456
dst = 456
src = 7, 8, 9
dst = 7, 8, 9
src = 11, 12, 13
dst = 11, 12, 13
Below code:
#include <iostream>
using namespace std;
// Write 'A' into the first element of the caller's buffer.
// The pointer itself is passed by value, but it still addresses the
// caller's storage, so the write is visible to the caller.
void funcA(char* chptr)
{
    *chptr = 'A';
}
void funcB(char* chptr)
{
chptr[0] = 'B'; // visible to the caller: writes through the passed address.
chptr = new char[8]; // This newly new-ed chptr is NOT deleted: a deliberate leak,
// demonstrating that reseating the LOCAL pointer copy does not
// affect (and is invisible to) the caller.
chptr[0] = 'C'; // written into the leaked local buffer only.
}
// Driver for funcA/funcB above.
// Fix: standard C++ requires main to return int (the original 'void main' is
// ill-formed); also return 0 explicitly. Printed output is unchanged: 0, A, B.
int main( int argc, char* argv[] )
{
    {
        char* myptr = new char[8]; // a space of 8 chars is allocated on the heap.
        myptr[0] = '0';
        cout << myptr[0] << endl;  // prints 0
        funcA(myptr);              // writes 'A' through the (copied) pointer.
        cout << myptr[0] << endl;  // prints A
        funcB(myptr);              // writes 'B'; the pointer reseating inside
                                   // funcB is invisible here (pass-by-value).
        cout << myptr[0] << endl;  // prints B
        delete[] myptr;
    }
    return 0;
}
will output:
0
A
B
So, just like Java, C++ passes arguments by value. The value that is copied may be an object itself or an address (pointer) to an object.
However, be careful, if the argument is of reference type, then the alias, i.e., the object itself but with different name, is passed to the function.
g++ option for expanding macro is -E -dD -Dmacro=0
g++ -E -dD -D_WIN32=1 main.cpp >| expanded2.cpp
Below code:
// Pedagogical class: default ctor, copy ctor, and copy assignment are declared
// private and left undefined (the pre-C++11 "noncopyable" idiom), so copying is
// rejected at compile time outside the class and at link time inside it.
class MyClass
{
private:
int mMyInt;
private:
// Default constructor, copy constructor, and copy assignment operator are
// declared private AND NOT defined. These ctors are NOT intended to be used.
MyClass();
MyClass(const MyClass&);
MyClass& operator= (const MyClass&);
public:
// Logs construction so object lifetime is visible in the demo output.
MyClass(int input) : mMyInt(input) {
cout << "MyClass with input " << mMyInt << " is constructed." << endl;
}
virtual ~MyClass() {
cout << "MyClass with input " << mMyInt << " is destructed." << endl;
}
int getMyInt() const { return mMyInt; }
void setMyInt(const int newinput) { mMyInt = newinput; }
// Calling this triggers a LINK error (not a compile error): the private copy
// ctor IS accessible from inside a member function, but it was never defined.
void linkError(MyClass& objAlias) {
MyClass newObj(objAlias); // copy ctor is NOT defined, so link error when it is called.
}
};
// const-correctness with pointers: "const MyClass*" protects the pointee,
// "MyClass* const" freezes the pointer itself; "const T* const" freezes both.
//void funcCompileErr1(const MyClass* mcptr) {
// mcptr->setMyInt(128); // mcptr points to a MyClass constant object. So cannot set the pointed object.
//}
//void funcCompileErr2(MyClass* const mcptr) {
// mcptr = new MyClass(128); // mcptr is a constant pointer, so cannot assign new pointer to mcptr.
//}
//void funcCompileErr3(const MyClass* const mcptr) {
// mcptr->setMyInt(128);
// mcptr = new MyClass(128);
//}
void funcCompileOK1(MyClass* const mcptr) {
mcptr->setMyInt(128); // mcptr is a constant pointer, BUT the pointed object is NOT const, so OK to set the obj.
}
void funcCompileOK2(const MyClass* mcptr) {
mcptr = new MyClass(128); // meaningless? (legal, but the new object is leaked when the local pointer dies)
}
void funcC(MyClass* mcptr) {
mcptr->setMyInt(256); // mutates the caller's object through the copied pointer.
}
void funcD(MyClass* mcptr) {
mcptr->setMyInt(512);
// Local mcptr is assigned to point to newly constructed object.
mcptr = new MyClass(17); // memory leak! (no one deletes this object)
mcptr->setMyInt(1024);
}
void funcE(MyClass* mcptr) {
mcptr->setMyInt(512);
// Local mcptr is assigned to point to newly constructed object.
mcptr = new MyClass(17);
mcptr->setMyInt(1024);
delete mcptr; // unlike funcD, the locally new-ed object is released here.
}
void funcPassRef(MyClass& objAlias) {
objAlias.setMyInt(555); // a reference parameter IS the caller's object (an alias).
}
//void funcPassRefCompileErr1(const MyClass& objAlias) {
// objAlias.setMyInt(555); // setMyInt is non-const, so it cannot be called through a const alias.
//}
//void funcPassRefCompileErr2(MyClass& objAlias) {
// MyClass newObj = objAlias; // copy-initialization: uses the (private) copy ctor, NOT operator=
// MyClass newObj2(objAlias); // cannot access private copy ctor
//}
// Driver demonstrating heap-object lifetime and pass-by-value of pointers.
// Fix: standard C++ requires main to return int (the original 'void main' is
// ill-formed); also return 0 explicitly. Printed output is unchanged.
int main( int argc, char* argv[] )
{
    {
        cout << "======= obj1ptr =======" << endl;
        MyClass* obj1ptr = new MyClass(1); // memory leak!
    }
    {
        cout << "======= obj2ptr =======" << endl;
        MyClass* obj2ptr = new MyClass(2);
        delete obj2ptr;
    }
    {
        cout << "======= obj3ptr =======" << endl;
        MyClass* obj3ptr = new MyClass(3);
        cout << obj3ptr->getMyInt() << endl;
        funcC(obj3ptr); // mutates the object through the copied pointer.
        cout << obj3ptr->getMyInt() << endl;
        delete obj3ptr;
    }
    {
        cout << "======= obj4ptr =======" << endl;
        MyClass* obj4ptr = new MyClass(4);
        cout << obj4ptr ->getMyInt() << endl;
        funcD(obj4ptr); // sets 512, then leaks a local MyClass(17).
        cout << obj4ptr ->getMyInt() << endl;
        delete obj4ptr;
    }
    {
        cout << "======= obj5ptr =======" << endl;
        MyClass* obj5ptr = new MyClass(5);
        cout << obj5ptr ->getMyInt() << endl;
        funcE(obj5ptr); // sets 512; its local MyClass(17 -> 1024) is deleted inside.
        cout << obj5ptr ->getMyInt() << endl;
        delete obj5ptr;
    }
    //{
    //  MyClass aaa(567);
    //  aaa.linkError(aaa); // link error!
    //}
    return 0;
}
will output:
======= obj1ptr =======
MyClass with input 1 is constructed.
======= obj2ptr =======
MyClass with input 2 is constructed.
MyClass with input 2 is destructed.
======= obj3ptr =======
MyClass with input 3 is constructed.
3
256
MyClass with input 256 is destructed.
======= obj4ptr =======
MyClass with input 4 is constructed.
4
MyClass with input 17 is constructed.
512
MyClass with input 512 is destructed.
======= obj5ptr =======
MyClass with input 5 is constructed.
5
MyClass with input 17 is constructed.
MyClass with input 1024 is destructed.
512
MyClass with input 512 is destructed.
Is calling "delete this;" from a member function OK?
OK, but the instance (pointed to by this) must have been allocated with new.
Reason:
A member function (method) can be pictured as a 'static' function (in quotes — just as a mental model), so its existence does not depend on the existence of any particular instance.
When a member function is called, it is like that the instance (or its reference/pointer) is passed to this 'static' member function. So, even when delete this is done in this member function, the instance is just correctly deleted.
The member function stays untouched (because it is 'static').
Please see:
http://ch.cri-mw.co.jp/hirase/187.html
http://slashdot.jp/journal/274056/%5BC%2B%2B%5D-delete-this
Below code will not compile:
template <typename T>
const int recbinSearch(std::vector<T>::const_iterator head, const int length,
const T query) {
It must be written like this:
template <typename T>
const int recbinSearch(typename std::vector<T>::const_iterator head, const int length,
const T query) {
See also:
http://www.daniweb.com/software-development/cpp/threads/397627
For a better IO performance in C++, it is better to use scanf/printf instead of cin/cout of iostream. Here, in the C++ code we do NOT "#include <stdio.h>" but we do "#include <cstdio>".
Other C-traditional headers such as stdlib.h are included with cstdlib, i.e., add c prefix and remove the .h.
Additional info:
// Allow input buffering and thus cin speed up. Use with caution!
std::cin.sync_with_stdio(false);
Meyers' More Effective C++ Item 5.
template<class T>
class Array {
public:
Array(int size); // This single-argument ctor has a side effect of permitting implicit conversion from int to Array<int>
...
}
bool operator==(const Array<int>& lhs, const Array<int>& rhs); // operator== is OK to compare 2 Array<int>s.
Array<int> a(10);
Array<int> b(10);
...
if ( a == b[0] ) // This is OK, i.e., the int b[0] will implicitly be converted to Array<int>(b[0]). Equivalent to below:
if ( a == static_cast<Array<int> >(b[0]) )
So to prevent the above UN-intended use, declare Array(int size) as explicit, so that NO implicit conversion from int to Array<int>.
class Array {
public:
explicit Array(int size); // Forbid implicit conversion from int to Array<int>,
// i.e., must explicit-ly call the single-argument constructor to create Array of size size.
...
}
if ( a == b[0] ) // Error. NO implicit conversion from int b[0] to Array<int>(b[0]).
Meyers' More Effective C++ Item 6.
// prefix form: increment and fetch; returns a reference, so no temporary is made.
UPInt& UPInt::operator++() {
*this += 1; // increment
return *this; // fetch
}
// postfix form: fetch and increment (the dummy int parameter only selects this overload).
// Returning const UPInt by value is why postfix costs an extra ctor/dtor.
const UPInt UPInt::operator++( int ) {
UPInt oldValue = *this; // fetch
++(*this); // increment, using prefix form
return oldValue; // return what was fetched
}
Observations:
Prefix form is more efficient (no ctor and dtor of oldValue). When dealing with user-defined types where ctor and dtor may be expensive, prefix form should be used whenever possible.
Postfix form should be implemented in terms of its prefix counterpart.
See Item 25 of More Effective C++:
class NLComponent {
public:
virtual ostream& print(ostream& s) const = 0;
...
};
class TextBlock: public NLComponent {
public:
virtual ostream& print(ostream& s) const;
...
};
class Graphic: public NLComponent {
public:
virtual ostream& print(ostream& s) const;
...
};
// Non-member operator<< dispatches to the virtual print(), giving polymorphic
// stream output for any NLComponent-derived object (TextBlock, Graphic, ...).
inline
ostream& operator<<(ostream& s, const NLComponent& c)
{
return c.print(s);
}
We need to specify a comparator to show how 2 instances should be ordered.
// Comparator ordering CRightHeight objects by getRight() for std::priority_queue.
// Fix: the original returned !(lhs.getRight() < rhs.getRight()), i.e. ">=",
// which is NOT a strict weak ordering (it returns true for equal keys) and is
// undefined behavior for standard ordered containers/algorithms.
// The equivalent valid comparator is ">" (min-heap on getRight()).
struct crhcomp {
    bool operator() (const CRightHeight& lhs, const CRightHeight& rhs) const {
        return lhs.getRight() > rhs.getRight();
    }
};
int main(void)
{
// Priority queue of CRightHeight, ordered by the crhcomp comparator above.
std::priority_queue<CRightHeight, std::vector<CRightHeight>, crhcomp> Q;
}
In many 3rd-party libraries, logging is provided via operator<<. But our logging uses std::string. So, to feed the 3rd-party libraries' logging output into our logging, we need to "convert" the operator<< output into a std::string. Here, std::stringstream is our saviour.
Refer to: http://99blues.dyndns.org/blog/2010/02/std_stringstream/
// Return a stringstream to a pristine, reusable state: empty buffer, cleared
// error/eof flags, and decimal formatting restored (in case a previous user
// switched the stream to hex/oct).
void reset_stringstream(std::stringstream& ss)
{
    ss.str(std::string());  // drop any buffered contents
    ss.clear();             // reset the stream state bits
    ss << std::dec;         // restore the default numeric base
}
...
{
std::stringstream ss;
ss << fixedRegion.GetIndex();
LFATAL(( "fixedRegion.GetIndex() = %s", ss.str() ));
reset_stringstream( ss );
ss << fixedRegion.GetSize();
LFATAL(( "fixedRegion.GetSize() = %s", ss.str() ));
}
In a cpp file, enclose the file contents with unnamed namespace, which implies that the global variable etc., defined inside this unnamed namespace only valid inside this file.
namespace {
...
int ga;
void func () {
}
...
}
As below, the function is declared in this file, but defined in other files.
namespace NLibA {
int funca(); // Just declared here, but defined in other files.
double funcb();
}
namespace NMyApp {
main() {
int a = NLibA::funca();
}
}
Compile below with "g++ -o test test.cpp func.cpp"
test.cpp
#include <iostream>
// File-local (internal linkage) definitions: this unnamed namespace's "a" and
// funcC are visible only inside test.cpp, so they cannot clash with func.cpp's "a".
namespace {
using namespace std;
int a = 200;
void funcC(char& chref) {
chref = 'K'; // immediately overwritten below;
chref = 'J'; // the reference parameter aliases the caller's char.
}
}
namespace NFunc {
void funcD(void); // declared here, defined in func.cpp.
}
int main(int argc, char* argv[]) {
char test = 'H';
cout << test << endl;
funcC( test );
cout << test << endl; // prints J: funcC modified the caller's variable via the reference.
NFunc::funcD(); // prints func.cpp's file-local a (100).
cout << a << endl; // prints this file's a (200).
return 0;
}
func.cpp
#include <iostream>
namespace {
using namespace std;
int a = 100; // file-local: a distinct object from the "a" in test.cpp.
}
namespace NFunc {
void funcD(void) {
cout << a << endl; // sees only this translation unit's a (prints 100).
}
}
Result
H
J
100
200
Sometimes we want to modify behavior of a class, but we want to keep the base class as general as possible. So, for example the base class is a class to compute distance between 2 images, which might be SSD or mutual information distance (see ITK).
A way to do this is composition: create a new class that holds an SSD-metric or mutual-information-metric object as a member, and then have the new class re-expose the member's interface by forwarding each call to that member object.
An easier way is to derive the base metric class, but the base class is specified as template:
namespace itk {
template <class TSupermetric>
class ImageAndBSplineGridCost:
public TSupermetric
{
typedef TSupermetric Superclass;
....
void GetValueAndDerivative( params, val, deriv ) {
this->Superclass::GetValueAndDerivative( params, val, deriv );
// Then modify val and deriv below
....
}
}
}
// Usage
typedef itk::MattesMI<FixedImageType, MovingImageType> ImageCostType;
typedef itk::ImageAndBSplineGridCost<ImageCostType> MetricType;
...
metric = MetricType::New();
...
registration->SetMetric(metric);
...
If a class is meant to be a base class (for example, abstract class for a lot of algorithm implementations), then declare the destructor of the base class as VIRTUAL. See also Meyers' Effective C++.
#include <iostream>
#include <boost/shared_ptr.hpp>
#define DTOR_COUT { std::cout << __FUNCTION__ << std::endl; }
#define PARTITION { std::cout << "==========" << std::endl; }
namespace NTest {
// Base WITHOUT a virtual destructor: deleting a Derived1 through a Base1*
// is undefined behavior and (in this demo) skips ~Derived1.
class Base1 {
public:
~Base1() {
DTOR_COUT;
}
};
class Derived1 : public Base1 {
public:
~Derived1() {
DTOR_COUT;
}
};
// Base WITH a virtual destructor: delete through a Base2* correctly runs
// ~Derived2 followed by ~Base2.
class Base2 {
public:
virtual ~Base2() {
DTOR_COUT;
}
};
class Derived2 : public Base2 {
public:
~Derived2() { // implicitly virtual because the base dtor is virtual.
DTOR_COUT;
}
};
}
int main( void ) {
PARTITION;
NTest::Base1* p1 = new NTest::Derived1();
delete p1; // UB: ~Base1 is non-virtual, so only ~Base1 is printed here.
PARTITION;
NTest::Base2* p2 = new NTest::Derived2;
delete p2; // virtual dtor: prints ~Derived2 then ~Base2.
PARTITION;
{
boost::shared_ptr<NTest::Base1> shp1 =
boost::shared_ptr<NTest::Base1>( new NTest::Derived1() );
} // shared_ptr captured the Derived1 deleter at construction, so both
// dtors run even though ~Base1 is non-virtual.
PARTITION;
return 0;
}
Reference:
When only a header file is changed, make does NOT recompile source files which include that header. The dependency of the source file to the header must be checked and recorded using -MMD and -MP during compilation. Makefile here:
Text Box
# -*- coding: utf-8 -*-
# Makefile for idtts/src
# Generate binary for translating Indonesian text to phoneme.
mkfile_abs_name := $(abspath $(lastword $(MAKEFILE_LIST)))
mkfile_dir :=$(shell dirname $(realpath $(lastword $(MAKEFILE_LIST))))
CC=g++
# Below is CFLAGS when using boost serialization:
# CFLAGS=-c -MMD -MP -Wall
# Below is CFLAGS when using CEREAL (c++11) serialization:
# -MMD -MP make the compiler emit per-source .d dependency files, so editing a
# header correctly triggers recompilation of the sources that include it.
CFLAGS=-c -MMD -MP -Wall -std=c++11 -I $(mkfile_dir)
LDFLAGS=
SRCS=main.cpp compiledict.cpp translator.cpp \
transimpl_part1.cpp transimpl_part2.cpp \
prefixtree/prefixtree_abs.cpp prefixtree/naiveprefixtree.cpp \
prefixtree/listprefixtree.cpp prefixtree/dartsprefixtree.cpp
BIN_NAME=idtxt2ph
DICTDIR=$(CURDIR)
OBJS=$(SRCS:.cpp=.o)
DEPS=$(SRCS:.cpp=.d)
LIBS=-lstdc++ -lboost_program_options -lboost_serialization -lboost_regex
all: $(BIN_NAME)
# Pull in the generated dependency rules; the leading '-' silences errors when
# the .d files do not exist yet (first build, or right after "make clean").
-include $(DEPS)
$(BIN_NAME): $(OBJS)
$(CC) $(LDFLAGS) $(OBJS) $(LIBS) -o $@
.cpp.o:
$(CC) $(CFLAGS) -D PATH_DICT=\"$(DICTDIR)\" $< -o $@
clean:
rm -f *~ $(OBJS) $(DEPS) $(BIN_NAME)
# Developer-convenience targets: rebuild the dictionary binary and run the
# diff-based regression tests / timings against the test suite.
dictbin:
ls -l id_dictbin ; date ; ./$(BIN_NAME) --compile ; ls -l id_dictbin
tpo:
cat testsuit/tptst.txt | ./$(BIN_NAME) | diff testsuit/tpout -
r1:
time cat testsuit/to_read_col2 | ./$(BIN_NAME) | diff testsuit/to_read_col2.out -
r2:
time sed 's/\/,/ /g' testsuit/to_read2 | ./$(BIN_NAME) | diff testsuit/to_read2.out -
check:
cat testsuit/tptst.txt | ./$(BIN_NAME) | diff testsuit/tpout -
cat testsuit/to_read_col2 | ./$(BIN_NAME) | diff testsuit/to_read_col2.out -
sed 's/\/,/ /g' testsuit/to_read2 | ./$(BIN_NAME) | diff testsuit/to_read2.out -
timing:
time cat testsuit/tptst.txt | ./$(BIN_NAME) | diff testsuit/tpout -
time cat testsuit/to_read_col2 | ./$(BIN_NAME) | diff testsuit/to_read_col2.out -
time sed 's/\/,/ /g' testsuit/to_read2 | ./$(BIN_NAME) | diff testsuit/to_read2.out -
Now, need to ignore the .d files. How to do is:
svn propset svn:global-ignores '*.d' .
(be careful with the current directory, dot!!!!)
svn propget svn:global-ignores .
OR to do it for ALL repositories: edit ~/.subversion/config , ignore part.
Reference:
Below is the Makefile:
Text Box
# -*- coding: utf-8 -*-
# Makefile for idtts/src
# Generate binary for translating Indonesian text to phoneme.
#
# Default is release build. To build for debug: $ make buildtype=debug
# Then, can also do $ make buildtype=debug dictbin ...
mkfile_abs_name := $(abspath $(lastword $(MAKEFILE_LIST)))
mkfile_dir :=$(shell dirname $(realpath $(lastword $(MAKEFILE_LIST))))
buildtype=release
CC=g++
# Below is CFLAGS when using boost serialization:
# CFLAGS=-c -MMD -MP -Wall
# Below is CFLAGS when using CEREAL (c++11) serialization:
CFLAGS=-c -MMD -MP -Wall -std=c++11 -I $(mkfile_dir)
LDFLAGS=
SRCS=main.cpp compiledict.cpp translator.cpp \
transimpl_part1.cpp transimpl_part2.cpp \
prefixtree/prefixtree_abs.cpp prefixtree/naiveprefixtree.cpp \
prefixtree/listprefixtree.cpp prefixtree/dartsprefixtree.cpp
# Executable for both release and debug is in current Makefile directory.
BIN_NAME=idtxt2ph
DICTDIR=$(CURDIR)
# Output directory of objects and deps files.
# Objects/deps mirror the source tree under Build/<buildtype>/ so release and
# debug artifacts never overwrite each other.
BUILD_DIR=Build
OD_OUTDIR=$(BUILD_DIR)/$(buildtype)
OBJS=$(SRCS:%.cpp=$(OD_OUTDIR)/%.o)
DEPS=$(SRCS:%.cpp=$(OD_OUTDIR)/%.d)
LIBS=-lstdc++ -lboost_program_options -lboost_serialization -lboost_regex
ifeq ($(buildtype),release)
# CFLAGS += -O3
else ifeq ($(buildtype),debug)
CFLAGS += -O0 -g
BIN_NAME=idtxt2ph-dbg
else
$(error buildtype must be release(default) or debug)
endif
all: $(BIN_NAME)
# Pull in generated dependency rules; '-' ignores .d files that don't exist yet.
-include $(DEPS)
$(BIN_NAME): $(OBJS)
$(CC) $(LDFLAGS) $(OBJS) $(LIBS) -o $@
# Pattern rule: create the per-buildtype output subdirectory on demand, and
# route the dependency file (-MF) next to its object file.
$(OD_OUTDIR)/%.o:%.cpp
@if [ ! -e `dirname $@` ]; then mkdir -p `dirname $@`; fi
$(CC) $(CFLAGS) -MF $(@:%.o=%.d) -D PATH_DICT=\"$(DICTDIR)\" $< -o $@
clean:
rm -f *~ $(BIN_NAME) idtxt2ph idtxt2ph-dbg
rm -rf $(BUILD_DIR)
# rm -f *~ $(OBJS) $(DEPS) $(BIN_NAME)
dictbin:
ls -l id_dictbin ; date ; ./$(BIN_NAME) --compile ; ls -l id_dictbin
tpo:
cat testsuit/tptst.txt | ./$(BIN_NAME) | diff testsuit/tpout -
r1:
time cat testsuit/to_read_col2 | ./$(BIN_NAME) | diff testsuit/to_read_col2.out -
r2:
time sed 's/\/,/ /g' testsuit/to_read2 | ./$(BIN_NAME) | diff testsuit/to_read2.out -
check:
cat testsuit/tptst.txt | ./$(BIN_NAME) | diff testsuit/tpout -
cat testsuit/to_read_col2 | ./$(BIN_NAME) | diff testsuit/to_read_col2.out -
sed 's/\/,/ /g' testsuit/to_read2 | ./$(BIN_NAME) | diff testsuit/to_read2.out -
timing:
time cat testsuit/tptst.txt | ./$(BIN_NAME) | diff testsuit/tpout -
time cat testsuit/to_read_col2 | ./$(BIN_NAME) | diff testsuit/to_read_col2.out -
time sed 's/\/,/ /g' testsuit/to_read2 | ./$(BIN_NAME) | diff testsuit/to_read2.out -
See above Makefile with debug build (-g)
In Mac: g++ --version and lldb --version : CLANG. Installed in Xcode!
No need to install LLDB !
To supply command line arguments of exe to LLDB, use --: lldb -- exe --lots --of --flags -a -b -c d e
Actually "--" means that lldb options is finished up until "--", after "--" there is NO option for the current lldb command: https://lldb.llvm.org/use/tutorial.html
Typical usage:
% lldb -- idtxt2ph-dbg --version
(lldb) list
(lldb) b ; b main ; b main.cpp:26 (list breakpoint, break at function main, break at file main.cpp line 26
(lldb) run (after run, use continue to continue running, not run command!)
(lldb) next
(lldb) po pathDict (print object)
(lldb) c
(lldb) q
% lldb -- idtxt2ph-dbg --version
(lldb) list; b main ; b main.cpp:27 ; list ; b
(lldb) r ; b (check whether breakpoint is hit or not)
(lldb) po argc (print object, value of argc) ; p argc (print structure of argc)
(lldb) frame variable (list local variables which are valid in the current stack frame of breakpoint)
(lldb) frame info
(lldb) frame variable *argv
(lldb) p argv ; p argv[0] ; p argv[1] ; p argv[1][4]
(lldb) c (continue until break); b
(lldb) n (1 step execution)
(lldb) s (step in)
(lldb) watch list ; thread list
(lldb) b disable 1 ; b enable 1
(lldb) expression argc=4 (put value 4 to argc)
(lldb) bt : Print stack trace, thread backtrace bt all ; th list
For debugging program that is waiting for stdin input, use lldb to attach to the process: <== NOT WORKING
% idtxt2ph-dbg in terminal 1
% pgrep idtxt2ph-dbg in terminal 2, then memo the PID
% lldb -p PID
(lldb) bt ; frame info ; bt all
(lldb) frame sel 9 (select frame 9, from information of backtrace)
(lldb) b transimpl_part1.cpp:2336
(lldb) frame sel 0 ????
In terminal 1, input tesalonika for example, will then break
(lldb) n
For debugging program that is waiting for stdin input, just use for example 1 line of input:
First, near the beginning of main, put below infinite loop:
int go_on = 1;
while( go_on ) { ; }
In terminal 2 for lldb: % lldb -w -n idtxt2ph-dbg (waitfor process with name idtxt2ph-dbg)
In terminal 1 execute: % echo desa | idtxtph-dbg
In terminal 2, confirm that the lldb attach to the process and currently in the infinite loop.
(lldb) (Do whatever needed) bt ; frame info ; bt all
(lldb) b main.cpp:129
(lldb) bt ; fr info <---- confirm that still waiting in the infinite loop.
(lldb) expr go_on = 0
(lldb) c
(lldb) fr info ; b <---- confirm that breakpoint main.cpp:129 is hit 1 time, and now fr info is in line 129.
Not so many info available. Install realgud-lldb 1.0.2
Usage: https://github.com/realgud/realgud, https://github.com/realgud/realgud/wiki, https://github.com/realgud/realgud-lldb
Theoretically, it is possible to debug gcc-compiled program with LLDB, and vice-versa to debug clang-compiled program with GDB: https://stackoverflow.com/questions/21132194/is-it-possible-to-debug-a-gcc-compiled-program-using-lldb-or-debug-a-clang-comp
Open Emacs (in the directory where the binary to be debugged existing) -> C-x 3 (vertical split) -> )M-x load-library RET realgud-lldb RET
M-x realgud--lldb -> Run lldb (like this): lldb -w -n idtxt2ph-dbg ( -(w)ait for the process with -(n)ame idtxt2ph-dbg to be launched. IN INFINITE LOOP.)
In the terminal execute: echo desa | idtxt2ph-dbg
The left side is lldb command window (Command buffer), the right side in source code window (Source buffer).
(lldb) frame var (show all variables), bt (show stack frame / backtrace), fr sel 2 (select stack frame 2), po go_on
(lldb) b main.cpp:129 OR click the left margin on source code window to put breakpoint.
(lldb) expr go_on=0
(lldb) bt : show where the program counter (current execution line)
(lldb) M-p : previous command
(lldb) s : step-in
https://www.youtube.com/watch?v=HTUE03LnaXA (emacs IDE)
https://www.youtube.com/watch?v=7SQmleA4EMo (Not so good. smartparens, swiper)
In Mac: g++-9 --version and gdb --version , both installed by brew.
Need to support DEPS and debug build in myspeak first. (DONE)
To watch:
NEED to CODESIGN gdb in Mac ! (Now here, pending)
http://tuhdo.github.io/c-ide.html (gdb-many-windows)
function xyPlot(datFile, xlbl, ylbl)
%% xyPlot - read a binary file of doubles and plot it as an (x, y) curve.
%%
%% datFile is a binary file containing double x-axis values,
%% followed by double y-axis values (first half = x, second half = y).
%% xlbl / ylbl are the x-axis / y-axis label strings.
datFile                     % echo the file name (debug aid)
fid = fopen(datFile);
if fid < 0
  disp 'ERROR: Cannot open the input file!'
  return
end
[xy, count] = fread(fid, 'double'); fclose(fid);
count                       % echo the number of doubles read (debug aid)
if(mod(count,2)~=0)
  disp 'ERROR: The input file must have even length!'
  return
end
x = xy(1:count/2);          % first half of the file: x values
y = xy(count/2+1:count);    % second half of the file: y values
plot(x, y, 'r.-');
%% Pad the axis limits a little so the end points are not on the frame.
axis([min(x)-1 max(x)+1 min(y)-5 max(y)+5]);
%xlabel('sphere diameter [pixel]');
%ylabel('Average voxel value inside sphere');
xlabel(xlbl);
ylabel(ylbl);
% Load a 512x512 raw image of doubles from 'u.raw' and display it.
fid = fopen('u.raw'); [image, count] = fread(fid, 'double'); fclose(fid); clear fid;
% fread returns a column vector; reshape to 512x512, then transpose
% (presumably because the file was written row-major -- confirm).
image = transpose(reshape(image, 512, 512));
figure(1); imshow(image);
% Redisplay with the display range shrunk to 1% of the data min/max.
figure(1); imshow(image, [0.01*min(image(:)), 0.01*max(image(:))]);
%% Below is to find the pixel value that is less than -10000 for example. Be careful, obidx is the index of such pixel.
obidx = find( image(:)<-10000 )
% Laplacian-of-Gaussian (LoG) filtering via second Gaussian derivatives.
sigma = 1.2;
% Sample a grid wide enough (+/- 3*sigma) to cover the Gaussian support.
[x,y]=ndgrid(floor(-3*sigma):ceil(3*sigma),floor(-3*sigma):ceil(3*sigma));
% Second partial derivatives of the 2-D Gaussian w.r.t. x and y.
DGaussXX = 1/(2*pi*sigma^4) * (x.^2/sigma^2 - 1) .* exp(-(x.^2 + y.^2)/(2*sigma^2));
DGaussYY = 1/(2*pi*sigma^4) * (y.^2/sigma^2 - 1) .* exp(-(x.^2 + y.^2)/(2*sigma^2));
% LoG response = sum of the two second-derivative filter responses.
lapimage = imfilter(image, DGaussXX, 'conv') + imfilter(image, DGaussYY, 'conv');
We need to use below command because Y-positive direction is down-ward.
result = filter2(kernel, inputImage);
http://hikaku.fxtec.info/metatrader/wiki.cgi?page=MetaTrader%C6%FE%CC%E7
http://stockcharts.com/school/doku.php?id=chart_school
http://jidoubaibai.com/index.html
http://www.autofx-lab.com/ja/metatrader-4-%E5%85%A5%E9%96%80
http://qtstalker.sourceforge.net/index.html
Must read, "Transferring indicator code to EA code": http://articles.mql4.com/501
About algorithm work environments
In windows7, add Notepad++ in context menu. Shortcut with (&a): http://www002.upp.so-net.ne.jp/jsrc/pc-98/reg.html
http://www.codeproject.com/Articles/570638/Ten-Cplusplus11-Features-Every-Cplusplus-Developer
If you are a programmer, try this tools. Awesome TabTextFinder: http://sourceforge.jp/projects/tabtextfinder/
View your XML graphically with xglore: http://sourceforge.net/projects/xglore/
Collect CPU information, CPU-Z: http://www.cpuid.com/softwares/cpu-z.html
Collect GPU information, GPU-Z: http://www.techpowerup.com/gpuz/
Collect HDD information, CrystalDiskInfo: http://crystalmark.info/software/CrystalDiskInfo/
http://www.atlassian.com/git/tutorial
http://lifehacker.com/5983680/how-the-heck-do-i-use-github (login credentials, password caching GitHub for windows)
http://www.vogella.com/articles/Git/article.html
https://help.github.com/ (Password caching other than GitHub for windows)
https://bitbucket.org/ Free private repository!
http://www.sourcetreeapp.com/ Git client for windows
http://betterexplained.com/articles/aha-moments-when-learning-git/
Best ONE! : Learn Enough Git to Be Dangerous: https://www.learnenough.com/git-tutorial
It seems that SVN-like workflow (i.e., checking out to working copy) is supported as 'git clone'. However 'git clone' itself IS a distributed-repository.
Use git bash. Or maybe can use sourcetree.
git config --global user.name "Your Name Here"
git config --global user.email "your_email@example.com"
git config --global core.autocrlf true (Windows: true, UNIX: input)
For binary, it is bad to do auto crlf changing, so in the .gitattributes:
filename binary
autocrlf is necessary to prevent git st shows "all modified" when for example the server is UNIX and the local is windows.
git config --global branch.autosetuprebase always
git config -l
When using UNIX, set credential.helper as below (> git 1.7.9):
man git-credential-cache
git config credential.helper 'cache --timeout=300'
Better flow to create repository:
ssh user@server
cd path/above/repo
git init --bare my-project.git
Or create repository in bitbucket Seems repository in bitbucket is bare by default. (ivansetiawantky)
http://yashigani.hatenablog.com/entry/2013/03/21/224251
For a new repository (group name: foo, repository directory: repodir):
git init --bare --shared=group repodir
chgrp -R foo repodir
see the config file. Should have core.sharedrepository = 1, receive.denyNonFastforwards = true
For an existing git repository that needs to be changed to a shared repository:
HERE, ASSUMING THAT SETGID "S" in group permission is already set!
chgrp -R foo repodir
chmod -R g=u repodir (instead of chmod -R g+rw repodir)
chmod g+s `find repodir -type d`
git init --bare --shared=group repodir
For example, the local git repository is first created for testing. But after that everything is OK and needed to be moved to git server.
In the remote server:
git init --bare --shared=group mylocalrepodir
chgrp -R foo mylocalrepodir
In local machine:
git push --mirror vm-host.ext:/home/git/gitsvr/mylocalrepodir
git clone ssh://john@example.com/path/to/my-project.git
git clone vm-host.ext:/home/git/gitsrv/a_repo
vm-host.ext is managed inside ~/.ssh/config
cd my-project
# Start working on the project
Doing the above by default will set origin as central repo: git remote add origin https://ivansetiawantky@bitbucket.org/study.git
Add all existing files to the repository: git add .
git commit -m "Msg"
git add hello.py
git commit -m "Msg"
Stage with "git add", then commit with comment "git commit -m" can be done simultaneously with:
(2) git commit -am "Present tense imperative mood" ("Fix bugs", not fixed / fixes. And, no period, because it is a summary.)
(1) git add -A : This adds, modifies, and removes index entries to match the working tree. Use this to add untracked files and also staging modified tracked files.
(3) git push -u origin --all, git push -u origin master, --all is for all branch
List all git-managed file: git ls-tree --full-tree -r HEAD, git ls-tree --full-name -r HEAD
chmod -R g=u dir
gitignore.io
Giving a tag:
git tag -a v1.4 -m 'my version 1.4'
git push origin 2.0
git tag -l '2.0'
git show 2.0 (tag name)
git checkout master
git pull --rebase origin
git checkout master; git pull origin master
git checkout develop; git pull origin develop
git checkout staging; git pull origin staging
TO PULL ALL BRANCH FROM ORIGIN: git pull --rebase --all
Assume that working repo is clean, i.e., everything is committed to the working repo. First pull everything from central repo, then push everything to central bare repo.
git checkout master
git fetch origin master
git rebase -i origin/master
# Squash commits, fix up commit messages etc.
git push -u origin master
TO PUSH ALL BRANCHES TO ORIGIN (central repo): git push -u origin --all
Make sure that the local repo is in master and no commit left (i.e., local repo is clean):
git checkout master
git status -uno
Pull from all remote repo:
git pull --rebase --all
Pull from origin remote repo, branch master, develop, staging:
Check remote whether the local repo is up to date or not:
git remote show origin
If not, do git pull --rebase --all, git pull --rebase origin
git pull --rebase origin master
git checkout develop; git pull origin develop
git checkout staging; git pull origin staging
To check the remote repository log:
git fetch
git log --oneline origin/master -2
http://rfs.jp/server/git/gite-lab/git-log-remote.html
Don't forget git fetch
Do code change etc, add (stage), commit, and again make sure local repo is clean: git status -uno
Again pull from central repo
Push to central repo
git push -u origin --all
When the repo is an index.html repo and need to be open:
Will be serving in http://username.github.io/repositoryname
Do one time only:
git checkout -b gh-pages
git push -u origin gh-pages <==== Create gh-pages branch in the upstream/origin repo.
Do each time master is updated:
git checkout gh-pages
git merge master
git push -u origin gh-pages
Check available local and remote (origin) branch: git branch -a
git co ma(TAB) (master git-completion)
git branch -d gh-pages
To remove remote gh-pages:
Ensure no gh-pages in local repo (git branch -d gh-pages)
git push [-u] origin --delete gh-pages
git clone -b "v2.2.5" --depth 1 https://github.com/Reactive-Extensions/Rx.NET.git
check the tag with: git tag -l
git remote show origin
git remote
git remote -v
To see local repo log: git log
To see origin / central (bare) repo log: git log origin
Show remote origin, etc: git config -l
https://help.github.com/articles/setting-your-commit-email-address-in-git/
git config --global -l
In case the git repository size is very big such that __git_ps1 is slow, then do this:
git config bash.showDirtyState false
git config bash.showUntrackedFiles false
By this, the prompt is NOT updated, but the prompt return status is very fast.
https://stackoverflow.com/questions/4192014/git-ps1-extremely-slow-in-kernel-tree
See: https://devcenter.heroku.com/articles/git
If the heroku app is NOT existing yet, then do "heroku create" in the git repo root directory. Then push with "git push heroku master".
If the heroku app IS ALREADY existing, then do "heroku git:remote -a lit-forest-6446" in the git repo root directory (here the app name is lit-forest-6446). So, actually we are like adding a new existing-remote git repo. After that do "git remote -v" to confirm and do "git push heroku master".
In case permission denied, then try "heroku keys:add".
Or do "heroku git:remote -a lit-forest-6446 --remote my-remote-heroku-app <== my-remote-heroku-app is the alias for this remote repo."
# Create a development branch and push it to github
# The -b flag creates a new branch (http://git-scm.com/book/ch3-2.html)
# The -u sets that branch to track remote changes (http://goo.gl/sQ6OI)
git checkout -b develop
git branch
git push -u origin develop
# Create a staging branch and push it to github
git checkout -b staging
git branch
git push -u origin staging
# Now return to dev branch, make some edits, push to github.
#
# NOTE: we use 'git checkout develop' to simply change into the develop
# branch without creating it anew with the '-b' flag.
#
# NOTE ALSO: We use 'git push origin develop' rather than 'git push -u
# origin develop' because we already set up the branch to track remote
# changes once. This means that if others edit the 'develop' branch we can
# merge their changes locally with git pull --rebase.
git checkout develop
git branch
emacs -nw index.html
git commit -a -m "Edited product desc in index.html"
git push origin develop
# Now inspect staging at USERNAME-bitstarter-s-mooc.herokuapp.com
# If you need to make edits, do them in develop, preview, and then merge into staging.
# Try to keep the flow of edits one way.
#
# Here is how we fix bugs detected in staging. First, we 'git checkout
# develop' again. We make fixes (with emacs), save (with git commit -a -m),
# push them to github (git push origin develop), and then return to staging
# (git checkout staging).
#
# Now that we are back in the 'staging' branch, a status we can check with
# 'git branch', we again issue the all-important 'git merge develop'. This
# merges changes from the develop branch into staging.
git checkout develop
emacs -nw index.html # make edits and save
git commit -a -m "Fixed staging bugs in develop branch."
git push origin develop # push develop commits to github
git checkout staging
git merge develop # merge changes from develop into staging.
git push origin staging # push staging commits to github
git push staging-heroku staging:master # push again to Heroku
# Once we confirm the website version deployed on staging works as intended,
# we merge into master and then push to production.
git checkout master
git merge staging
git push production-heroku master:master
https://git-scm.com/book/en/v2/Git-Branching-Branches-in-a-Nutshell
git branch : To check which branch you are in now.
git branch -a : List all available branches (also in remote origin)
git checkout RELEASE-4.0 : switch/checkout and work in branch RELEASE-4.0
git branch my_new_branch : Just create a new my_new_branch but still in the current branch (NO switching)
git branch -d deleted : delete branch
git checkout -b NEW_BRANCH : create NEW_BRANCH and switch to NEW_BRANCH
git pull --rebase --all
git log --oneline --decorate --graph --all
Usage: project A's repository exists, and you want to start your own GitHub A's repository.
1. Fork the repository A (upstream) as your repository A (origin)
2. Clone from your repository A (origin) to your local git repo
3. Add the original repository A as upstream
To merge update from the original repository A (upstream), first pull from the original repository A (upstream) to the local clone of the forked A (origin). Then push the local clone update to the origin A.
One thing that's sort of interesting is that you never directly update your forked repo from the original ("upstream") repo after the original "fork" operation. Subsequent to that, updates to your forked repo are indirect: you pull from upstream into your cloned repo, to bring it up to date, then (if you wish), you push those changes into your forked repo (origin).
Example:
1. Login to GitHub, then FORK https://github.com/AllenDowney/ThinkPython2
2. git clone https://github.com/ivansetiawantky/ThinkPython2.git ; cd ThinkPython2
3. git remote add upstream https://github.com/AllenDowney/ThinkPython2
Verification:
git remote show : origin, upstream
git remote show {origin, upstream}
git fetch, pull, push, if without argument, then it is from origin (forked repo)
git fetch upstream
git remote -v
git pull --rebase : only rebase from origin's master
git pull --rebase --all : fetch origin, upstream, then pull from both original repo (upstream) and forked repo (origin)
git branch -a : show branch in both original repo (upstream) and forked repo (origin)
To update a forked repo with git rebase:
https://medium.com/@topspinj/how-to-git-rebase-into-a-forked-repo-c9f05e821c8a
git remote add upstream https://github.com/original-repo/goes-here.git
git fetch upstream
git rebase upstream/master
git push origin master --force
Create LOCAL feature branch (e.g. feature/multiproc), (after create local branch, even without contents, immediately push local branch to origin), do work, finish, then push feature/multiproc branch to REMOTE/ORIGIN, then pull request to REMOTE/ORIGIN's master:
https://bocoup.com/blog/git-workflow-walkthrough-feature-branches (Good tutorial series on pull request creation, handling)
Also git stash: https://qiita.com/kanatatsu64/items/8feb5bf0352d39cfa3c3
git checkout feature/multi; edit etc.
git stash save -u; git checkout master; edit etc. git push, git pull --rebase --all
git co feature/multi; git stash pop
git commit ; commit first
git rebase master ; then rebase so the current stashed-and-popped work-in-progress is now including master's newest changes
Good git tutorial from atlassian: https://www.atlassian.com/git/tutorials
https://git-scm.com/book/en/v2/Git-Branching-Basic-Branching-and-Merging
https://backlog.com/git-tutorial/branching-workflows/feature-branch-workflow/
https://yangsu.github.io/pull-request-tutorial/
If the master branch has already evolved before merging the feature branch to master branch, then it is better to rebase the feature branch based on the latest master:
git checkout master
git pull --rebase --all
git checkout feature/branch
(git commit the stashed-and-popped work-in-progress)
git rebase master
Assume NO submodules (svn externals) are used!. Assume also that the new server is https://git.company.co.jp/myproject.git
git remote show origin
git remote rm origin
git remote add origin https://git.company.co.jp/myproject.git
git push -u origin --all
git push -u origin --tags
Or:
Create a bare repository with git init --bare in https://git.company.co.jp/myproject.git
git clone --mirror <url to ORI repo> temp-dir
cd temp-dir
git remote rm origin
git remote add origin https://git.company.co.jp/myproject.git
git push -u origin --all
git push -u origin --tags
Install ITK, VTK, FLTK, ITKVTKglue, Insight Applications, build for windows.
Refer mainly to:
http://www.kkaneko.com/rinkou/cygwin/itkvisualc.html
http://www.itk.org/ITK/help/tutorials.html
It seems for cmake, if we change the configuration (for example set ITK_V3COMPATIBILITY to ON), we need to re-configure by pressing the "Configure" button again. And then press the "Generate" button.
But, when changing CMAKE_INSTALL_PREFIX to the cmake bin directory, it will output an error when "Configure" again. Here, just DIRECTLY press "Generate".
yes | nl | head -15 <= Create 15 numbered lines of 2 columns
yes | nl | head -15 | cut -f1 <= Get the 1st column from above
yes | nl -nln | head -15 | cut -f1 <= The line number is left justified
yes | nl | head -15 | cut -f1 | sed 's/^[ \t]*//g' <= The leading white space is trimmed. Number width is 6.
yes | nl -nln -w10 | head -25 | cut -f1 | sed 's/^[ \t]*//;s/[ \t]*$//' <= Remove BOTH leading and trailing white space from each line. Number of width is 10.
yes | nl -nln -w10 | head -25 | awk '{print $1}' <= Same as above
kill `ps auxww | grep awshost1 | head -1 | awk '{print $2}'` <= Kill non responding ssh session
awk '{$1=""; print }' file.txt | sed 's/^[ \t]*//' <= Remove the first column from file.txt
How to trim trailing spaces from file set1.txt, extract final character of each line, and check whether it is ended with punctuation or not, with sed:
sed -e 's/^[ \t]*//;s/[ \t]*$//' set1.txt | sed 's/.*\(.\)$/\1/' | cat -n - | grep -v "\." | grep -v "\?" | grep -v "\!"
GNU screen
screen <= activate screen
screen -r <= reattach/resume screen session
screen -d <= input this at any tab to detach screen session
^t ? <= help
^t ^t <= Toggle between 2 tabs
^t ^u, ^t ^j, ^t [number: 0, 1] <= Navigate/select tab
^t ^c <= Create a new tab
^d OR exit <= Delete tab
^d every tab, delete/exit every tab <= Terminate screen
^t [ <= Copy mode to scroll to hidden screen
^ space <= Abort copy mode ???
Return <= Mark space in copy mode, then move cursor
> <= write to buffer until the final cursor position
^t ] <= paste
Emacs + REPL for JavaScript
C-c ! <= Open *js* buffer for REPL. Close with C-d in the REPL buffer. Can do ".help", etc.
C-x o <= Toggle between editor buffer and *js* buffer
C-c C-j <= Send current line to REPL buffer
C-c C-r <= Send highlighted region to REPL buffer (Changed to C-r C-r, because C-c means region copy)
C-c C-u <= Using npm jshint, check compilation/interpretation of the JS
C-m <= Jump to the error line from REPL buffer to the JS
http://blogs.msdn.com/b/jasonz/archive/2009/05/22/vs2010-tutorial-build-a-wpf-app-with-beta-1.aspx
http://blogs.msdn.com/b/jasonz/archive/2009/05/28/9617993.aspx
http://www.codeproject.com/Articles/140611/WPF-Tutorial-Beginning
http://www.codeproject.com/Articles/781251/Analysing-Big-Data-in-Real-Time-sharp
http://www.codeproject.com/Articles/239849/Multiple-face-detection-and-recognition-in-real
http://www.codeproject.com/Articles/196168/Contour-Analysis-for-Image-Recognition-in-C
http://www.codeproject.com/Articles/207797/Learn-MVC-Model-View-Controller-step-by-step-in
https://software.intel.com/en-us/articles/intel-integrated-performance-primitives-documentation/
http://www.inf.ethz.ch/personal/dpanozzo/libigl_tutorial/tutorial.html
http://www.codeproject.com/Articles/834674/Stage-Getting-Started-With-ArdOS-for-Arduino
http://www.codeproject.com/Articles/834177/Stage-Raspberry-Pi-as-IoT-Node-Installing-OS-and-W
http://www.codeproject.com/Articles/839230/Introduction-to-Raspberry-Pi-with-Raspbian-OS
How to use: http://physics-station.blogspot.jp/2013/03/opencv-visual-studio.html
Use OpenCV from VS2012 using NuGet package manager (sample code available): http://whoopsidaisies.hatenablog.com/entry/2013/12/03/174709
Python AI book pdf:
https://courses.cs.washington.edu/courses/cse415/14sp/notes/Python.pdf
Install Anaconda2 (python2) https://store.continuum.io/cshop/anaconda/
Must execute conda update. Or just update the launcher "conda update launcher"
Anaconda launcher stuck (start spyder manually, then the launcher may work)
anaconda by default installs jupyter-notebook (must first do: conda activate py36 ; then inside this virtual env, do jupyter-notebook )
Install Anaconda3 in Ubuntu 18.04:
wget https://repo.anaconda.com/archive/Anaconda3-2019.10-Linux-x86_64.sh
bash ~/Download/Anaconda3-2019.10-Linux-x86_64.sh (will install to /home/username/anaconda3)
Run conda init from the installer as recommended. ~/.bashrc is modified. source ~/.bashrc ; which python (must be the anaconda3 one); python -V
Run conda config --set auto_activate_base False , if does not want to activate (base) venv by default.
MUST RUN conda config --set auto_activate_base false, because doing vncserver -geometry 1440x900 :1 will NOT properly work in conda activate virtual environment.
Else for environment setting, see below.
If using imgcat, then it is possible to show plot inline in the terminal (iterm2) even though the python3 is executed remotely.
Manage environment in Anaconda:
https://docs.conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html
Create environment py36
conda create -n py36 python=3.6 anaconda
Install different version of python here, i.e., python2.7 and python3.6
Change the environment whether to use python2 or python3
conda info --envs
The output is:
base:/Applications/anaconda2
py36:/Applications/anaconda2/envs/py36
python -V
source conda activate py36 (it looks that after doing source conda activate, the revision number in svn-prompt is NOT shown anymore <= FIXED (the problem is due to LANG env variable (which become japanese)))
python -V
source conda deactivate
conda list : list the "conda/pip installed" package and its version
Python IDE (elpy) in Emacs for MacOS only:
https://realpython.com/emacs-the-best-python-editor/ <== Use ~/.emacs.d/init.el, instead of ~/.emacs. Also teach about using auto installation of packages if not existing (see myPackages)
https://org-technology.com/posts/emacs-elpy.html
conda activate py36 (conda deactivate)
(sudo) pip install jedi rope flake8 importmagic yapf autopep8 ipdb virtualenv black
Error during installing flake8, so do: pip install flake8 --ignore-installed entrypoints
https://qiita.com/friendtree16/items/66a40cc758e20f02b11e
jedi backend server is alive when opening python.
In Emacs: M-x package-install RET elpy RET
Check that auto-completion in python-mode is OK.
Do M-x elpy-config in Emacs.
Fix the virtualenv for RPC (/Applications/anaconda2/envs/py36)
pip install --upgrade jedi
Check with M-x elpy-config
C-c C-c Execute python code (open ipython shell. To run a script from inside ipython: run ./sieve.py.
In ipython prompt, usually expression (like, 1+2) to evaluate, or statement (like assignment statement, x = 1) to execute, is input.
However, line magic function (which is looks like shell command line), such as %run, %who, %whos, %ls, %cd, %edit, %lsmagic, %rerun, %matplotlib inline, etc. can be executed also.
% can be omitted if variable with the same name with the magic function does not exist.
To execute shell command in ipython, prepend with !, for example !ping www.bbc.co.uk
See, https://ipython.readthedocs.io/en/stable/interactive/tutorial.html
cell magic function spans some lines
The line(%)/cell(%%) magic function differs with python ordinary function (such as math.sin(x), print(x)) in below sense:
The argument to line/cell magic functions do NOT need parentheses: %run sieve.py (not %run(sieve.py)), %matplotlib inline, %cd dir ...
line/cell magic functions generally does NOT return value:
For example, %ls dir, %rerun has no return value.
But for line/cell magic functions that returns value, the return value can be assigned (e.g., w = %who_ls)
%who?, %who_ls? for help, %who??, %who_ls?? for more detailed help.
Help with ? also available for user-defined function, e.g., print_twice? print_twice?? math? math??
In ipython, line/cell magic functions that only display state etc., will NOT have Out[123] prompt.
However, line/cell magic functions (such as %who_ls) or other functions that RETURN something, will have Out[124] prompt.
A void function returns None (of type NoneType)
str is immutable object
a = 'abc'
b = 'abc' # or b = a
a is b # returns True
The identity of a and b; id(a) and id(b) is the same. a and b is identical.
If we continue with:
a = a + 'def'
a is b # returns False. id(a) is NOT the same with id(b). b is still 'abc'
Now, a is a new object with value 'abcdef'. The new a object differ with the original a (which is b).
list is mutable object
m = list('abc') # create list from string 'abc'
n = list('abc') # Be careful not to use n = m, where in this case n and m is identical.
n is m # returns False
m and n has equal value, that is, m and n is equivalent. But m and n is NOT identical.
id(m) and id(n) is different
n == m # returns True, because n is equivalent to m
n is m # However, n is m returns False, because n is NOT identical to m
== checks equivalency, is checks identicalness.
If identical then equivalent. However, the reverse is NOT necessarily true.
Equivalent means having the same value. Identical means having/pointing to the same object.
If we do m.append('de'), then the element of m will increase by 1. But id(m) does NOT change.
m = m + ['fg', 'jkl'] # In this case, just like the immutable string case a = a + 'def', by this assignment statement now id(m) will change!
All variable in function's def is LOCAL. To reassign a global variable, count, inside the function's def, declare: global count.
Tuple is immutable (like string):
tu = 1, 2 # Does not need to use parenthesis. This is the same with tu = (1, 2).
list use [], tuple use () or none.
Single element tuple must be ended with ",":
type( ('a') ) # ('a') is a str. A value in parenthesis is NOT necessarily a tuple.
type( ('a',) ) # ('a',) is a tuple.
Can use tuple() to create tuple.
a = tuple('bat')
b = tuple('bat')
a == b # Returns True. So have same value.
a is b # Returns False. BUT different object.
Tuple is comparable.
Swap tuple:
a, b = b, a
Assign using tuple format:
a, b = 1, 2
type(a), type(b) # Both returns int. a is 1, b is 2.
a, b = [0, 1, 2, 3], "Author's note"
addr = 'monty.master@python.or.jp'
user, domain = addr.split('@')
Tuple as return value:
t = divmod(7, 3)
quot, rem = divmod(7, 3)
return min(t), max(t)
Variable length argument tuples (gather and scatter):
* gathers arguments into tuple:
def printall(*args): print(args)
def printall(*args): for i in args: print(i)
* also scatters tuple to multiple arguments
divmod( (7,3) ) # Error
divmod( *(7, 3) ) # * scatters tuple (7, 3) to 2 arguments, 7 and 3.
C-c C-d Show document of symbol at cursor
M-. Go to definition
C-c C-e Multiedit/rename symbol at cursor
C-c C-n flycheck-next-error
C-c C-p flycheck-previous-error
C-c C-r f (M-x elpy-format-code) Tidying up buffer/region
To rename/refactor variable name in C/C++. use srefactor.
VERY SLOW. DO NOT USE.
Press M-RET (Meta/Esc + return) at variable name to be renamed.
BUT, NOT at the declaration/definition of the variable.
Debugging (pdb) with elpy:
Test code:
Text Box
# -*- coding: utf-8 -*-
# Sieve of Eratosthenes: print every prime below MAX_PRIME.
# (Indentation restored; the pasted snippet had lost its leading whitespace.)
MAX_PRIME = 100
sieves = [True] * MAX_PRIME  # sieves[k] True => k still a prime candidate
for i in range(2, MAX_PRIME):
    if sieves[i]:
        print(i)
        # Mark multiples of i as composite; start at i*i because smaller
        # multiples were already crossed out by smaller primes.
        for j in range(i * i, MAX_PRIME, i):
            sieves[j] = False
Debugging python in general: https://realpython.com/python-debugging-pdb/
Modify source code: import pdb; pdb.set_trace()
Modify source code: In Python 3.7, the above is breakpoint()
Without modifying source code: python3 -m pdb asdf.py arg1 arg2
p filename : print the current value of the variable filename
n : next (step over)
s : step (step into)
q : exit debugger
ll : Long List if inside a function body, ALSO the current position of execution (before ->)
l : list code snippet (11 lines around the current line or continue previous listing of ll)
l . : always list 11 lines around the current line
pp var : pretty print
[Enter] : repeat the last command
b : list all break points
enable/disable/cl(ear) breakpoint-number
c : continue until break
b 7 : break at line 7
b 6, sieves[i] : break at (before executing / start of) line 6 if sieves[i] is true.
b util.get_path, not filename.startswith('/')
b util:5, not head.startswith('/')
a : print argument list to the current function
unt [linenumber] : execute until linenumber or 1 line after current line
display vars : display values of some variables when breakpoint is reached. We can see the change of the value. "display i", then "display j". Combine with the usage of brakepoint "b 7" and continue "c".
w : print stack trace (the recent one at bottom). The ">" indicates the current stack frame where variables are dereferenced (using command p, etc.). Good to find the caller of the current function.
h w : help of w
u, d : change current stack frame up (older) or down (newer) when breakpoint is hit.
In Emacs, open python file (example.py), then execute M-x pdb:
If the python file starts with #!/usr/bin/env python3, then type example.py arg1 arg2
If the python file does NOT start with shebang/shabang, then type python3 -m pdb example.py arg1 arg2
The pdb console and the python source is linked, so no need to do ll or l or l . , because there is an arrow in the source file frame that shows where the current code to be executed.
Doxygen in python:
https://qiita.com/tomopiro/items/407e3bf5c08342cd4867
See example here:
Text Box
# -*- coding: utf-8 -*-
# Doxygen-in-Python sample (Japanese tag placeholders kept intentionally;
# line breaks restored -- the pasted one-liner was not valid Python).
"""
@file ファイル名
@version バージョン番号
@author 作成者・更新者
@date パッケージの作成日時・更新日時
@brief 簡単な説明
@details 詳細な説明
@warning 警告メッセージ
@note メモ
"""
"""
@package パッケージの名前
@version バージョン番号
@author 作成者・更新者
@date パッケージの作成日時・更新日時
@brief 簡単な説明
@details 詳細な説明
@warning 警告メッセージ
@note メモ
"""


class Hoge(object):
    """
    @class クラス名
    @brief クラスの簡単な説明
    @details クラスの詳細な説明
    @warning 警告メッセージ
    @note メモ
    """

    def __init__(self, arg):
        """
        @fn __init__()
        @brief Hogeクラスの初期化関数
        @param arg argの説明
        @return None 戻り値なし
        @details 関数の詳細
        @warning 警告メッセージ
        @note メモ
        """
        self.arg = arg

    def fuga(self, a: float, b: float) -> tuple:
        """
        @fn fuga()
        @brief
        @param a オペランド(float)
        @param b オペランド(float)
        @retval sum_ab aとbの和(float)
        @retval sub_ab aとbの差(float)
        @details 詳細な説明
        @warning 警告メッセージ
        @note メモ
        """
        sum_ab = a + b
        sub_ab = a - b
        return sum_ab, sub_ab
If the above code is saved as doxysample.py, then the help() function can be used like below:
https://www.journaldev.com/22892/python-help-function
Inside python3 (conda activate py36; python)
>>> exec(open("python_help_examples.py").read())
>>> globals()
>>> help(Hoge)
>>> help(Hoge.fuga)
DO NOT use shebang on the script, because maybe the environment is windows/mac/linux.
If need to use:
#!/usr/bin/env python2
#!/usr/bin/env python3
DO NOT use: #!/usr/bin/python
So, use the virtual environment and explicitly call python.
source conda activate py36
python2 myscript.py
python3 myscript.py
source conda deactivate
The above python can be python2 or python3 depends on the activated environment.
Use python in REPL (Read Evaluate Print Loop) fashion:
https://stackoverflow.com/questions/5280178/how-do-i-load-a-file-into-the-python-console
https://stackoverflow.com/questions/633127/viewing-all-defined-variables
From outside python, call python shell with -i (interactive) and execute the script:
python3 -i filename.py arg1
dir()
globals()
locals()
ipython3 -i filename.py arg1
python -i filename.py arg1
ipython -i filename.py arg1
Inside python shell execute the script:
exec( open( "filename.py" ).read() )
dir()
globals()
locals()
help( myplot )
For python, keep using TABS in case of file that is already using TABS. But, the default is using space:
See: https://emacs.stackexchange.com/questions/32140/python-mode-indentation?rq=1
To untabify the tabs to space:
1. Select all region
2. M-x untabify
Installation of Anaconda in Linux:
http://askubuntu.com/questions/505919/installing-anaconda-python-on-ubuntu
Set PATH? No need. The installer bash *.sh will perform the path addition.
Now, in ~/.zprofile, add: . /Applications/anaconda2/etc/profile.d/conda.sh
In Ubuntu, ~/.bashrc will be edited by the Anaconda installer.
MUST set first the virtual environment (MAC case) (????):
conda info --envs
conda activate base:conda update --prefix /Applications/anaconda2 {anaconda, ...}
conda activate py36:
conda update --prefix /Applications/anaconda2/envs/py36 {anaconda, jupyter, ...}
see also ~/.jupyter/migrated
conda update
conda update --prefix /home/username/anaconda anaconda
conda update --prefix /home/username/anaconda ipython
conda update --prefix /home/username/anaconda conda
conda update --prefix /home/username/anaconda spyder
conda update jupyter
conda update notebook
conda update -n base -c defaults conda (? But seems needed for the first time.)
conda update anaconda-navigator
conda update pip
conda update --all
conda activate base ; conda update --all
conda activate py37 ; conda update --all
See this to update all package in python:
conda activate {base,py37}
http://qiita.com/manji-0/items/d3d824d77c18c2f28569
Maybe better to do just the conda update --all above, without doing below?
pip list --outdated | awk '{print $1}' | xargs pip install -U
make sure that pip is /Applications/anaconda/bin/pip (which pip)
pip install pip-review
pip-review
pip-review --auto
pip-review --interactive
pip list
https://github.com/Russell91/pythonpy
pip install pythonpy
pip install -U pythonpy
awk -F'\t' '{print $5}' diffres.txt | py --ji -l 'min(l), max(l), len(l), numpy.mean(l), numpy.median(l), numpy.std(l), numpy.count_nonzero(l)'
py 'range(4)'; py '[range(1,4)]'
py 'numpy.arange(1.0, 5.6, 0.5)'; py '[numpy.arange(1.0, 5.6, 0.5)]'
py 'numpy.sin(numpy.pi/2.0)'; py 'numpy.exp(1)'
py 'numpy.arange(1,5,0.5)' | py -x 'float(x)*2.0'; py 'numpy.arange(1,5,0.5)' | py --ji -l 'numpy.mean(l)'
echo $'3,2\n5,1' | py -x 'float(x.split(",")[0])/float(x.split(",")[1])'
echo $'a,2\nb,1' | py -l 'sorted(l, key=lambda x: x.split(",")[1])'
cat diffres.txt | py -x 'float(x.split("\t")[0]), float(x.split("\t")[3])'
cat diffres.txt | py -x 'float(x.split("\t")[3])*100.0 / float(x.split("\t")[0])'
The above with awk:
awk -F'[\t,]' 'function abs(v) {return v<0 ? -v : v} {print abs(100.0*$4/$1-$5)}' diffres.txt | py --ji -l 'min(l), max(l), numpy.mean(l), numpy.median(l), len(l), numpy.count_nonzero(l)'
http://stackoverflow.com/questions/25472840/anaconda-launcher-links-dont-work
According to the above, it is better to use navigator, not launcher
conda install anaconda-navigator
How to use? Use the command "ipython", "ipython console", "ipython qtconsole" or "spyder"
python-mode.el:
http://www.emacswiki.org/emacs/ProgrammingWithPythonModeDotEl
Easy way to use Python interface of OpenCV2:
Copy /opencv2/install/dir/build/python/2.7/x64(depends on the anaconda)/cv2.pyd to /Anaconda/Lib/site-packages.
Set PATH to OpenCV2 build bin directory.
Test in Anaconda: import cv2, print cv2.__version__
http://stackoverflow.com/questions/23119413/how-to-install-python-opencv-through-conda
http://vision.is.tohoku.ac.jp/~kyamagu/en/teaching/seminar1-fall2014/01-python-setup.html
Looks NOT working when doing:
import cv2
image = cv2.imread("C:/appsiv/programming/opencv2.4.10/sources/samples/c/fruits.jpg")
cv2.imshow("window", image)
It seems that, need to add cv2.waitKey()
http://stackoverflow.com/questions/21810452/cv2-imshow-command-doesnt-work-properly-in-opencv-python
But works with "%run cameraview.py", because we have the cv2.waitKey() here.
The hard way, is to rebuild with Python interface valid:
http://answers.opencv.org/question/17536/install-opencv-for-anaconda-ipython/
Tutorial:
https://store.continuum.io/static/img/Anaconda-Quickstart.pdf
https://scipy-lectures.github.io/
Magic function: %run, %whos, %cd, %timeit, %cpaste %debug. Check with %magic. By default the setting 'automagic' is enabled so, no need to prefix with %. (%run?: help of run)
alias to check the aliased unix command.
Use tab to auto complete. For example, type x = 10. Then x.<TAB> to check a lot of attribute of x.
To initiate plotting from inside IPython:
%pylab inline
OR %matplotlib inline
Better use scipy.linalg than numpy.linalg
Random walk example. Broadcasting example (distance between cities)
Inverse matrix calculation:
Open ipython
%pylab inline
%matplotlib inline
from scipy import linalg (as la, maybe?)
import numpy as np
mat = np.array( [ [1, 2], [3,4] ] )
linalg.det(mat)
imat = linalg.inv(mat)
np.allclose( np.dot( mat, imat ), np.eye(2) )
Load matrix data from text file:
cat matrix.txt:
; Header 1
; Header 2
; Header 3
1 11
3 71
5 10
-3.0 10.2
-17 54.3
In ipython: A = numpy.loadtxt( "matrix.txt", skiprows = 3 )
py 'numpy.loadtxt( "matrix.txt", skiprows = 3 )'
py 'numpy.transpose( numpy.loadtxt( "matrix.txt", skiprows = 3 ) )'
py 'numpy.array([[1,5], [12, 13]])'
py 'scipy.linalg.det( numpy.array( [ [1, 5], [12, 13] ] ) )'
py 'scipy.linalg.inv( numpy.array( [[1,5],[12,13]] ) )'
py 'numpy.dot( numpy.array([[1,5],[12,13]]), scipy.linalg.inv( numpy.array([[1,5], [12, 13]]) ) )'
py 'numpy.allclose( numpy.dot( numpy.array([[1,5],[12,13]]), scipy.linalg.inv( numpy.array([[1,5], [12, 13]]) ) ), numpy.eye(2) )'
http://pandas.pydata.org/pandas-docs/stable/visualization.html
Python plotting:
http://bicycle1885.hatenablog.com/entry/2014/02/14/023734
ipython, but better use spyder (ipython's IDE) to avoid plt.show() bug specification below.
import numpy as np
import matplotlib.pyplot as plt
x=np.arange(0, 2*np.pi, 0.05)
y=np.sin(x)
plt.plot(x,y)
plt.show(): Unfortunately, cannot be redisplayed after closed...
https://stackoverflow.com/questions/50452455/plt-show-does-nothing-when-used-for-the-second-time
Workaround for ipython (not jupyter) BUT, DO NOT close the figure with Command+Q !!!!:
import matplotlib.pyplot as plt
fig = plt.figure() OR fig = plt.figure(figsize=(10,10))
plt.plot([1,2,4,16])
plt.show()
plt.show() # Show nothing
fig.show() # Always showing plot
A typical usage of plot is to refine the plot at each display. So, use a function, for example like below:
import matplotlib.pyplot as plt
year = [1500, 1600, 1700, 1800, 1900, 2000]
pop = [458, 580, 682, 1000, 1650, 6127]
In [4]: def my_plot(year, pop):
...: plt.plot(year, pop)
...: plt.xlabel("year")
...: plt.ylabel("population")
who
my_plot(year, pop)
plt.show()
my_plot(year, pop)
plt.show()
my_plot(year, pop)
plt.title("demographics plot")
plt.show()
To enable plot inline ipython in iterm2: https://rensdimmendaal.com/notes/howto-plot-inline-ipython-iterm2/
bash$ or zsh% conda activate py36
bash$ or zsh% pip install imgcat
bash$ or zsh% ipython
In[1] import matplotlib
In[2] matplotlib.use("module://imgcat") <== In[1] and In[2] are replacement of %matplotlib inline
In[3] import matplotlib.pyplot as plt
... plt.plot([1,2,4,16]) plt.plot([2,5,-3,10])
In[] plt.show()
Load text data then plot:
import numpy as np
import matplotlib.pyplot as plt
dt = np.loadtxt("textdata.txt")
type(dt)
np.size(dt) == wc -l textdata.txt
plt.plot(dt[0:100000])
plt.show()
How to plot wavfile data:
SPYDER
import numpy as np
import matplotlib.pyplot as pp
import scipy.io.wavfile as siow
ar = [1,2,3,4,5]
pp.plot(ar)
wavfile = "asdf.wav"
rate, data = siow.read(wavfile)
pp.plot(data)
print( len(data) )
print( np.mean(data) )
print( np.max( abs(data) ) )
print( np.count_nonzero(data) )
How to load TEXT data with multi columns separated by "\t" (tab) to python list:
Actually, must do split("\t"), but a Qt bug prevents typing the backslash \ into ipython (http://nsbio.hatenablog.com/entry/2013/12/25/232456). So, just use split().
Tech 1: map(float, "123 456".strip().split( or "\t" (but not usable inside ipython) )) output [123.0, 456.0].
map( float, "123 456 789".strip().split() )
py 'map( float, "123\t456".strip().split("\t") )'
Assume data is 123\t456\t789 for each line.
cols = zip( *[map( float, line.strip().split() ) for line in open("diffres.txt", 'r')] )
cols[0], cols[1], ... contains the numbers.
List comprehension:
pupu = [x for x in range(5) if x!=2]
pupu = [x if x!=2 else -1 for x in range(5)]
pupu = [ mat[r][c] for r in range(rowNum) for c in range(colNum) ]
Get short / int16 data from wav file:
import scipy.io.wavfile as siow
rate, data1 = siow.read("tst_aa.wav")
Get short / int16 data from raw file (data part of wav file, created with "sox tst_aa.wav tst_aa.raw"):
import numpy as np
import struct
data2 = []
with open("tst_aa.raw", "rb") as f:
packed_data = f.read(2)
while packed_data:
data2.extend( struct.unpack( "h", packed_data ) )
packed_data = f.read(2)
f.close()
data2 = np.asarray( data2, dtype=np.int16 )
Get normalized TEXT wave data (data part of wav file, created with "sox tst_aa.wav tst_aa.dat"):
data3 = [ 32768.0*float(line.strip().split()[1]) for line in open("tst_aa.dat", 'r') if line.strip()[0]!=';']
import numpy as np
data3 = np.asarray( data3, dtype=np.double )
How to generate wave file audio from a numpy array:
http://stackoverflow.com/questions/10357992/how-to-generate-audio-from-a-numpy-array
SPYDER
Sampling freq = 8000 Hz (sampling period = 1/8000 sec). Pulse train = 125 Hz (= pulse interval/period = 0.008 sec).
So for 1 sec of 8000 samples, the samples at 0, 64 (= 0.008 sec / (1/8000 sec)), 128, ... are NON-zero pulses.
import numpy as np
data = np.zeros(8000)
data[::64] = 0.5
import matplotlib.pyplot as pp
pp.plot(data)
pp.plot(data[0:640])
pp.stem( data[0:640], "-")
pp.stem( data[0:640], "--")
pp.stem( data[0:640], "-.")
Export data as wav file. Need to normalize!
import numpy as np
from scipy.io.wavfile import write
scaled = np.int16( data / np.max( np.abs(data) ) * 32767 )
write( "pulse_train.wav", 8000, scaled )
Some correlation calculation:
Pearson correlation (NO time-shift) : https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.stats.pearsonr.html
The first element is the correlation between the 2 arrays:
$ py 'scipy.stats.stats.pearsonr( numpy.array([1,4,6,2,-3]), numpy.array([5,1,0,2,0]) )'
-0.106654237564
0.864461498383
Correlation coefficient of some arrays (NO time-shift): https://docs.scipy.org/doc/numpy/reference/generated/numpy.corrcoef.html
[0][0] is the correlation between itselves, [0][1] is the correlation between array[0] and [1], [1][2] is the correlation between array[1] and [2], ...:
$ py 'numpy.corrcoef( [numpy.array([1,4,6,2,-3]), numpy.array([1,4,6,2,-3]), numpy.array([5,1,0,2,0])] )'
[ 1. 1. -0.10665424]
[ 1. 1. -0.10665424]
[-0.10665424 -0.10665424 1. ]
Signal autocorrelation (WITH time-shift): https://docs.scipy.org/doc/numpy/reference/generated/numpy.correlate.html
The largest correlation is the autocorrelation without time-shift (exactly with itself) (the 5 in [-5:] is the length of the signal/array, i.e., len(array)):
$ py 'numpy.correlate( numpy.array([1,4,6,2,-3]), numpy.array([1,4,6,2,-3]), mode="full" )'
-3
-10
-4
34
66
34
-4
-10
-3
$ py 'numpy.correlate( numpy.array([1,4,6,2,-3]), numpy.array([1,4,6,2,-3]), mode="full" )[-5:]/66.0'
1.0
0.515151515152
-0.0606060606061
-0.151515151515
-0.0454545454545
Jupyter notebook: https://www.google.co.jp/webhp?sourceid=chrome-instant&ion=1&espv=2&ie=UTF-8#q=jupyter%20notebook
http://qiita.com/taka4sato/items/2c3397ff34c440044978#jupyter%E3%81%AE%E4%BD%BF%E3%81%84%E6%96%B9
itertools usage:
import itertools
string = "123456789"
numbers = [ int("".join(x)) for x in itertools.permutations(string) if int("".join(x)) % 99 == 0]
numbers.sort()
print numbers[0]
len(numbers)
Be careful about copy, shallow copy, deep copy, especially in list.
If the array is an N-dimensional matrix, then numpy.copy will do a copy, instead of referencing.
import numpy as np
a = [ [4,5,6], [1,2,3] ]
a_arr = np.asarray( a, dtype=np.float64 )
b_arr = a_arr
c_arr = np.copy( a_arr )
b_arr[0][0] = -1
a_arr[0][0] => -1
c_arr[0][0] => 4
Using matrix is a little bit different:
a_m[0,1], instead of a_m[0][1]
For np.array:
import numpy as np
a = np.array( [[4,5,6], [1,2,3]], dtype=np.float64 )
b = np.copy(a)
b[0] = np.array( [-1,-2.2,-7], dtype = np.float64 )
Documenting with docstring:
docstring Tutorial: https://realpython.com/documenting-python-code/
Use Numpy/Scipy's numpydoc docstring format: https://numpydoc.readthedocs.io/en/latest/format.html
Use sphinx to create the html:
Sphinx Tutorial: https://qiita.com/futakuchi0117/items/4d3997c1ca1323259844
Below mutation of mutable object list is OK, and will be shared inside the caller:
In [2]: def testfun1(myparam: list):
...: print(id(myparam))
...: myparam[0] = 'new'
...: print(id(myparam))
...:
In [5]: mylist1 = [0,1,2]
In [6]: mylist1
Out[6]: [0, 1, 2]
In [7]: id(mylist1)
Out[7]: 4419243056 <==== SAME
In [8]: testfun1(mylist1)
4419243056 <==== SAME
4419243056 <==== SAME
In [9]: mylist1
Out[9]: ['new', 1, 2]
Inside below callee, the variable that stores the caller mutable object list is reassigned to a new object inside the callee, so the caller knows nothing about the change:
In [10]: def testfun2(myparam: list):
...: print(id(myparam))
...: myparam = ['new', 'list', 'local only', "INSIDE CALLER DON'T CHANGE"]
...: print(id(myparam))
...:
In [11]: mylist2 = ['old', 'list', 'in caller']
In [12]: print(id(mylist2))
4420589760 <==== SAME
In [13]: print(mylist2)
['old', 'list', 'in caller']
In [14]: testfun2(mylist2)
4420589760 <==== SAME
4420588560 <------------------------------ CHANGED, BECAUSE LOCALLY REASSIGNED
In [15]: print(mylist2)
['old', 'list', 'in caller']
As below, the integer value 0, 1, 2, or 3 is stored in their own places. Variable `num` is CHANGED DYNAMICALLY to the reference to place where 0, 1, 2, or 3 is stored. Interesting...
In [1]: num = 0
In [2]: print(id(num))
4467905632
In [3]: while num < 3:
...: print('num_bef = ', num, id(num))
...: num = num + 1
...: print('num_aft = ', num, id(num))
...:
num_bef = 0 4467905632
num_aft = 1 4467905664
num_bef = 1 4467905664
num_aft = 2 4467905696
num_bef = 2 4467905696
num_aft = 3 4467905728
In [4]: print(id(num))
4467905728
In [5]: num = 0
In [6]: print(id(num))
4467905632
In [7]: while num < 3:
...: print('num_bef = ', num, id(num))
...: num = num + 1
...: print('num_aft = ', num, id(num))
...: print('')
...:
num_bef = 0 4467905632
num_aft = 1 4467905664
num_bef = 1 4467905664
num_aft = 2 4467905696
num_bef = 2 4467905696
num_aft = 3 4467905728
In [8]: id(0)
Out[8]: 4467905632
In [9]: id(1)
Out[9]: 4467905664
In [10]: id(2)
Out[10]: 4467905696
In [11]: id(3)
Out[11]: 4467905728
http://introtopython.org/classes.html
Module's name is all in lower-case and use underscore (_) to separate words when necessary. ClassName is in CamelCase.
Import Python Standard Library first (alphabetical order, one module in one line) then import 3rd party library.
Module is actually a file in python. For example a file mymodule.py (the filename is the module-name with .py extension), i.e. module-name: mymodule, contains MyClass1 and MyClass2. To use these classes, import the module, then use the class like this:
import mymodule
obj1 = mymodule.MyClass1()
obj2 = mymodule.MyClass2()
In case typing my module is too long, use import mymodule as mm:
import mymodule as mm
obj1 = mm.MyClass1()
obj2 = mm.MyClass2()
If only MyClass1 is needed, then use from filename/modulename import ClassName. The ClassName can then be used directly without the modulename followed by dot notation, then ClassName.
from mymodule import MyClass1
obj1 = MyClass1()
Example 1:
# Standard python module (Python Standard Library)
import argparse
import os
import sys
import typing
# Third-party python module
import pymodmod
...
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
... -> typing.Tuple[dict, int, int, int, int]
Example 2 (the same as example 1, but different way to import and use):
# Standard python module (Python Standard Library)
import argparse
from argparse import RawDescriptionHelpFormatter
import os
import sys
from typing import Tuple
# Third-party python module
import pymodmod
...
parser = argparse.ArgumentParser(description=__doc__, formatter_class=RawDescriptionHelpFormatter)
... -> Tuple[dict, int, int, int, int]
conda activate py36 ; jupyter-notebook
Tutorial:
Notebook not trusted:
jupyter trust notebook-name.ipynb
jupyter-notebook notebook-name.ipynb OR jupyter notebook notebook-name.ipynb
Ctrl-Z ; bg ; fg
To run cell, click Run, or Control+Enter. If previous run cell already import, then no need to import again in the later cell. NOTE, the previous cell which imports MUST be run first.
How to insert image:
Change Cell to Markdown, then add # Problem, etc.
Jupyter Notebook Menu: Edit -> Insert image, select Image. Press Control + Enter. To edit markdown cell, double click the cell.
Control+Enter: run cell. Shift+Enter: run cell, then go to below cell OR add below cell.
How to insert latex equation:
In the "CODE" cell, write %%latex on top, then write the equation.
Cell contents:
%%latex
\begin{align}
c = \sqrt{a^2 + b^2} \\
\nabla \times \vec{\mathbf{B}} -\, \frac1c\, \frac{\partial\vec{\mathbf{E}}}{\partial t} & = \frac{4\pi}{c}\vec{\mathbf{j}} \\
\nabla \cdot \vec{\mathbf{E}} & = 4 \pi \rho \\
\nabla \times \vec{\mathbf{E}}\, +\, \frac1c\, \frac{\partial\vec{\mathbf{B}}}{\partial t} & = \vec{\mathbf{0}} \\
\nabla \cdot \vec{\mathbf{B}} & = 0
\end{align}
How to print to pdf from command line (also doable from menu: File)
jupyter-nbconvert Untitled.ipynb --to pdf
Must do this first: https://qiita.com/yasudadesu/items/7b4edec4498e425bf50c
If the order of the embedded image is wrong, try to update the conda.
Source control and html rendering:
The ipynb source file can be stored in github.
Use jupyter's nbviewer (https://nbviewer.jupyter.org/) to render the above github stored notebook.
To exclude the source code from being printed to pdf:
jupyter nbconvert Untitled.ipynb --to=pdf --TemplateExporter.exclude_input=True
To remove heading numbers from being printed to pdf:
In the Markdown cell, append {-} or {.unnumbered}: # Heading {-}
To change the notebook's title when saved to pdf:
From Jupyter notebook: Edit -> Edit Notebook Metadata then add a new field like:
Text Box
"title": "This is the new title",
"authors": [
{
"name": "Ivan"
},
{
"name": "Ivan 2"
}
],
To change the notebook's date when saved to pdf, first need to modify the base.tplx and jsarticle.tplx in order to accept date metadata. The date metadata can be added from Edit Notebook Metadata.
Text Box
%% Add in base.tplx and jsarticle.tplx
((* block date *))
((* if nb.metadata["date"]: *))
\date{((( nb.metadata["date"] )))}
((* endif *))
((* endblock date *))
((* block title -*))
((*- if nb.metadata.get("latex_metadata", {}).get("title", ""): -*))
\title{((( nb.metadata["latex_metadata"]["title"] )))}
((*- else -*))
((*- set nb_title = nb.metadata.get('title', '') or resources['metadata']['name'] -*))
\title{((( nb_title | escape_latex )))}
((*- endif *))
((*- endblock title *))
((* block author *))
((*- if nb.metadata.get("latex_metadata", {}).get("author", ""): -*))
\author{((( nb.metadata["latex_metadata"]["author"] )))}
((*- else -*))
((* if 'authors' in nb.metadata *))
\author{((( nb.metadata.authors | join(', ', attribute='name') )))}
((* else *))
\author{Johanes Ivan Setiawan}
((* endif *))
((*- endif *))
((* endblock author *))
%% Edit -> Edit notebook metadata:
"date": "circa \\today",
"date": "\\today 頃にしようかな",
"latex_metadata": {
"author": "イファン the \\TeX-er\\thanks{捕鯨大学 \\texttt{aaaa@hoge-u.ac.jp}, URL: \\texttt{http://www.hoge-u.ac.jp/\\~{}aaaa2}} \\and Hoge 河豚$\\frac{\\pi}{\\sqrt{7}}$ Fuga\\thanks{マサチューセッツ工科大学}",
"title": "これは \\LaTeX メタデータの日本語のタイトルですよ \\\\ これはサブタイトル maximize function $P(a)$"
},
How to plot inequality:
https://fillplots.readthedocs.io/en/latest/examples.html (NOT working)
https://stackoverflow.com/questions/17576508/python-matplotlib-drawing-linear-inequality-functions
plt.axis('scaled')
plt.grid(True)
Add above xlim, ylim
To change the %matplotlib inline figure resolution on the notebook and the font in the image:
https://stackoverflow.com/questions/25412513/inline-images-have-low-quality
The dpi value should be from 150-300. LaTeX font is "cm" (computer modern)
Text Box
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
matplotlib.rcParams['figure.dpi']= 300
matplotlib.rcParams['mathtext.fontset'] = 'cm' # or 'stix'
matplotlib.rcParams['font.family'] = 'STIXGeneral'
def annotate_dim(ax,xyfrom,xyto,text=None,fontsize=15, color='black', xoffset=0.0, yoffset=0.0):
if text is None:
text = str(np.sqrt( (xyfrom[0]-xyto[0])**2 + (xyfrom[1]-xyto[1])**2 ))
ax.annotate("",xyfrom,xyto,arrowprops=dict(color=color, arrowstyle='<->', shrinkA=0, shrinkB=0))
# ax.text((xyto[0]+xyfrom[0])/2+xoffset,(xyto[1]+xyfrom[1])/2+yoffset,text,fontsize=fontsize,color=color)
ax.text((xyto[0]+xyfrom[0])/2+xoffset,(xyto[1]+xyfrom[1])/2+yoffset,text,fontsize=fontsize,color=color,
bbox=dict(facecolor='white', edgecolor='none', boxstyle='round,pad=0.02'))
# plot the feasible region
d = np.linspace(-2,2,1000)
x,y = np.meshgrid(d,d)
plt.imshow( ( (y>=0) & (y<=1) & (x>=0) & (x<=1) & (y<=1.5-x) & (y>= 0.5-x) &
(y>=0) & (y<=1) & (x>=0) & (x<=1) & (y<=x+0.5) & (y>= x-0.5) & (x<=0.65) ) .astype(int) ,
extent=(x.min(),x.max(),y.min(),y.max()),origin="lower", cmap="Greys", alpha = 0.1);
# plot the lines defining the constraints
x = np.linspace(-2, 2, 2000)
y1 = 1.5 - x
y2 = 0.5 - x
y3 = x + 0.5
y4 = x - 0.5
aa=0.65
# Make plot
plt.plot(x, y1, label=r'$x+y \leq 1.5$')
plt.plot(x, y2, label=r'$0.5 \leq x+y$')
plt.plot(x, y3, label=r'$-0.5 \leq x-y $')
plt.plot(x, y4, label=r'$x-y \leq 0.5$')
plt.axvline(x=aa, label=r'$x=a; (0 < a=0.65 \leq 0.5)$', color='black')
plt.axis("scaled")
# plt.grid(True)
plt.xlim(0,1)
plt.ylim(0,1)
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.xlabel(r'$x$', fontsize=15)
plt.ylabel(r'$y$', fontsize=15)
plt.text(-0.1,-0.1, 'A', fontsize=15)
plt.text(aa-0.01,-0.1, 'B', fontsize=15)
plt.text(aa-0.01,1.05, 'C', fontsize=15)
plt.text(-0.05,1.05, 'D', fontsize=15)
plt.text(-0.08,0.48, 'E', fontsize=15)
plt.text(0.48,-0.08, 'F', fontsize=15)
plt.text(1.01,0.48, 'G', fontsize=15)
plt.text(0.48,1.03, 'H', fontsize=15)
plt.text(aa+0.015,aa-0.5-0.05, 'K', fontsize=15)
plt.text(aa+0.015,1.5-aa+0.01, 'L', fontsize=15)
annotate_dim(plt.gca(),[0,0.04],[aa,0.04],'$a$', fontsize=15, color='blue', xoffset=-0.02, yoffset=0.02)
annotate_dim(plt.gca(),[aa,0.04],[1,0.04],'$1-a$', fontsize=15, color='blue', xoffset=-0.08, yoffset=0.02)
annotate_dim(plt.gca(),[aa-0.04,aa-0.5],[aa-0.04,1.5-aa],'$2(1-a)$', fontsize=15, color='blue', xoffset=-0.25, yoffset=-0.02)
plt.show()
Other: plt.legend(fontsize=14), plt.tick_params(labelsize=12)
Text Box
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
matplotlib.rcParams['figure.dpi']= 150
matplotlib.rcParams['mathtext.fontset'] = 'cm' # or 'stix'
matplotlib.rcParams['font.family'] = 'STIXGeneral'
plt.subplot(1, 2, 1)
xmy=np.arange(-1, -0.5, 0.01)
cosxmy=np.cos(np.pi*xmy)
plt.plot(xmy,cosxmy, "b--")
xmy=np.arange(-0.5, 0.5, 0.01)
cosxmy=np.cos(np.pi*xmy)
plt.plot(xmy,cosxmy, "r-")
xmy=np.arange(0.5, 1.0, 0.01)
cosxmy=np.cos(np.pi*xmy)
plt.plot(xmy,cosxmy, "b--")
plt.xlabel("$(x-y)$", fontsize=16)
plt.ylabel("$\cos(\pi(x-y))$", fontsize=16)
plt.axis('scaled')
plt.grid(True)
plt.tick_params(labelsize=12)
plt.subplot(1, 2, 2)
# plot the feasible region
d = np.linspace(-2,2,1000)
x,y = np.meshgrid(d,d)
plt.imshow( ( (y>=0) & (y<=1) & (x>=0) & (x<=1) & (y<=x+0.5) & (y>= x-0.5) ) .astype(int) ,
extent=(x.min(),x.max(),y.min(),y.max()),origin="lower", cmap="Greys", alpha = 0.3);
# plot the lines defining the constraints
x = np.linspace(-2, 2, 2000)
y1 = x + 0.5
y2 = x - 0.5
# Make plot
plt.plot(x, y1, label=r'$-0.5 \leq x-y $')
plt.plot(x, y2, label=r'$x-y \leq 0.5$')
plt.axis("scaled")
# plt.grid(True)
plt.xlim(0,1)
plt.ylim(0,1)
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.xlabel(r'$x$', fontsize=16)
plt.ylabel(r'$y$', fontsize=16)
plt.legend(fontsize=14)
plt.tick_params(labelsize=12)
ax = plt.gca()
ax.text(0.7, 0.84, r'$1.5 \leq x+y$', color='black', fontsize=14,
bbox=dict(facecolor='white', edgecolor='none', boxstyle='round,pad=0.1'))
annotstr = ' ' + r'$0.5 \leq x+y \leq 1.5$' + '\n'
annotstr += ' ' + r'$\cap$' + '\n'
annotstr += r'$-0.5 \leq x-y \leq 0.5$'
ax.text(0.22, 0.42, annotstr, color='black', fontsize=14,
bbox=dict(facecolor='white', edgecolor='none', boxstyle='round,pad=0.2'))
#plt.tight_layout()
plt.tight_layout(rect=[0, 0.03, 2, 0.95])
plt.show()
Numbering and captioning figure:
http://blog.juliusschulz.de/blog/ultimate-ipython-notebook
Must edit the cell metadata. Use "widefigure": false
{
"collapsed": false,
"trusted": true,
"caption": "some caption",
"label": "fig:somelabel",
"widefigure": false
}
Must edit the jsarticle.tplx and base.tplx 's nocaption. Comment out the "nocaption" line, so now we are able to add captions.
Add below template above document body of base.tplx:
Text Box
% New mechanism for rendering figures with captions
((*- block data_png -*))
((*- if cell.metadata.widefigure: -*))
((( draw_widefigure_with_caption(output.metadata.filenames['image/png'], cell.metadata.caption, cell.metadata.label) )))
((*- else -*))
((*- if cell.metadata.caption: -*))
((*- if cell.metadata.label: -*))
((( draw_figure_with_caption(output.metadata.filenames['image/png'], cell.metadata.caption, cell.metadata.label) )))
((*- else -*))
((( draw_figure_with_caption(output.metadata.filenames['image/png'], cell.metadata.caption, "") )))
((*- endif *))
((*- else -*))
((( draw_figure_with_caption(output.metadata.filenames['image/png'], "") )))
((*- endif *))
((*- endif *))
((*- endblock -*))
((*- block data_jpg -*))
((*- if cell.metadata.caption: -*))
((*- if cell.metadata.label: -*))
((( draw_figure_with_caption(output.metadata.filenames['image/jpeg'], cell.metadata.caption, cell.metadata.label) )))
((*- else -*))
((( draw_figure_with_caption(output.metadata.filenames['image/jpeg'], cell.metadata.caption, "") )))
((*- endif *))
((*- else -*))
((( draw_figure_with_caption(output.metadata.filenames['image/jpeg'], "") )))
((*- endif *))
((*- endblock -*))
((*- block data_svg -*))
((*- if cell.metadata.caption: -*))
((*- if cell.metadata.label: -*))
((( draw_figure_with_caption(output.metadata.filenames['image/svg+xml'], cell.metadata.caption, cell.metadata.label) )))
((*- else -*))
((( draw_figure_with_caption(output.metadata.filenames['image/svg+xml'], cell.metadata.caption, "") )))
((*- endif *))
((*- else -*))
((( draw_figure_with_caption(output.metadata.filenames['image/svg+xml'], "") )))
((*- endif *))
((*- endblock -*))
((*- block data_pdf -*))
((*- if cell.metadata.widefigure: -*))
((( draw_widefigure_with_caption(output.metadata.filenames['application/pdf'], cell.metadata.caption, cell.metadata.label) )))
((*- else -*))
((*- if cell.metadata.caption: -*))
((*- if cell.metadata.label: -*))
((( draw_figure_with_caption(output.metadata.filenames['application/pdf'], cell.metadata.caption, cell.metadata.label) )))
((*- else -*))
((( draw_figure_with_caption(output.metadata.filenames['application/pdf'], cell.metadata.caption, "") )))
((*- endif *))
((*- else -*))
((( draw_figure_with_caption(output.metadata.filenames['application/pdf'], "") )))
((*- endif *))
((*- endif *))
((*- endblock -*))
% Draw a figure using the graphicx package.
((* macro draw_figure_with_caption(filename, caption, label) -*))
((* set filename = filename | posix_path *))
((*- block figure scoped -*))
((*- if label: -*))
\begin{figure}
\begin{center}\adjustimage{max size={0.9\linewidth}{0.4\paperheight}}{((( filename )))}\end{center}
\caption{((( caption )))}
\label{((( label )))}
\end{figure}
((*- else -*))
\begin{figure}
\begin{center}\adjustimage{max size={0.9\linewidth}{0.4\paperheight}}{((( filename )))}\end{center}
\caption{((( caption )))}
\end{figure}
((*- endif *))
((*- endblock figure -*))
((*- endmacro *))
% Draw a figure using the graphicx package.
((* macro draw_widefigure_with_caption(filename, caption, label) -*))
((* set filename = filename | posix_path *))
((*- block figure_wide scoped -*))
((*- if label: -*))
\begin{figure*}
\begin{center}\adjustimage{max size={0.9\linewidth}{0.4\paperheight}}{((( filename )))}\end{center}
\caption{((( caption )))}
\label{((( label )))}
\end{figure*}
((*- else -*))
\begin{figure*}
\begin{center}\adjustimage{max size={0.9\linewidth}{0.4\paperheight}}{((( filename )))}\end{center}
\caption{((( caption )))}
\end{figure*}
((*- endif *))
((*- endblock figure_wide -*))
((*- endmacro *))
Be careful, now all figure must have proper/unique label. DO NOT USE VOID label in the cell metadata, because the template will create void \label{} and \TeX compiler will give "multiply defined label warning". See the aux file. <==== SOLVED. Add \label{}, only when label cell-metadata exist! See above base.tplx.
M-x package-install ein
fg; bg; jobs; fg %jupyter[TAB] (in zsh); fg %2; emacs -nw; M-x suspend-emacs
To open notebook:
1. In shell, do the following to start jupyter server: conda activate py37; jupyter-notebook&
2. In Emacs: M-x ein:notebooklist-login , and (may need to) login with the token shown when jupyter-notebook is started.
3. In Emacs: may need to do M-x ein:notebooklist-open. The notebook list appears, then use [Tab] to navigate to/between the ipynb file to open.
4. Open the notebook by click open or [Ret] at the Open when Tab is moved there.
5. M-x ein:notebook-close
Execute cell: C-c C-c
NOT recommended to use ein for now. Reason:
notebook's author-metadata is changed. Don't know why.
Must install latex-preview, markdown-preview, etc...
(NOT related, but anyway) install Emacs package:
M-x package-install: latex-extra, auto-complete-auctex, yasnippet-snippets
Installation of latex etc, to enable "jupyter nbconvert asdf.ipynb --to pdf" in Ubuntu 18.04:
https://nbconvert.readthedocs.io/en/latest/install.html#installing-tex
sudo apt-get install texlive-lang-japanese
sudo apt-get install texlive-xetex
sudo apt-get install pandoc
sudo apt-get install fontconfig texlive-fonts-recommended texlive-generic-recommended
Setting jupyter notebook to output Japanese PDF:
Install nbextension printview to preview the print etc.:
conda activate py3; conda install -c conda-forge jupyter_contrib_nbextensions
For Ubuntu:
https://qiita.com/masa-ita/items/8d5ebe8afe0d580af184
Refer to above site, but do only the "xeCJK編;日本語フォントのみを指定する場合"
Environment base: ~/anaconda3/lib/python3.7/site-packages/nbconvert/templates/latex/base.tplx
Confirm there is no ascii_only in the base.tplx
Text Box
%% Obsoleted by below Mac solution. But, have problem with dvipdfmx
((* block packages *))
\usepackage[T1]{fontenc} \usepackage{xeCJK} \setCJKmainfont[BoldFont=IPAexGothic]{IPAexMincho} \setCJKsansfont{IPAexGothic} \setCJKmonofont{IPAGothic}
Warning from dvipdfmx:
dvipdfmx:warning: CMap has higher supplement number than CIDFont: Ryumin-Light
ほとんど無害な警告:https://oku.edu.mie-u.ac.jp/tex/mod/forum/discuss.php?d=565
https://texwiki.texjp.org/?TeX%E3%81%A8%E3%83%95%E3%82%A9%E3%83%B3%E3%83%88#q1b5e62f
sudo kanji-config-updmap-sys status shows CURRENT family for ja as NoEmbed <= PROBLEM
Solution: sudo kanji-config-updmap-sys auto
OR, make it the same with mac: sudo kanji-config-updmap-sys ipaex
For Mac:
https://mana.bi/wiki.cgi?page=Jupyter+Notebook%A4%C7%A5%DC%A5%BF%A5%F31%A4%C4%A4%C7%C6%FC%CB%DC%B8%ECPDF%A4%F2%BA%EE%C0%AE%A4%B9%A4%EB#p11 <== NOT WORKING.
Same as the above Ubuntu, but NOT working... So, convert with ubuntu vm if includes Japanese OR below:
Prepare the template:
Confirm there is no ascii_only in the base.tplx
vi /Applications/anaconda2/envs/py37/lib/python3.7/site-packages/nbconvert/templates/latex/jsarticle.tplx
Text Box
% Default to the notebook output style
((* if not cell_style is defined *))
((* set cell_style = 'style_ipython.tplx' *))
((* endif *))
% Inherit from the specified cell style.
((* extends cell_style *))
%===============================================================================
% Latex Article
%===============================================================================
((* block docclass *))
\documentclass[uplatex,dvipdfmx,11pt,a4j]{jsarticle}
\usepackage{graphicx}
\usepackage{hyperref}
\usepackage{pxjahyper}
((* endblock docclass *))
((* block packages *))
\usepackage[T1]{fontenc}
% \usepackage{xeCJK}
\setlength{\parindent}{0in} % comment out this line from the output tex source,
% in case indent at new paragraph is needed.
% Basic figure setup, for now with no caption control since it's done
% automatically by Pandoc (which extracts  syntax from Markdown).
\usepackage{graphicx}
% Maintain compatibility with old templates. Remove in nbconvert 6.0
\let\Oldincludegraphics\includegraphics
% Ensure that by default, figures have no caption (until we provide a
% proper Figure object with a Caption API and a way to capture that
% in the conversion process - todo).
\usepackage{caption}
% \DeclareCaptionFormat{nocaption}{}
% \captionsetup{format=nocaption,aboveskip=0pt,belowskip=0pt}
\usepackage[Export]{adjustbox} % Used to constrain images to a maximum size
\adjustboxset{max size={0.9\linewidth}{0.9\paperheight}}
\usepackage{float}
\floatplacement{figure}{H} % forces figures to be placed at the correct location
\usepackage{xcolor} % Allow colors to be defined
\usepackage{enumerate} % Needed for markdown enumerations to work
\usepackage{geometry} % Used to adjust the document margins
\usepackage{amsmath} % Equations
\usepackage{amssymb} % Equations
\usepackage{textcomp} % defines textquotesingle
% Hack from http://tex.stackexchange.com/a/47451/13684:
\AtBeginDocument{%
\def\PYZsq{\textquotesingle}% Upright quotes in Pygmentized code
}
\usepackage{upquote} % Upright quotes for verbatim code
\usepackage{eurosym} % defines \euro
\usepackage[mathletters]{ucs} % Extended unicode (utf-8) support
\usepackage{fancyvrb} % verbatim replacement that allows latex
\usepackage{grffile} % extends the file name processing of package graphics
% to support a larger range
\makeatletter % fix for grffile with XeLaTeX
\def\Gread@@xetex#1{%
\IfFileExists{"\Gin@base".bb}%
{\Gread@eps{\Gin@base.bb}}%
{\Gread@@xetex@aux#1}%
}
\makeatother
% The hyperref package gives us a pdf with properly built
% internal navigation ('pdf bookmarks' for the table of contents,
% internal cross-reference links, web links for URLs, etc.)
\usepackage{hyperref}
% The default LaTeX title has an obnoxious amount of whitespace. By default,
% titling removes some of it. It also provides customization options.
\usepackage{titling}
\usepackage{longtable} % longtable support required by pandoc >1.10
\usepackage{booktabs} % table support for pandoc > 1.12.2
\usepackage[inline]{enumitem} % IRkernel/repr support (it uses the enumerate* environment)
\usepackage[normalem]{ulem} % ulem is needed to support strikethroughs (\sout)
% normalem makes italics be italics, not underlines
\usepackage{mathrsfs}
((* endblock packages *))
((* block maketitle *))
% Title: prefer latex_metadata.title; else fall back to the notebook title
% or, failing that, the notebook file name.
((*- if nb.metadata.get("latex_metadata", {}).get("title", ""): -*))
\title{((( nb.metadata["latex_metadata"]["title"] )))}
((*- else -*))
((*- set nb_title = nb.metadata.get('title', '') or resources['metadata']['name'] -*))
\title{((( nb_title | escape_latex )))}
((*- endif *))
% Author: prefer latex_metadata.author; else the notebook "authors" list;
% else a hard-coded default.
((*- if nb.metadata.get("latex_metadata", {}).get("author", ""): -*))
\author{((( nb.metadata["latex_metadata"]["author"] )))}
((*- else -*))
((* if 'authors' in nb.metadata *))
\author{((( nb.metadata.authors | join(', ', attribute='name') )))}
((* else *))
\author{ivansetiawantky}
((* endif *))
((*- endif *))
% Date: use .get() so notebooks WITHOUT a "date" metadata key do not abort
% the conversion (direct ["date"] indexing raises on a missing key, and is
% inconsistent with the .get() style used above).
((* if nb.metadata.get("date", ""): *))
\date{((( nb.metadata["date"] )))}
((* endif *))
\maketitle
((* endblock maketitle *))
To create the final pdf, first convert from ipynb to the tex source. Then compile the tex source (maybe need to compile 2 times or more) to obtain dvi. Then convert the dvi to pdf file, so 3 steps:
jupyter nbconvert testjpfont.ipynb --to=latex --TemplateExporter.exclude_input=True --template=jsarticle.tplx --output-dir=output-dir
cd output-dir
uplatex testjpfont.tex (2 times to get the cross-reference right)
dvipdfmx testjpfont.dvi
open testjpfont.pdf
Actually, below ptex2pdf is doing uplatex + dvipdfmx:
ptex2pdf -u -l -ot "--shell-escape" testjpfont.tex (2 times re-run to validate reference)
-shell-escape is also OK: ptex2pdf -u -l -ot "-shell-escape" testjpfont.tex
Now, maybe need to change the ubuntu's solution as in Mac (without using xeCJK).
OK, but have problem with dvipdfmx (see above).
FINAL METHOD/TEMPLATE OF NBCONVERT TO PDF:
DO NOT litter/modify the system-installed base.tplx and jsarticle.tplx existing in: /path/to/anaconda/.../lib/python3.7/site-packages/nbconvert/templates/latex
For English only, when doing nbconvert --to pdf, the default template used is style_jupyter.tplx. So:
Create ivans_style_jupyter.tplx (copy from the default style_jupyter.tplx)
Then, modify ivans_style_jupyter.tplx to inherit (extends) from ivans_base.tplx
Modify ivans_base.tplx as needed (so the original base.tplx remains untouched)
Save both ivans_style_jupyter.tplx and ivans_base.tplx to the same directory.
Convert to pdf with:
conda activate base / py37
jupyter nbconvert 20200225_geo_tan.ipynb --to=pdf --TemplateExporter.exclude_input=True --template=../../latex-tplx/ivans_style_jupyter.tplx && open 20200225_geo_tan.pdf
For Japanese, the template is ivans_jsarticle.tplx, which inherits from style_ipython.tplx. So:
Create ivans_style_ipython.tplx (copy from the default style_ipython.tplx)
Then, modify ivans_style_ipython.tplx to inherit (extends) from ivans_base.tplx
Modify ivans_jsarticle.tplx to inherit (extends) from ivans_style_ipython.tplx
Modify ivans_base.tplx as needed (so the original base.tplx remains untouched). This will also affect the English environment.
Save both ivans_style_ipython.tplx and ivans_jsarticle.tplx to the same directory.
Convert first to TeX source, then to pdf:
conda activate base / py37
rm -rf output-dir; jupyter nbconvert testjpfont.ipynb --to=latex --TemplateExporter.exclude_input=True --template=../../latex-tplx/ivans_jsarticle.tplx --output-dir=output-dir
cd output-dir
ptex2pdf -u -l -ot "-shell-escape" testjpfont.tex (2 times)
The class hierarchy is as below:
ivans_base.tplx
/ \
ivans_style_jupyter.tplx ivans_style_ipython.tplx
\
ivans_jsarticle.tplx
Prepare the notebook server (start server only, do not open browser, etc.):
https://qiita.com/syo_cream/items/05553b41277523a131fd
conda activate base (py3); ipython
In[1]: from notebook.auth import passwd
In[2]: passwd()
Out[2]: 'sha1:....' <-- memo this then quit() the ipython
mkdir ~/.jupyter
vim ~/.jupyter/jupyter_notebook_config.py
Text Box
c = get_config()# Notebook上でplotを表示できるようにする c.IPKernelApp.pylab = 'inline'# 全てのIPから接続を許可 c.NotebookApp.ip = '*'# IPython notebookのログインパスワード c.NotebookApp.password = 'sha1:[ハッシュ化されたパスワード]'# 起動時にブラウザを起動させるかの設定 c.NotebookApp.open_browser = False# ポート指定 c.NotebookApp.port = 8888
jupyter notebook <- start server
Port forward jupyter server port to local port:
https://sites.google.com/site/ivansetiawantky/macwork (search ssh -L)
OR, tunautossh.sh
Java source (Android) -> JavaDoc
Swift source (iOS) -> Jazzy (by realm)
C/C++ -> doxygen
https://qiita.com/et79/items/782a5e29156292c4e461
cd source
doxygen -g
Change PROJECT_NAME, INPUT, EXCLUDE, GENERATE_LATEX
doxygen Doxyfile >| doxy.log
Only the header file (*.h) needs to be source documented.
The style is like below (Use /// and ///<):
// -*- coding: utf-8 -*-
/// @namespace Namespace (not needed)
/// @brief Brief description
/// @details Detailed description (@details keyword not necessary)
/// @author Ivan Setiawan <ivan@aa.co.jp>, A Company, Inc.
/// @author Ivan Setiawan <ivan@aa.co.jp>
/// @date Last modified: Wed Feb 27 11:04:29 2019.
#pragma once
#include "aaaa.h"
namespace NName {
/// @brief Hello
/// @details Class for reading ...
/// @see Wavfile reference: <https://>
class ReadFile
{
private:
/// File pointer.
FILE *ptr;
/// Reads samples from the file ...
/// @return Number of read bytes.
int read(short *buffer, ///< Pointer to buffer to read data from.
int maxNum ///< Size of buffer (number of element).
);
Another example of documenting a header file: http://www.statmt.org/moses/?n=Moses.CodeStyle
Example of documenting a header file for an abstract class (with pure virtual member function):
// -*- coding: utf-8 -*-
/// @file tsm.h
/// @brief Header file for the abstract class CTsm. Include this file in every
/// header file that defines an implementation of Time Scale Modification.
/// @details File tsm.h defines the base-class CTsm which is intended as the
/// base-class of derived-classes that implement a Time Scale Modification
/// (TSM) algorithm.
/// @author Ivan Setiawan <bb@email.jp>, company name
/// @author Ivan Setiawan <aa@moreemail.jp>
/// @date Last modified: Sun Mar 10 17:05:58 2019.
#pragma once
#include <iostream>
#include <stdexcept>
#include <boost/shared_ptr.hpp>
/// @brief The macro TypedefineSharedPtr defines a new type "Name" as a shared
/// pointer to a class "Class".
/// @details TypedefineSharedPtr macro is used in all headers that include
/// tsm.h to easily define a new type as "shared pointer to a class."
/// Note: the macro body already ends in ';', so an invocation followed by a
/// second ';' (as done below) expands to a harmless empty declaration.
#define TypedefineSharedPtr(Class, Name) typedef boost::shared_ptr<Class> Name;
/// Namespace NTsm for the abstract class CTsm.
namespace NTsm {
// Forward declaration so the shared-pointer alias can be declared before the
// full class definition appears.
class CTsm;
/// Type CTsmPtr is a type of shared pointer to CTsm.
TypedefineSharedPtr( CTsm, CTsmPtr );
/// Namespace NWavFileFmt for the enumeration EWavFileFmt.
namespace NWavFileFmt {
/// Enumeration EWavFileFmt for the file format of the input audio: raw or wav.
enum EWavFileFmt {
rawFile = 0, ///< Raw (headerless) input file
wavFile, ///< Wav input file
NUM_WAVFILEFMT ///< Number of file formats (keep last; usable as a count)
};
}
// Time Scale Modification (TSM) with PICOLA.
// Input: raw (currently raw only) input filename (ex: output.raw from HTS)
// Output: raw (currently raw only) output filename
//
/// @brief Abstract class CTsm as the base-class for derived-class that
/// implements Time Scale Modification.
/// @details The class CTsm declares interfaces that must be implemented by the
/// derived-class to perform time scale modification.
class CTsm {
public:
/// Virtual destructor: instances are deleted through base-class (CTsmPtr)
/// pointers, so destruction must dispatch to the derived-class destructor.
virtual ~CTsm() {}
// Currently only one implementation of TSM, i.e., PICOLA TSM, so no need to
// have argument to specify which implementation is used.
/// @brief Factory method: create the object of type CTsmPtr.
/// @details Currently, modify the createTsm
/// definition in the source file to instantiate an object from the desired
/// class that implements the TSM algorithm to be used.
static CTsmPtr createTsm( std::istream& in, ///< input stream
std::ostream& out ///< output stream
);
/// @brief Set the TSM processing parameters, including input file
/// parameters (sampling frequency, etc.) in the case of raw file input.
/// @return Success/failure flag (exact contract defined by the derived
/// class implementing this pure virtual).
/// NOTE(review): the top-level const on the bool return has no effect; it
/// is kept because derived-class overrides must match this signature.
virtual const bool setTsmParams(
const NWavFileFmt::EWavFileFmt filefmt, ///< Input file format
const unsigned int sampleFreq, ///< Sampling frequency
const unsigned short numCh, ///< Number of channels
const unsigned short bytesPerSample, ///< Number of bytes per sample
const double ratio ///< Ratio of output-length to
/// input-length (i.e., inverse
/// of the output speed). Less
/// than 1.0 means the output
/// has faster tempo.
) = 0;
/// Perform the Time Scale Modification.
virtual const bool doTsm() = 0;
};
}
Use below code:
// Use (gcc) compiler preprocessor to detect endianness at COMPILE time.
// Exactly one of the two macros below ends up 1; compilation fails on a
// toolchain that does not predefine __BYTE_ORDER__ (gcc and clang do).
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#define COMPILED_FOR_BIG_ENDIAN 1
#define COMPILED_FOR_LITTLE_ENDIAN 0
#elif __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define COMPILED_FOR_BIG_ENDIAN 0
#define COMPILED_FOR_LITTLE_ENDIAN 1
#else
#error "Compiler does NOT support __BYTE_ORDER__ checking"
#endif
/// @brief Detect endianness at RUN time (to cross-check the compile-time macros).
/// @return 1 on a big-endian host, 0 on a little-endian host.
inline int isBigEndian()
{
    // Look at the lowest-addressed byte of an int holding 1: it is 1 on a
    // little-endian host and 0 on a big-endian host. A named cast replaces
    // the original C-style cast (same behavior, greppable intent).
    const int probe = 1;
    return !*reinterpret_cast<const char *>(&probe);
}
int main( int argc, char* argv[] )
{
using namespace std;
using namespace NTsm;
// Check endianness at runtime.
#if COMPILED_FOR_BIG_ENDIAN
if( !isBigEndian() ) {
std::cerr << "NOT big endian!\n";
exit( -1 );
}
#elif COMPILED_FOR_LITTLE_ENDIAN
assert( !isBigEndian() );
#else
#error "No compiled-check endianness macro is set"
#endif
The endianness problem arises only when writing/reading values wider than 1 byte, e.g., a 2-byte short or a 4-byte int. Reading/writing a file (for serializing) 1 byte (char) at a time is always safe.
Another strategy for dealing with endianness is to always write in little-endian, and always read in little-endian, then reverse the result if running on a big-endian system. The advantage of this strategy is that data serialized to a file in little-endian manner can be used as-is on a big-endian system. To do this, writing and reading of 4-byte (or 2-byte) values must also be performed explicitly byte by byte, STARTING from the LSB. The code is like below.
/// Serialize a 32-bit value to file f, least-significant byte first, so the
/// on-disk layout is always little-endian regardless of host endianness.
void Write4Bytes(FILE *f, int value)
{//=================================
    for(int byteNo = 0; byteNo < 4; byteNo++)
    {
        // Emit the current low byte, then bring the next byte down.
        fputc(value & 0xff, f);
        value >>= 8;
    }
}
/// @brief Read 4 bytes (least-significant byte first) from file f into a word.
/// @details Counterpart of Write4Bytes: the on-disk layout is always
/// little-endian, so the assembled value is host-endianness independent.
/// NOTE: like the original, end-of-file is not detected; fgetc() returning
/// EOF contributes 0xff bytes to the result.
/// @return The assembled 32-bit value.
int Read4Bytes(FILE *f)
{//====================
    // Accumulate in an UNSIGNED word: with the original signed int, shifting
    // a byte >= 0x80 left by 24 bits lands in the sign bit, which is
    // undefined behavior. Results are unchanged on real platforms.
    unsigned int acc = 0;
    for(int ix = 0; ix < 4; ix++)
    {
        unsigned int c = (unsigned int)fgetc(f) & 0xffu;
        acc |= c << (ix * 8);
    }
    return (int)acc;
}
/// Reverse the byte order of word (little-endian <-> big-endian).
/// Compiles to a no-op unless ARCH_BIG is defined.
int Reverse4Bytes(int word)
{//==========================
#ifdef ARCH_BIG
    // Pull bytes out of word LSB-first and push them into the result
    // MSB-first, which reverses their order.
    int reversed = 0;
    for(int shift = 0; shift <= 24; shift += 8)
    {
        reversed = (reversed << 8) | ((word >> shift) & 0xff);
    }
    return reversed;
#else
    return word;  // little-endian host: already in file byte order
#endif
}
See espeak codes for details.
setlocale(LC_CTYPE,"en_US.UTF-8");
// multy-byte (variable length)
std::string s = u8"abcあいうえお順def";
fprintf( stderr, "var length std::string s = u8 %s, %lu\n", s.c_str(), s.size() );
for( std::string::iterator it=s.begin() ; it!=s.end() ; it++) {
fprintf( stderr, "%c:%lu, ", *it, sizeof(*it) );
}
fprintf( stderr, "\n\n" );
std::string s2 = u8"あいうえお順";
fprintf( stderr, "var length std::string s2 = u8 %s, %lu\n\n", s2.c_str(), s2.size() );
// wide string (fixed length)
std::wstring s3 = L"abcあいうえお順def";
fprintf( stderr, "fixed wide length std::wstring s3 = L %ls, %lu\n", s3.c_str(), s3.size() );
for( std::wstring::iterator it=s3.begin() ; it!=s3.end() ; it++) {
fprintf( stderr, "%lc:%lu, ", *it, sizeof(*it) );
}
fprintf( stderr, "\n" );
fprintf( stderr, "sizeof(char) = %lu\n", sizeof(char) );
fprintf( stderr, "sizeof(wchar_t) = %lu\n", sizeof(wchar_t) );
Output:
var length std::string s = u8 abcあいうえお順def, 24
a:1, b:1, c:1, �:1, �:1, �:1, �:1, �:1, �:1, �:1, �:1, �:1, �:1, �:1, �:1, �:1, �:1, �:1, �:1, �:1, �:1, d:1, e:1, f:1,
var length std::string s2 = u8 あいうえお順, 18
fixed wide length std::wstring s3 = L abcあいうえお順def, 12
a:4, b:4, c:4, あ:4, い:4, う:4, え:4, お:4, 順:4, d:4, e:4, f:4,
sizeof(char) = 1
sizeof(wchar_t) = 4
A switch-case statement only accepts an integer inside the switch. To virtually switch on a "string", use packed character literals such as L('a', 'f') — judging from the while-loop below, L appears to be a helper macro that packs up to 4 characters into one int (not the wide-char literal prefix).
Translator *SelectTranslator(const char *name)
{//===========================================
int name2 = 0;
// convert name string into a word of up to 4 characters, for the switch()
while(*name != 0)
name2 = (name2 << 8) + *name++;
switch(name2)
{
case L('a','f'):
{ ...
}
break;
case L('a','m'): // Amharic, Ethiopia
{
...