In [2]:
using LinearAlgebra
A = rand(3,3) + I
inv(A)
Out[2]:
3×3 Array{Float64,2}:
  0.726387   0.0267446  -0.210078 
 -0.709669   1.07082     0.0928403
  0.264726  -0.826993    0.806143 
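In [ ]:
# For solving A*x = b, a factorization-based solve `A \ b` is generally
# preferred over forming inv(A) explicitly; a quick illustrative check:
b = ones(3)
A \ b ≈ inv(A) * b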
In [3]:
strang(n) = SymTridiagonal(2*ones(n),-ones(n-1))
strang(4)
# Note the types
Out[3]:
4×4 SymTridiagonal{Float64,Array{Float64,1}}:
  2.0  -1.0    ⋅     ⋅ 
 -1.0   2.0  -1.0    ⋅ 
   ⋅   -1.0   2.0  -1.0
   ⋅     ⋅   -1.0   2.0
In [4]:
strang(4)\ones(4)
Out[4]:
4-element Array{Float64,1}:
 1.9999999999999998
 2.9999999999999996
 2.9999999999999996
 2.0               
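In [ ]:
# `\` picks its algorithm from the matrix type: for a SymTridiagonal it can
# use an O(n) tridiagonal factorization rather than a dense LU (illustrative):
factorize(strang(4))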
In [5]:
\
Out[5]:
\ (generic function with 152 methods)
In [6]:
# Multiple Dispatch
methods(eigvals)
Out[6]:
14 methods for generic function eigvals:
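In [ ]:
# A minimal multiple-dispatch sketch: the call area(s) picks a method from the
# concrete type of `s` (Circle/Square are illustrative types, not from Base).
abstract type Shape end
struct Circle <: Shape; r::Float64; end
struct Square <: Shape; s::Float64; end

area(c::Circle) = π * c.r^2
area(sq::Square) = sq.s^2

area.([Circle(1.0), Square(2.0)])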
In [7]:
# Types

import Base: *
*(a::Number, g::Function)= x->a*g(x)   # Scale output
*(f::Function,t::Number) = x->f(t*x)   # Scale argument
*(f::Function,g::Function)= x->f(g(x)) # Function composition
Out[7]:
* (generic function with 346 methods)
In [8]:
f(n) = n+2
print(f(2))
g = f*2
g(2)
4
Out[8]:
6
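In [ ]:
# Illustrative check of the other two `*` methods defined in In[7]:
h1 = 3*f        # scale output:  h1(x) == 3*(x + 2)
h2 = f*f        # composition:   h2(x) == f(f(x))
h1(2), h2(2)    # expected (12, 6)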
In [9]:
# Unicode
🛀(🔨) = 2 + 🔨
🛀(3)
Out[9]:
5
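In [ ]:
# Unicode also covers infix operators; ⊕ (typed \oplus<TAB>) parses as a
# binary operator (illustrative):
⊕(a, b) = (a + b) / 2
3 ⊕ 5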
In [ ]:
# Macros
In [11]:
@macroexpand @evalpoly 10 3 4 5 6
Out[11]:
:(let #22#tt = 10
      #= math.jl:150 =#
      if #22#tt isa Base.Math.Complex
          #15#x = (Base.Math.real)(#22#tt)
          #16#y = (Base.Math.imag)(#22#tt)
          #17#r = #15#x + #15#x
          #18#s = (Base.Math.muladd)(#15#x, #15#x, #16#y * #16#y)
          #19#a2 = 6
          #20#a1 = (Base.Math.muladd)(#17#r, #19#a2, 5)
          #21#a0 = (Base.Math.muladd)(#17#r, #20#a1, 4 - #18#s * #19#a2)
          (Base.Math.muladd)(#21#a0, #22#tt, 3 - #18#s * #20#a1)
      else
          #23#t = #22#tt
          (Base.Math.muladd)(#23#t, (Base.Math.muladd)(#23#t, (Base.Math.muladd)(#23#t, 6, 5), 4), 3)
      end
  end)
In [12]:
macro sayhello(name)
           return :( println("Hello, ", $name) )
       end
@sayhello "Bob"
Hello, Bob
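In [ ]:
# A slightly bigger illustrative macro: it receives the expression unevaluated,
# so it can splice in both the expression's text and its runtime value.
macro showexpr(ex)
    return :( println($(string(ex)), " = ", $(esc(ex))) )
end
@showexpr 1 + 2*3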
In [13]:
f(a,b) = a+b
Out[13]:
f (generic function with 2 methods)
In [14]:
methods(f)
Out[14]:
2 methods for generic function f:
  • f(n) in Main at In[8]:1
  • f(a, b) in Main at In[13]:1
In [15]:
f(2,3)
Out[15]:
5
In [16]:
f(2.0,3)
Out[16]:
5.0
In [21]:
@code_native f(2,3)
	.text
; Function f {
; Location: In[13]:1
; Function +; {
; Location: In[13]:1
	leaq	(%rdi,%rsi), %rax
;}
	retq
	nopw	%cs:(%rax,%rax)
;}
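In [ ]:
# The same call can be inspected one level up from the native code
# (@code_lowered and @code_typed show the earlier stages):
@code_llvm f(2, 3)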
In [24]:
# It's Dynamic
x = rand(Bool) ?  1 :  1.0
y = rand(Bool) ?  1 :  1.0
x+y
Out[24]:
2
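In [ ]:
# Because the branch is taken at run time, inference can only narrow the sum
# to a small Union; @code_warntype makes that visible (illustrative sketch):
addxy() = (rand(Bool) ? 1 : 1.0) + (rand(Bool) ? 1 : 1.0)
@code_warntype addxy()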
In [26]:
# Performance Tips
# What not to do:
a = Real[];                     # container with an abstract element type

struct MyAmbiguousType
        b
end

MyAmbiguousType(2)
MyAmbiguousType(3.0)

matfun(a,b) = MyAmbiguousType(a.b + b.b)   # field types unknown, so `+` is a runtime dispatch
@code_native(matfun(MyAmbiguousType(2), MyAmbiguousType(3.0)))
	.text
; Function matfun {
; Location: In[26]:12
	pushq	%r15
	pushq	%r14
	pushq	%rbx
	subq	$64, %rsp
	vxorps	%xmm0, %xmm0, %xmm0
	vmovaps	%xmm0, (%rsp)
	movq	$0, 16(%rsp)
	movq	%rsi, 56(%rsp)
	movq	%fs:0, %rbx
	movq	$2, (%rsp)
	movq	-10920(%rbx), %rax
	movq	%rax, 8(%rsp)
	movq	%rsp, %rax
	movq	%rax, -10920(%rbx)
	leaq	-10920(%rbx), %r14
	movq	(%rsi), %rax
	movq	8(%rsi), %rcx
; Function getproperty; {
; Location: sysimg.jl:18
	movq	(%rax), %rax
	movq	(%rcx), %rcx
;}
	movabsq	$jl_system_image_data, %rdx
	movq	%rdx, 32(%rsp)
	movq	%rax, 40(%rsp)
	movq	%rcx, 48(%rsp)
	movabsq	$jl_apply_generic, %rax
	leaq	32(%rsp), %rdi
	movl	$3, %esi
	callq	*%rax
	movq	%rax, %r15
	movq	%r15, 16(%rsp)
; Function Type; {
; Location: In[26]:6
	movabsq	$jl_gc_pool_alloc, %rax
	movl	$1424, %esi             # imm = 0x590
	movl	$16, %edx
	movq	%r14, %rdi
	callq	*%rax
	movabsq	$140336610376032, %rcx  # imm = 0x7FA2A9CF7960
	movq	%rcx, -8(%rax)
	movq	%r15, (%rax)
;}
	movq	8(%rsp), %rcx
	movq	%rcx, -10920(%rbx)
	addq	$64, %rsp
	popq	%rbx
	popq	%r14
	popq	%r15
	retq
;}
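In [ ]:
# What to do instead (a sketch): give the field a concrete or parametric type
# so the compiler knows the layout and can inline `+` directly; the native
# code for the same operation then shrinks to a few instructions.
struct MyConcreteType{T<:Number}
    b::T
end

matfun2(a, b) = MyConcreteType(a.b + b.b)
@code_native matfun2(MyConcreteType(2), MyConcreteType(3.0))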
In [33]:
function msum(A::AbstractArray)
    r = zero(eltype(A))
    for i = 1:length(A)
        @fastmath @inbounds r += A[i]/π
    end
    return r
end

arr = rand(10000000)
@time msum(arr)
@time msum(arr)
  0.022817 seconds (15.42 k allocations: 825.830 KiB)
  0.010917 seconds (5 allocations: 176 bytes)
Out[33]:
1.5915408596138828e6
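In [ ]:
# The first @time above also pays for JIT compilation of msum; the second call
# reuses the compiled code. For steadier measurements, BenchmarkTools.jl
# (assumed installed here) repeats the call and reports the minimum:
using BenchmarkTools
@btime msum($arr)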
In [ ]:
# Given Channels c1 and c2,
c1 = Channel(32)
c2 = Channel(32)

# and a function `foo` that reads from c1 and writes to c2,
function foo()
    while true
        data = take!(c1)
        result = data*2     # process data
        put!(c2, result)    # write out result
    end
end

# we can schedule `n` instances of `foo`
# to be active concurrently.
n = 4
for _ in 1:n
    @async foo()
end

put!(c1, 3)    # feed in one item so the take! below has something to return
take!(c2)
In [34]:
Threads.nthreads()
Out[34]:
1
In [ ]:
j = Threads.Atomic{Int}(0);
a = zeros(10)
Threads.@threads for i = 1:10
    a[i] = Threads.threadid()
    Threads.atomic_add!(j, i)
end
print(j)
a
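In [ ]:
# A common sketch for threaded reductions: per-thread partial sums instead of
# one shared accumulator, avoiding data races and atomic contention.
function tsum(v)
    partials = zeros(eltype(v), Threads.nthreads())
    Threads.@threads for i = 1:length(v)
        partials[Threads.threadid()] += v[i]
    end
    return sum(partials)
end
tsum(collect(1.0:100.0))    # expected 5050.0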
In [ ]:
using Distributed
addprocs(2)

Bref = @spawn rand(1000,1000)^2;
In [ ]:
Bref
fetch(Bref)
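In [ ]:
# Related sketch: pmap farms independent calls out to the workers added above;
# anything the workers need is defined with @everywhere.
@everywhere sqsum(n) = sum(abs2, rand(n))
pmap(sqsum, [10_000, 20_000, 30_000])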
In [ ]:
using Distributed
using SharedArrays

a = SharedArray{Float64}(10)
@sync @distributed for i = 1:10
    a[i] = i
end

b = @distributed (+) for i = 1:10
    a[i]
end

b + 10
In [ ]:
# Dot Syntax
f(n) = n+2
f.([1,2,3])
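In [ ]:
# Dotted calls fuse into a single loop; @. adds the dots everywhere and .=
# writes into an existing array (illustrative):
xs = [1.0, 2.0, 3.0]
ys = similar(xs)
@. ys = sin(xs)^2 + cos(xs)^2
ys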
In [ ]:
k(a,b) = ((a .+ b) ./ 10)
@code_lowered(k([1,2,3], [10 20 30 40]))
k([1,2,3], [10 20 30 40])
In [ ]:
# https://nextjournal.com/sdanisch/julia-gpu-programming
using Colors
using GPUArrays

function juliaset(z0, maxiter)
    c = ComplexF32(-0.5, 0.75)
    z = z0
    for i in 1:maxiter
        abs2(z) > 4f0 && return (i - 1) % UInt8
        z = z * z + c
    end
    return maxiter % UInt8     # `%` converts to UInt8 without an overflow check
end

w,h = 50,50
Typ = Array
q = Typ([ComplexF32(r, i) for i=1:-(2.0/w):-1, r=-1.5:(3.0/h):1.5])
out = Typ(zeros(UInt8, size(q)))
out .= juliaset.(q, 16)
GPUArrays.synchronize(out)


cmap = colormap("Blues", 16 + 1)
color_lookup(val, cmap) = cmap[val + 1]
color_lookup.(out,(cmap,))
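In [ ]:
# The kernel above is written against the element type only, so with a CUDA
# device and the CuArrays.jl package (an assumption here), the same broadcast
# runs on the GPU just by swapping the container type:
using CuArrays
q_gpu   = CuArray(q)
out_gpu = CuArray(zeros(UInt8, size(q)))
out_gpu .= juliaset.(q_gpu, 16)
Array(out_gpu)    # copy back to the CPU for the color lookup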